query stringlengths 9–9.05k | document stringlengths 10–222k | metadata dict | negatives sequencelengths 30–30 | negative_scores sequencelengths 30–30 | document_score stringlengths 4–10 | document_rank stringclasses 2 values |
---|---|---|---|---|---|---|
Gets a default or paginated collection of LSs. [Arguments] | def fusion_api_get_ls(self, uri=None, api=None, headers=None, param=''):
return self.ls.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit",
"def list(\n self,\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"_models.StaticSiteCollection\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.StaticSiteCollection\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-01-01\"\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('StaticSiteCollection', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )",
"def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])",
"def get(self):\r\n return get_all()",
"def get_all(self, marker=None, limit=None,\n sort_key='name', sort_dir='asc'):\n\n services = self._get_services(marker,\n limit,\n sort_key,\n sort_dir)\n\n return ServicesCollection.convert_with_links(services, limit,\n sort_key=sort_key,\n sort_dir=sort_dir)",
"def global_service_collection():\n\tglobal global_lsc\n\t# If this is the first call then the object is not yet created\n\tif not global_lsc:\n\t\t# Create the global object\n\t\tglobal_lsc = LadonServiceCollection()\n\treturn global_lsc",
"def getListingLists(**kwargs):",
"def list(options=None):\n if options is None:\n return requests.get('/')\n else:\n return requests.get('/', options)",
"def listings(self, b_start=None, b_size=None):\n if b_size == None:\n b_size = self.batch_size\n if b_start == None:\n b_start = (getattr(self, 'page', 1) - 1) * b_size\n if self.context.portal_type == 'Folder':\n content_filter = {\n 'b_start': b_start,\n 'b_size': b_size,\n 'portal_type': 'Event',\n 'sort_on': 'start',\n 'sort_order': 'ascending',\n 'review_state': 'published',\n 'start': {'query': DateTime(), 'range': 'min'},\n }\n items = self.context.getFolderContents(\n content_filter, batch=True\n )\n elif self.context.portal_type == 'Topic':\n if b_start and not self.request.get('b_start'):\n self.request['b_start'] = b_start\n items = self.context.queryCatalog(self.request, True, b_size)\n elif self.context.portal_type == 'Collection':\n items = self.context.results(True, b_start=b_start, b_size=b_size)\n else:\n items = []\n return items",
"def get_list(self, *args, **kwargs):\n pass",
"def get_list(self, *args, **kwargs):\n pass",
"def ls(filter=None):",
"def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)",
"def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)",
"def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)",
"def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)",
"def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)",
"def get_list(self, request, **kwargs):\n # TODO: Uncached for now. Invalidation that works for everyone may be\n # impossible.\n objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))\n sorted_objects = self.apply_sorting(objects, options=request.GET)\n\n paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(),\n limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)\n to_be_serialized = paginator.page()\n\n # Dehydrate the bundles in preparation for serialization.\n bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]\n to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles]\n to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)\n\n self.add_shit_to_meta(request, to_be_serialized)\n\n return self.create_response(request, to_be_serialized)",
"def obj_get_list(self, request=None, **kwargs):\n filter_object = self.get_filter_object(request)\n list = self.get_collection(request).find(filter_object)\n order_field, direction = self.get_order_field_and_direction(request)\n \n if (order_field is not None):\n list.sort(order_field, direction)\n \n return map(Document, list)",
"def getLSData(*args):\n return args[0].Data.LSData.ls_data",
"def get(self):\n return GenericGet().get_catalogs()",
"def get(self, *args):\n return _libsbml.ListWrapperSBase_get(self, *args)",
"def list(self,\n cursor=None,\n default_service=None,\n include_mark_for_delete_objects=None,\n included_fields=None,\n page_size=None,\n sort_ascending=None,\n sort_by=None,\n ):\n return self._invoke('list',\n {\n 'cursor': cursor,\n 'default_service': default_service,\n 'include_mark_for_delete_objects': include_mark_for_delete_objects,\n 'included_fields': included_fields,\n 'page_size': page_size,\n 'sort_ascending': sort_ascending,\n 'sort_by': sort_by,\n })",
"def command_ls(args):\n _perform_environment_check()\n\n if args[\"l\"] and args[\"L\"]:\n exit_with_error(\n \"The -l and -L switches of the ls command are incompatible.\")\n\n session = setup_session()\n expanded_queries = _expand_query_list(session, args[\"queries\"],\n args[\"recursive\"], args[\"verbose\"])\n query_results = retrieve_object_info(\n session, expanded_queries, args[\"sort\"])\n if args[\"l\"] or args[\"L\"]:\n _ls_print_results(query_results, args)\n else:\n dedup_results = _replica_results_dedup(query_results)\n _ls_print_results(dedup_results, args)",
"def list(self, request, *args, **kwargs):\n\n queryset = self.filter_queryset(self.get_queryset())\n\n page = request.query_params.get('page', 1)\n paginator = Paginator(queryset, 8)\n\n try:\n queryset = paginator.page(page)\n\n except PageNotAnInteger:\n queryset = paginator.page(1)\n\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n page = int(page)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response({'items': serializer.data, 'page': page, 'pages': paginator.num_pages})",
"def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]",
"def ls ( *args , **kwargs ) :\n _g = appMgr()\n _es = _g.evtSvc()\n return _es.ls ( *args , **kwargs )",
"def list(self):\n return self.request(\"GET\")",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def list(default_view):\n ListCommandExecutor(default_view).list()"
] | [
"0.60762984",
"0.5760143",
"0.5707024",
"0.5635417",
"0.56306773",
"0.55461514",
"0.5524759",
"0.54975986",
"0.5490331",
"0.54048747",
"0.54048747",
"0.53754765",
"0.53707415",
"0.53707415",
"0.53707415",
"0.53707415",
"0.53707415",
"0.5364701",
"0.5357447",
"0.53344554",
"0.53177106",
"0.53030556",
"0.5291816",
"0.528821",
"0.52862746",
"0.52721506",
"0.52561677",
"0.5248621",
"0.5243482",
"0.5233952"
] | 0.59576935 | 1 |
Get existing compatibility report [Example] ${resp} = Fusion Api Get Security Compatibility Report | | | | def fusion_api_get_security_compatibility_report(self, uri=None, api=None, headers=None, param='/compatibility-report'):
return self.security_standards.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_compatibility_report(self, uri, param='', api=None, headers=None):\n return self.migratableVcDomain.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_create_security_compatibility_report(self, body, uri=None, api=None, headers=None, param='/compatibility-report'):\n return self.security_standards.post(uri=uri, api=api, headers=headers, body=body, param=param)",
"def fusion_api_update_security_compatibility_report(self, body, uri=None, api=None, headers=None, param='/compatibility-report?force=true'):\n return self.security_standards.post(uri=uri, api=api, headers=headers, body=body, param=param)",
"def print_response(response):\n #fyi this is not my code, i grabbed it from github\n #forgot to copy the url though\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n\n for row in report.get('data', {}).get('rows', []):\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n print header + ': ' + dimension\n\n for i, values in enumerate(dateRangeValues):\n print 'Date range: ' + str(i)\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n print metricHeader.get('name') + ': ' + value",
"def compliance(self) -> pulumi.Output['outputs.ComplianceNoteResponse']:\n return pulumi.get(self, \"compliance\")",
"def hardware_report(report_type, report_request):\n return subprocess.check_output(['/opt/dell/srvadmin/bin/omreport',\n report_type,\n report_request]).decode('UTF-8')",
"def status():\n return jsonify(service='scwr-api-requirements', status='ok')",
"def get_patient_status():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/3\")\n print(r.text)",
"def main():\n reportSample = CompatibilityReportSample()\n reportSample.run()",
"def get_applicable_components(ip_address, headers, dup_payload):\n # Parse the single dup update report and print out versions needing\n # an update. In addition add them to the target_data as needed for\n # the job payload\n target_data = []\n dup_url = 'https://%s/api/UpdateService/Actions/UpdateService.GetSingleDupReport' % ip_address\n dup_resp = requests.post(dup_url, headers=headers,\n data=json.dumps(dup_payload), verify=False)\n if dup_resp.status_code == 200:\n dup_data = dup_resp.json()\n file_token = str(dup_payload['SingleUpdateReportFileToken'])\n for device in dup_data:\n device_name = str(device['DeviceReport']['DeviceServiceTag'])\n device_ip = str(device['DeviceReport']['DeviceIPAddress'])\n for component in device['DeviceReport']['Components']:\n curr_ver = str(component['ComponentCurrentVersion'])\n avail_ver = str(component['ComponentVersion'])\n upd_action = str(component['ComponentUpdateAction'])\n update_crit = str(component['ComponentCriticality'])\n reboot_req = str(component['ComponentRebootRequired'])\n comp_name = str(component['ComponentName'])\n print(\"\\n---------------------------------------------------\")\n print(\"Device =\", device_name)\n print(\"IPAddress =\", device_ip)\n print(\"Current Ver =\", curr_ver)\n print(\"Avail Ver =\", avail_ver)\n print(\"Action =\", upd_action)\n print(\"Criticality =\", update_crit)\n print(\"Reboot Req =\", reboot_req)\n print(\"Component Name =\", comp_name)\n\n if avail_ver > curr_ver:\n temp_map = {'Id': device['DeviceId'],\n 'Data': str(component['ComponentSourceName']) + \"=\" + file_token, 'TargetType': {}}\n temp_map['TargetType']['Id'] = int(device['DeviceReport']['DeviceTypeId'])\n temp_map['TargetType']['Name'] = str(device['DeviceReport']['DeviceTypeName'])\n target_data.append(temp_map)\n else:\n print(\"Unable to get components DUP applies to .. Exiting\")\n return target_data",
"def vt_parse_report(response, resource):\n nb_tested = len(response[\"scans\"])\n nb_detected = sum(1 for av,res in response[\"scans\"].items()\n if res[\"detected\"])\n\n if \"md5\" in response:\n md5 = response[\"md5\"]\n\n elif \"url\" in response:\n dig = hashlib.md5()\n dig.update(response[\"url\"].encode(\"utf8\"))\n md5 = dig.hexdigest()\n\n return {\"timestamp\": int(time.time()),\n \"status\": status_from_percentage(nb_detected / nb_tested),\n \"md5\": md5,\n \"resource\": json.loads(resource)[\"resource\"]}",
"def get(self, request, format=None):\n param_report = self.request.query_params.get('report', None)\n start_date = self.request.query_params.get('start_date', '')\n end_date = self.request.query_params.get('end_date', '')\n detailed_report = self.request.query_params.get('detailed_report', 'false')\n\n if start_date == '':\n start_date = datetime.date.today().strftime(\"%Y-%m-%d 16:00:00\")\n else:\n start_date = start_date.replace(\"T\", \" \", 1)\n if end_date == '':\n end_date = datetime.date.today().strftime(\"%Y-%m-%d 16:00:00\")\n else:\n end_date = end_date.replace(\"T\", \" \", 1)\n\n if param_report is None or param_report == \"\":\n return Response(\"No report specified\", status=status.HTTP_400_BAD_REQUEST)\n\n table_html = None\n table_title = None\n table_subtitle = \"\"\"%s - %s\"\"\" % (start_date, end_date)\n\n # if param_report == \"police_division_summary_report\":\n # table_html = get_police_division_summary()\n # table_title = \"Police Division Summary Report\"\n\n if param_report == \"category_wise_summary_report\":\n table_html = get_category_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Category\"\n\n elif param_report == \"mode_wise_summary_report\":\n table_html = get_mode_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Mode\"\n\n elif param_report == \"district_wise_summary_report\":\n table_html = get_district_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by District\"\n\n elif param_report == \"severity_wise_summary_report\":\n table_html = get_severity_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Severity\"\n\n elif param_report == \"subcategory_wise_summary_report\":\n table_html = get_subcategory_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Subcategory\"\n\n elif param_report == \"status_wise_summary_report\":\n table_html = get_status_summary(start_date, end_date, detailed_report)\n table_title = \"No. of Incidents by Status\"\n\n if table_html is None:\n return Response(\"Report not found\", status=status.HTTP_400_BAD_REQUEST)\n\n table_html = apply_style(\n table_html.replace(\".0\", \"\", -1).replace(\"(Total No. of Incidents)\",\n \"<strong>(Total No. of Incidents)</strong>\", 1).replace(\n \"(Unassigned)\", \"<strong>(Unassigned)</strong>\", 1), table_title, table_subtitle)\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Report.pdf\"'\n pisa.CreatePDF(table_html, dest=response)\n\n return response",
"def print_response(response):\n for report in response.get('reports', []):\n rows = report.get('data', {}).get('rows', [])\n for row in rows:\n print(row)",
"def get_result_xls(sdk_version, live_push_version, play_duration, lf=0, mode_type=MODE_UDP, start_time=None,\n end_time=None, bandwidth='2M'):\n db_obj = MysqlDB(MYSQL_HOST, MYSQL_UE_USER, MYSQL_PASSWORD, MYSQL_DB_NAME)\n file_name = EXCEL_PATH + 'result_' + sdk_version + '_' + live_push_version + '.xls'\n summary_file_name = EXCEL_PATH + 'summary_result.xls'\n table_name = 'sdk' + sdk_version + '+livepush' + live_push_version\n results = condition_select_v2(db_obj, sdk_version=sdk_version, live_push_version=live_push_version, mode=mode_type,\n play_duration=play_duration, lf_number=lf, band_width=bandwidth,\n start_time=start_time, end_time=end_time)\n data = [EXCEL_ROW0]\n data += results\n if os.path.isfile(file_name):\n append_data2xls(file_name, table_name, results)\n else:\n new_excel = xlwt.Workbook()\n table = new_excel.add_sheet(table_name)\n write_xls(table, data)\n new_excel.save(file_name)\n\n copy_sheet_to_other_file(file_name, summary_file_name, table_name)",
"async def get(self) -> web.Response:\n response = {\n \"status\": \"success\",\n \"Data\": \"No current tools supported\",\n }\n\n return web.json_response(data=response, status=200)",
"def getIssStatus():\n issURL = 'http://api.open-notify.org/iss-now.json'\n r = requests.get(issURL)\n return r.json()",
"def test_get_single_report(self): \n from rest_framework.test import APIClient\n client = APIClient()\n \n response = self.client.get('/api/reports/epic/',\n HTTP_AUTHORIZATION='Token ' + self.token_admin,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"message\"], \"You have no permissions\")\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def get(self):\n result = subprocess.run([\"axicli\", \"--mode\", \"manual\"], capture_output=True)\n return json.dumps({\"version\": str(result.stderr)})",
"def fusion_api_delete_security_compatibility_report(self, uri=None, api=None, headers=None, param='/compatibility-report'):\n return self.security_standards.delete(uri=uri, api=api, headers=headers, param=param)",
"def vt_report(command, resource, key):\n if command == \"file_report\":\n url = \"https://www.virustotal.com/vtapi/v2/file/report\"\n elif command == \"url_report\":\n url = \"https://www.virustotal.com/vtapi/v2/url/report\"\n else:\n return None\n\n res = json.loads(resource)[\"scan_id\"]\n response = retrieve_report(res, url, key)\n return json.loads(response.text)",
"def getReport(id):\r\n\tglobal my_api_key\r\n\turl = \"https://www.virustotal.com/vtapi/v2/file/report\"\r\n\tparameters = {\"resource\": id, \"apikey\": my_api_key}\r\n\tdata = urllib.urlencode(parameters)\t\r\n\treq = urllib2.Request(url, data)\r\n\tresponse = urllib2.urlopen(req)\r\n\tjson = simplejson.loads(response.read())\r\n\treturn json",
"def print_response(response):\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n\n for row in rows:\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n print(header + ': ' + dimension)\n\n for i, values in enumerate(dateRangeValues):\n print('Date range (' + str(i) + ')')\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n\t print(metricHeader.get('name') + ': ' + value)",
"def get_version_information_sheet(t2_url, t2_token, id):\n response = requests.get(f\"{t2_url}/api/clusters/{id}/stackable-versions\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to get Stackable version information sheet returned error code {response.status_code}\")\n return \"No Stackable version information available.\"\n return response.text",
"def print_response(response, filename='export.csv'):\n \"\"\"\n structure\n \n response['reports'][0]['data']['rows'] #returns a list of metrics and dimensions values\n [\n {u'metrics': [{u'values': [u'1446', u'4592', u'891', u'249', u'195', u'61']}], u'dimensions': [u'20170408', u'(none)', u'New Visitor', u'desktop']},\n {u'metrics': [{u'values': [u'162', u'543', u'206', u'5', u'5', u'0']}], u'dimensions': [u'20170409', u'referral', u'New Visitor', u'desktop']},\n {u'metrics': [{u'values': [u'1', u'1', u'1', u'0', u'0', u'0']}], u'dimensions': [u'20170408', u'display', u'Returning Visitor', u'desktop']}\n\n ]\n\n\n response['reports'][0]['columnHeader'] #returns the header\n {u'dimensions': [\n u'ga:date',\n u'ga:medium',\n u'ga:userType',\n u'ga:deviceCategory'\n ],\n u'metricHeader': {u'metricHeaderEntries': [\n {u'type': u'INTEGER', u'name': u'ga:sessions'},\n {u'type': u'INTEGER', u'name': u'ga:pageviews'},\n {u'type': u'INTEGER', u'name': u'ga:productDetailViews'},\n {u'type': u'INTEGER', u'name': u'ga:productAddsToCart'},\n {u'type': u'INTEGER', u'name': u'ga:productCheckouts'},\n {u'type': u'INTEGER', u'name': u'ga:uniquePurchases'}]}}\n\n \"\"\"\n print \"writing\", filename\n #write in csv\n #write header\n with open(filename, 'wb') as csvfile:\n writer = csv.writer(csvfile,\n delimiter=',',\n quoting=csv.QUOTE_MINIMAL\n )\n writer.writerow(['date',\n 'medium',\n 'userType',\n 'deviceCategory',\n 'sessions',\n 'pageviews',\n 'productDetailViews',\n 'productAddToCart',\n 'productCheckouts',\n 'uniquePurchases'\n ])\n #get variables\n for line in response['reports'][0]['data']['rows']:\n date = str(line['dimensions'][0])\n medium = str(line['dimensions'][1])\n userType = str(line['dimensions'][2])\n deviceCategory = str(line['dimensions'][3])\n sessions = str(line['metrics'][0]['values'][0])\n pageviews = str(line['metrics'][0]['values'][1])\n productDetailViews = str(line['metrics'][0]['values'][2])\n productAddsToCart = str(line['metrics'][0]['values'][3])\n productCheckouts = str(line['metrics'][0]['values'][4])\n uniquePurchases = str(line['metrics'][0]['values'][5])\n #write variables to csv per row\n writer.writerow([date,\n medium,\n userType,\n deviceCategory,\n sessions,\n pageviews,\n productDetailViews,\n productAddsToCart,\n productCheckouts,\n uniquePurchases\n ])\n print \"complete\"",
"def pytest_report_header(config):\n return \"python client -- requests library\"",
"def api_req(dev, api_call):\r\n import xmltodict\r\n import logging\r\n try:\r\n r = requests.get(dev + ':8060' + api_call, timeout=5)\r\n except Exception as exc:\r\n response = [\"ERR\", exc]\r\n return response[0]\r\n except ConnectionError as connerr:\r\n response = [\"ERR\", connerr]\r\n return response[0]\r\n except TimeoutError as toerr:\r\n response = [\"ERR\", toerr]\r\n return response[0], toerr\r\n r_code = r.status_code\r\n if r_code == 200:\r\n print(\"REQUEST WAS A SUCCESS. DEVICE RETURNED: {} \".format(str(r)))\r\n r2 = r.text\r\n response = xmltodict.parse(r2, xml_attribs=False)\r\n return response\r\n else:\r\n response = \"UnknownERR\"\r\n dev.state(DISABLED)\r\n return msg_box(response)",
"def __str__(self):\n if self._show_all:\n response = 'NAME,INSTALLED,VULNERABILITY,SEVERITY,ALLOWED\\n'\n else:\n response = 'NAME,INSTALLED,VULNERABILITY,SEVERITY\\n'\n\n for row in self._vulnerabilities:\n if not self._show_all:\n row = row[:-1]\n\n response += ','.join(row)\n response += '\\n'\n\n return response",
"def get_report(self) -> str:\n return self.diagnostics.get_report()",
"def serve_communications_and_statuses(erpnext_support_user, erpnext_support_issues, bench_site):\n\tauthenticate_erpnext_support_user(erpnext_support_user)\n\tsync_time = get_datetime_str(now_datetime())\n\tres = {}\n\ttime.sleep(5)\n\n\tfor erpnext_support_issue in json.loads(erpnext_support_issues):\n\t\tif not erpnext_support_issue.get(\"frappe_issue_id\"):\n\t\t\tcontinue\n\n\t\t# Sync Communications for Issue\n\t\tfields = [\"name\", \"subject\", \"content\", \"recipients\", \"has_attachment\", \"creation\"]\n\t\tfilters = [\n\t\t\t[\"reference_doctype\", \"=\", \"Issue\"],\n\t\t\t[\"reference_name\", \"=\", erpnext_support_issue.get(\"frappe_issue_id\")],\n\t\t\t[\"communication_medium\", \"=\", \"Email\"],\n\t\t\t[\"sent_or_received\", \"=\", \"Sent\"],\n\t\t\t[\"creation\", \">\", get_datetime(erpnext_support_issue.get(\"last_sync_on\"))]\n\t\t]\n\t\tcommunications = call(frappe.get_all, doctype=\"Communication\", filters=filters, fields=fields, order_by=\"creation ASC\")\n\n\t\t# Sync Attachments for Communications\n\t\tcommunications = get_attachments(communications)\n\n\t\t# Sync Status for Issue\n\t\tfrappe_issue = frappe.get_doc(\"Issue\", erpnext_support_issue.get(\"frappe_issue_id\"))\n\n\t\tres[erpnext_support_issue.get(\"name\")] = {\n\t\t\t\"communications\": communications,\n\t\t\t\"status\": \"Open\" if frappe_issue.get(\"status\") not in [\"Open\", \"Closed\"] else frappe_issue.get(\"status\"),\n\t\t\t\"priority\": frappe_issue.get(\"priority\"),\n\t\t\t\"resolution_by\": get_datetime_str(frappe_issue.resolution_by) if frappe_issue.resolution_by else None,\n\t\t\t\"last_sync_on\": sync_time,\n\t\t\t\"release\": frappe_issue.get(\"release\")\n\t\t}\n\n\treturn json.dumps(res)",
"def getResourceDef(url, user, pWd, resourceName):\n \n print(\"getting resource for catalog:-\" + url + \" resource=\" + resourceName +\n ' user=' + user)\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n # print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\"} \n tResp = requests.get(apiURL, params={}, headers=header, auth=HTTPBasicAuth(user,pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n return tResp.status_code, json.loads(tResp.text)\n else:\n # not valid\n return tResp.status_code, None"
] | [
"0.6823767",
"0.581572",
"0.568539",
"0.5526973",
"0.5380427",
"0.53281105",
"0.53126675",
"0.5279853",
"0.5272536",
"0.5255016",
"0.52183175",
"0.51712066",
"0.5138143",
"0.5130867",
"0.5126458",
"0.511854",
"0.5118234",
"0.50857085",
"0.50781465",
"0.5074722",
"0.5067323",
"0.5054007",
"0.50318706",
"0.49882627",
"0.4972969",
"0.49689487",
"0.49602342",
"0.49565107",
"0.49409983",
"0.493596"
] | 0.71547586 | 0 |
Delete existing compatibility report [Example] ${resp} = Fusion Api Delete Security Compatibility Report | | | | def fusion_api_delete_security_compatibility_report(self, uri=None, api=None, headers=None, param='/compatibility-report'):
return self.security_standards.delete(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(cm_response, **data):\n return cm_response",
"def test_delete_success(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual({}, channel.json_body)\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n # check that report was deleted\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])",
"def fusion_api_delete_alert(self, uri=None, api=None, headers=None, param=''):\n return self.alert.delete(uri, api, headers, param=param)",
"def delete_response(self):\n deriva_ctx.deriva_response.status = '204 No Content'\n return deriva_ctx.deriva_response",
"def api_delete(self, *args, **kwargs):\n return self.api_delete_with_response(*args, **kwargs)[0]",
"def delete_ticket(data):\n firebase_uid = data[\"session\"].split('/')[-1]\n for i in data[\"queryResult\"][\"outputContexts\"]:\n if \"ticket_params\" in i[\"name\"]:\n ticket_id = i[\"parameters\"][\"ticket_id\"]\n db = firebase.database()\n db.child(\"user_data\").child(firebase_uid).child(\"Complaints\").child(ticket_id).remove()\n response = {\n \"fulfillmentText\": \"Ticket removed.\"\n }\n return response",
"def delete(self):\n return self.request('', pylastica.request.Request.DELETE)",
"def delete(self, request):\n return BossHTTPError(\" This API version is unsupported. Update to version {}\".format(version),\n ErrorCodes.UNSUPPORTED_VERSION)",
"def deleteUpgrade(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('DELETE', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception', 404: 'Specified upgrade does not exist'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\treturn deserialize_string_json(payload)",
"def delete(self, request, nnid, wfver):\n try:\n return_data = \"\"\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))",
"def delete_sample(a1000):\n hash_value = demisto.getArg('hash')\n try:\n response_json = a1000.delete_samples(hash_value).json()\n except Exception as e:\n return_error(str(e))\n\n res = response_json.get('results')\n markdown = f'''## ReversingLabs A1000 delete sample\\n **Message:** {res.get('message')}\n **MD5:** {demisto.get(res, 'detail.md5')}\n **SHA1:** {demisto.get(res, 'detail.sha1')}\n **SHA256:** {demisto.get(res, 'detail.sha256')}'''\n\n command_result = CommandResults(\n outputs_prefix='ReversingLabs',\n outputs={'a1000_delete_report': response_json},\n readable_output=markdown\n )\n\n file_result = fileResult('Delete sample report file', json.dumps(response_json, indent=4),\n file_type=EntryType.ENTRY_INFO_FILE)\n\n return [command_result, file_result]",
"def delete(self, call, params={}): \n # Build an endpoint using the parameters...\n endpoint = self._calls[call](params)\n url = '{}/{}'.format(str(self), str(endpoint))\n return self.deleter.respond(url)",
"def delete(resource, params=None, expected_status_code=204, user=user_data):\n return call(requests.delete, resource, expected_status_code, user, params=params)",
"def delete(device_id):\n api_response = requests.delete(\n \"https://api.serverdensity.io/inventory/devices/\" + device_id,\n params={\"token\": get_sd_auth(\"api_token\")},\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\"Could not parse API Response content: %s\", api_response.content)\n raise CommandExecutionError(\n \"Failed to create, API Response: {}\".format(api_response)\n )\n else:\n return None",
"def _send_lti2_delete(self):\r\n payload = textwrap.dedent(\"\"\"\r\n {\r\n \"@context\" : \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\" : \"Result\"\r\n }\r\n \"\"\")\r\n return self._send_lti2(payload)",
"def delete_reports(request):\n\n request_body_json = json.loads(request.body)\n report_list = request_body_json['report_list']\n # print(type(report_list))\n try:\n with transaction.atomic():\n json_resp = {'msg':'ok'}\n for report in report_list:\n # print(report)\n rp = Report.objects.filter(id_report = report['id_report'],language = report['language'])\n if rp.count() == 1:\n rp = rp.first()\n Annotate.objects.filter(id_report = rp,language=rp.language).delete()\n Linked.objects.filter(id_report = rp,language=rp.language).delete()\n Mention.objects.filter(id_report = rp,language=rp.language).delete()\n Associate.objects.filter(id_report = rp,language=rp.language).delete()\n Contains.objects.filter(id_report = rp,language=rp.language).delete()\n GroundTruthLogFile.objects.filter(id_report = rp,language=rp.language).delete()\n rp.delete()\n # print('DONE')\n return JsonResponse(json_resp)\n\n except Exception as e:\n json_error={'error':e}\n return JsonResponse(json_error)",
"def send_delete(url, data={}, headers={}, return_output=False):\n req = requests.delete(url=url, data=json.dumps(data), headers=headers)\n if return_output:\n return req\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)",
"def delete(resource):\n\tresp = requests.delete(\n\t\t_endpoint(resource, 'DELETE'),\n\t\theaders=PAYLOAD_HEADERS,\n\t\tverify=SERVER_CERT\n\t)\n\tresp.raise_for_status()\n\treturn resp.json()",
"def fusion_api_delete_resource(self, uri, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers\n uri = 'https://%s%s' % (self.fusion_client._host, uri)\n return self.fusion_client.delete(uri, headers)",
"def delete_rest_call(api_url, username, password, print_output=False):\n response = requests.delete(api_url,\n auth=HTTPBasicAuth(username, password),\n verify=False,\n timeout=4)\n\n if print_output:\n if response.status_code == 201:\n print(\"DELETE OK %s (code %d)\" % (api_url, response.status_code))\n elif response.status_code == 200:\n print(\"DELETE OK %s (code %d)\" % (api_url, response.status_code))\n elif response.status_code == 204:\n print(\"DELETE OK %s (code %d)\" % (api_url, response.status_code))\n else:\n print(\"DELETE Failed for: %s (code %d)\" % (api_url, response.status_code))\n print(\" - Text: %s\" % response.text)\n return response",
"def delete(\n self,\n resource_group_name, # type: str\n resource_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MaintenanceConfiguration\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MaintenanceConfiguration\"]\n error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-07-01-preview\"\n\n # Construct URL\n url = self.delete.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'resourceName': self._serialize.url(\"resource_name\", resource_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = 'application/json'\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.MaintenanceError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MaintenanceConfiguration', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def _delete_tag_response(response):\n if 'errortext' in response:\n if 'Unable to find resource by id' in response['errortext']:\n errors.invalid_resource_id()\n\n return {\n 'template_name_or_list': 'status.xml',\n 'response_type': 'DeleteTagsResponse',\n 'return': 'true'\n }",
"def do_DELETE(self,):\n self.http_method = 'DELETE'\n self.response()",
"def test_lti20_delete_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT = u\"ಠ益ಠ\" # pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'DELETE')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert there's no score\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIsNone(self.xmodule.module_score)\r\n self.assertEqual(self.xmodule.score_comment, u\"\")\r\n (_, evt_type, called_grade_obj), _ = self.system.publish.call_args\r\n self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': None, 'max_value': None})\r\n self.assertEqual(evt_type, 'grade')",
"def delete(self):\r\n url = \"%s/delete\" % os.path.dirname(self._url)\r\n params = {\r\n 'f' : 'json',\r\n 'versionName' : self.properties.versionName,\r\n 'sessionID' : self._guid\r\n }\r\n res = self._con.post(url, params)\r\n if 'success' in res:\r\n return res['success']\r\n return res",
"def test_delete_file_output(self):\n response = self.client.open(\n '/v1/control/file/{id}'.format(id='id_example'),\n method='DELETE',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass",
"def delete(pat: str, resource_registration_endpoint: str,\n resource_id: str,\n secure: bool = False):\n headers={\"Authorization\": \"Bearer \"+pat}\n\n disable_warnings_if_debug(secure)\n response = request(\"DELETE\", resource_registration_endpoint + resource_id, headers=headers, verify=secure)\n\n if not is_ok(response):\n raise Exception(\"An error occurred while deleting the resource: \"+str(response.status_code)+\":\"+str(response.reason)+\":\"+str(response.text))",
"def test_superuser_delete_assessment(self):\n response = self.superuser.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n response = self.superuser.get(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_delete_data(self):\n data_github = {\n \"version_control\": \"github\",\n \"scm_repo\": \"test_delete\",\n \"scm_branch\": \"test_delete\",\n \"scm_commit\": \"test_delete\",\n \"repo\": \"test_delete1\",\n \"branch\": \"test_delete1\",\n \"enabled\": 0\n }\n\n data_git = {\n \"version_control\": \"git\",\n \"scm_repo\": \"test_delete\",\n \"scm_branch\": \"test_delete\",\n \"scm_commit\": \"test_delete\",\n \"repo\": \"test_delete1\",\n \"branch\": \"test_delete1\",\n \"enabled\": 0\n }\n\n for data in [data_git, data_github]:\n self.client.post(\"/tracking\", json=data, content_type=\"application/json\", headers=self.auth)\n\n resp = self.client.delete(\n \"/tracking?repo=test_delete1&branch=test_delete1\", content_type=\"application/json\", headers=self.auth\n )\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.SUCCESS, resp_dict.get(\"code\"), msg=\"Error in status code return\")"
] | [
"0.6129747",
"0.57974845",
"0.5664502",
"0.5619248",
"0.55842876",
"0.5583797",
"0.5574962",
"0.5562506",
"0.55540067",
"0.5547024",
"0.5487619",
"0.5425782",
"0.54195935",
"0.53439325",
"0.5341132",
"0.53255165",
"0.52959824",
"0.5290835",
"0.5276131",
"0.52161545",
"0.52069193",
"0.52051544",
"0.5185179",
"0.51758146",
"0.51721764",
"0.5165451",
"0.51283586",
"0.51240116",
"0.51218957",
"0.51194835"
] | 0.73193264 | 0 |
Does Security Mode Change FIPS/CNSA/LEGACY. [Arguments] | def fusion_api_change_security_mode(self, body, uri=None, api=None, headers=None, param='/modes/current-mode'):
return self.security_standards.put(uri=uri, api=api, headers=headers, body=body, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def secure(self) -> bool:\n return self.get_state(self.args[CONF_OVERALL_SECURITY_STATUS]) == \"Secure\"",
"def _ens_psec_supported(self):\n pass",
"def test_set_mode(self):\n context = Context(SSLv23_METHOD)\n assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)",
"def test_set_mode_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_mode(None)",
"def libc_prctl_pr_set_seccomp_mode_filter(bpf_bytes: bytes) -> None:\n # seccomp arg must be a pointer to:\n #\n # struct sock_fprog {\n # unsigned short len; /* Number of filter blocks */\n # struct sock_filter* filter;\n # }\n #\n # ... and we'll emulate that with raw bytes.\n #\n # Our seccomp.bpf file contains the bytes for `filter`. Calculate `len`.\n # (We call it `n_blocks` because `len` is taken in Python.)\n #\n # Each filter is:\n #\n # struct sock_filter {\t/* Filter block */\n # \t__u16\tcode; /* Actual filter code */\n # \t__u8\tjt;\t/* Jump true */\n # \t__u8\tjf;\t/* Jump false */\n # \t__u32\tk; /* Generic multiuse field */\n # };\n #\n # ... for a total of 8 bytes (64 bits) per filter.\n\n n_blocks = len(bpf_bytes) // 8\n\n # Pack a sock_fprog struct. With a pointer in it.\n bpf_buf = ctypes.create_string_buffer(bpf_bytes)\n sock_fprog = struct.pack(\"HL\", n_blocks, ctypes.addressof(bpf_buf))\n\n _call_c_style(libc, \"prctl\", PR_SET_SECCOMP, SECCOMP_MODE_FILTER, sock_fprog, 0, 0)",
"def sslmode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sslmode\")",
"def __get_verify_mode(self):\n ...",
"def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def system_protection_config():\n\n\tprint_section_header(\"GENERAL SYSTEM PROTECTION\", Fore.BLUE)\n\n\t# Enable Gatekeeper\n\tif prompt_yes_no(top_line=\"-> Enable Gatekeeper?\",\n\t bottom_line=\"Defend against malware by enforcing code signing and verifying downloaded applications before letting them to run.\"):\n\t\tprint_confirmation(\"Enabling Gatekeeper...\")\n\t\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo spctl --enable --label \"Developer ID\"', shell=True, stdout=sp.PIPE)\n\n\t# Disable automatic software whitelisting\n\tif prompt_yes_no(top_line=\"-> Prevent automatic software whitelisting?\",\n\t bottom_line=\"Both built-in and downloaded software will require user approval for whitelisting.\"):\n\t\tprint_confirmation(\"Preventing automatic whitelisting...\")\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\n\t# Captive Portal\n\tif prompt_yes_no(top_line=\"-> Disable Captive Portal Assistant and force login through browser on untrusted networks?\",\n\t bottom_line=\"Captive Portal could be triggered and direct you to a malicious site WITHOUT any user interaction.\"):\n\t\tprint_confirmation(\"Disabling Captive Portal Assistant...\")\n\t\tsp.run(['sudo', 'defaults', 'write', '/Library/Preferences/SystemConfiguration/com.apple.captive.control', 'Active', '-bool', 'false'], stdout=sp.PIPE)",
"def check_secure():\n return get_config_handler().check_secure()",
"def antenny_is_safemode(self):\n return self.safe_mode",
"def test_compatibility(cipher, mode):\n\n chiper_obj = cipher_params(cipher, os.urandom(length_by_cipher[cipher]))[0] #need to be object, not interface, to validate_for_algorithm work\n if chiper_obj.name == \"ChaCha20\":\n return True\n mode_object = None\n if mode == 'CBC':\n mode_object = modes.CBC(os.urandom(16))\n elif mode == 'GCM':\n mode_object = modes.GCM(os.urandom(16), os.urandom(16))\n else:\n return False\n\n return default_backend().cipher_supported(chiper_obj, mode_object)",
"def turnOnFirewallFromActioncenter():\n pass",
"def selinux_mode(self):\n if not self._selinux_mode:\n # Get the SELinux mode from the connected device\n cmd = [\"getenforce\"]\n # TODO: surround with try/except?\n tmp = subprocess.check_output(self.shell + cmd).decode()\n self._selinux_mode = tmp.strip('\\r\\n').lower()\n return self._selinux_mode",
"def test_set_verify_mode(self):\n context = Context(SSLv23_METHOD)\n assert context.get_verify_mode() == 0\n context.set_verify(VERIFY_PEER | VERIFY_CLIENT_ONCE)\n assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)",
"def DualMode(self) -> bool:",
"def test_mixedModes(self):\n self._sendModeChange(\"+osv\", \"a_user another_user\")\n self._checkModeChange([(True, \"osv\", (\"a_user\", None, \"another_user\"))])\n self._sendModeChange(\"+v-os\", \"a_user another_user\")\n self._checkModeChange(\n [(True, \"v\", (\"a_user\",)), (False, \"os\", (\"another_user\", None))]\n )",
"def get_federation_mode(self):\n if self.get_tls():\n return \"tcp+tls\"\n else:\n return \"tcp\"",
"def maintenance_mode():\n pass",
"def sis3305_mode(self, val):\n if val in (0, 1, 2):\n if val != self._faux._sis3305_mode:\n self._faux._sis3305_mode = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")",
"def test_session_cache_mode(self):\n context = Context(SSLv23_METHOD)\n context.set_session_cache_mode(SESS_CACHE_OFF)\n off = context.set_session_cache_mode(SESS_CACHE_BOTH)\n assert SESS_CACHE_OFF == off\n assert SESS_CACHE_BOTH == context.get_session_cache_mode()",
"def sis3305_mode(self):\n return self._faux._sis3305_mode",
"def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setstealthmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()",
"def set_secure_boot_mode(self, secure_boot_enable):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('SecureBootEnable',\n secure_boot_enable)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def test_enable_maintence_mode1(self):\n pass",
"def tcpssl_server_mode(self, tcpssl_server_mode):\n\n self._tcpssl_server_mode = tcpssl_server_mode",
"def is_secure_context(self):\n raise exceptions.NotImplementedError()",
"def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()"
] | [
"0.5541181",
"0.5515481",
"0.5485578",
"0.5464894",
"0.5460684",
"0.54190993",
"0.54003185",
"0.53532046",
"0.5337178",
"0.5280731",
"0.52471733",
"0.52117056",
"0.52020633",
"0.51673883",
"0.5163877",
"0.5139177",
"0.51375663",
"0.5135457",
"0.5119857",
"0.51019514",
"0.5077868",
"0.5074538",
"0.50694686",
"0.50617373",
"0.5044145",
"0.50434256",
"0.50404394",
"0.5034651",
"0.500643",
"0.5002731"
] | 0.5638524 | 0 |
Gets a list of Security modes [Example] ${resp} = Fusion Api Get Security Modes | | | | | def fusion_api_get_security_modes(self, uri=None, api=None, headers=None, param='/modes'):
return self.security_standards.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_current_security_mode(self, uri=None, api=None, headers=None, param='/modes/current-mode'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_security_modeName(self, uri=None, api=None, headers=None, param=''):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_security_protocols(self, uri=None, api=None, headers=None, param='/protocols'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def GetMajorModes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_modes(self):\n return self.__modes",
"def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )",
"def getDisplayModes(self, obj):\n modes = []\n return modes",
"def getSecurity(self):\n return self.client.get(self.name +\"/_security\").getBodyData()",
"def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes",
"def show_modes(var, wrapper, message):\n wrapper.pm(messages[\"available_modes\"].format(_get_gamemodes(var)))",
"def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())",
"def test_mode_get(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n cgs = mocker.patch('pysds011.driver.SDS011.cmd_get_mode')\n runner = CliRunner()\n result = runner.invoke(main, ['mode'])\n cgs.assert_called_once_with(id=b'\\xff\\xff')\n\n assert result.exit_code == 0",
"def get_modes_of_operation(self):\n return [\"Online\", \"Offline\"]",
"def get_modes(self):\n return self.circuit.get_modes()",
"def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]",
"def fusion_api_change_security_mode(self, body, uri=None, api=None, headers=None, param='/modes/current-mode'):\n return self.security_standards.put(uri=uri, api=api, headers=headers, body=body, param=param)",
"async def _load_modes(self) -> None:\n modes: List[Dict[str, Any]] = await self._api_request(\"modes\")\n _LOGGER.debug(\"Loaded modes\")\n self._modes = [Mode(m) for m in modes]",
"def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:",
"def getSupportedModes(self):\n (err, modesList) = NvCamera().getNvCameraTools().getSupportedModes()\n if (err != NvSuccess):\n raise NvCameraException(err, \"Couldn't get supported sensor modes!!\")\n\n modesList.sort(key = attrgetter('Resolution.width', 'Resolution.height'), reverse = True)\n\n return modesList",
"def selectable_services_modes():\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.org_service_mode\n query = (mtable.deleted == False)\n rows = db(query).select(mtable.id,\n mtable.name,\n )\n modes = {row.id: row.name for row in rows}\n return modes",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_modes(self, group):\n ret = self._transfer(TVGetModes(group=group))\n return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None",
"def fetch_switch_classic(url = SWITCHclassic_url):\n import urllib2\n # SWITCHclassis ACLs holen\n opener = urllib2.build_opener()\n acls_raw = opener.open(SWITCHclassic_url)\n acls_raw = acls_raw.readlines()\n classic_acls = []\n for line in acls_raw:\n line = line.strip()\n classic_acls.append(line.split(\" \"))\n return classic_acls",
"def get_mode(self):\r\n return self._api.get_mode()",
"def cmd_mode (self, line):\r\n if line[1] in 'sS':\r\n # f == 'file'\r\n self.respond ('200 MODE S Ok')\r\n else:\r\n self.respond ('502 Unimplemented MODE type')",
"def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def mac_security_tab_status(refrenceid):\n try:\n AppButtons = getAllObjects(refrenceid)\n DeviceStatus = AppButtons[25:29]\n Descriptions = []\n for device in DeviceStatus:\n Descriptionsofsettings = getApplicatontitle(device)\n Descriptions.append(Descriptionsofsettings)\n except Exception as er:\n return False\n return Descriptions",
"def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]",
"def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()",
"def get_requested_mode(self, request_dict):\r\n if 'audit_mode' in request_dict:\r\n return 'audit'\r\n if 'certificate_mode' and request_dict.get(\"honor-code\"):\r\n return 'honor'\r\n if 'certificate_mode' in request_dict:\r\n return 'verified'"
] | [
"0.68961036",
"0.653141",
"0.5915215",
"0.58452207",
"0.5733469",
"0.56415015",
"0.56192136",
"0.5553235",
"0.55484384",
"0.5471953",
"0.5440756",
"0.5433595",
"0.54021186",
"0.5392016",
"0.53893346",
"0.53558445",
"0.5344282",
"0.5338408",
"0.53269875",
"0.53224444",
"0.52973616",
"0.52823275",
"0.52712977",
"0.52288693",
"0.5228405",
"0.5187952",
"0.51685864",
"0.5168307",
"0.5124096",
"0.51214355"
] | 0.78569627 | 0 |
Gets the current Security mode [Example] ${resp} = Fusion Api Get Current Security Mode | | | | | def fusion_api_get_current_security_mode(self, uri=None, api=None, headers=None, param='/modes/current-mode'):
return self.security_standards.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_security_modes(self, uri=None, api=None, headers=None, param='/modes'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_security_modeName(self, uri=None, api=None, headers=None, param=''):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_security_protocols(self, uri=None, api=None, headers=None, param='/protocols'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def getSecurity(self):\n return self.client.get(self.name +\"/_security\").getBodyData()",
"def GetMajorModes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_modes(self):\n return self.__modes",
"def getDisplayModes(self, obj):\n modes = []\n return modes",
"def get_modes_of_operation(self):\n return [\"Online\", \"Offline\"]",
"def mac_security_tab_status(refrenceid):\n try:\n AppButtons = getAllObjects(refrenceid)\n DeviceStatus = AppButtons[25:29]\n Descriptions = []\n for device in DeviceStatus:\n Descriptionsofsettings = getApplicatontitle(device)\n Descriptions.append(Descriptionsofsettings)\n except Exception as er:\n return False\n return Descriptions",
"def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )",
"def fusion_api_change_security_mode(self, body, uri=None, api=None, headers=None, param='/modes/current-mode'):\n return self.security_standards.put(uri=uri, api=api, headers=headers, body=body, param=param)",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes",
"def selectable_services_modes():\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.org_service_mode\n query = (mtable.deleted == False)\n rows = db(query).select(mtable.id,\n mtable.name,\n )\n modes = {row.id: row.name for row in rows}\n return modes",
"def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']",
"def _get_mode():\n return context.get_context('mode')",
"def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())",
"def get_modes(self):\n return self.circuit.get_modes()",
"def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def get_requested_mode(self, request_dict):\r\n if 'audit_mode' in request_dict:\r\n return 'audit'\r\n if 'certificate_mode' and request_dict.get(\"honor-code\"):\r\n return 'honor'\r\n if 'certificate_mode' in request_dict:\r\n return 'verified'",
"def fusion_api_get_security_compatibility_report(self, uri=None, api=None, headers=None, param='/compatibility-report'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def test_mode_get(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n cgs = mocker.patch('pysds011.driver.SDS011.cmd_get_mode')\n runner = CliRunner()\n result = runner.invoke(main, ['mode'])\n cgs.assert_called_once_with(id=b'\\xff\\xff')\n\n assert result.exit_code == 0",
"def getMode(self):\r\n # ViStatus status = AcqrsD1_getMode(ViSession instrumentID,\r\n # ViInt32* mode, ViInt32* modifier, ViInt32* flags)\r\n mode = ViInt32()\r\n modifier = ViInt32()\r\n flags = ViInt32()\r\n self.callFunc('AcqrsD1_getMode', self.session,\r\n byref(mode), byref(modifier), byref(flags))\r\n return (mode.value, modifier.value, flags.value)",
"def fetch_switch_classic(url = SWITCHclassic_url):\n import urllib2\n # SWITCHclassis ACLs holen\n opener = urllib2.build_opener()\n acls_raw = opener.open(SWITCHclassic_url)\n acls_raw = acls_raw.readlines()\n classic_acls = []\n for line in acls_raw:\n line = line.strip()\n classic_acls.append(line.split(\" \"))\n return classic_acls",
"def show_modes(var, wrapper, message):\n wrapper.pm(messages[\"available_modes\"].format(_get_gamemodes(var)))",
"def get_app_mode(self):\n\t\treturn call_sdk_function('PrlApi_GetAppMode')",
"def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]",
"def getSupportedModes(self):\n (err, modesList) = NvCamera().getNvCameraTools().getSupportedModes()\n if (err != NvSuccess):\n raise NvCameraException(err, \"Couldn't get supported sensor modes!!\")\n\n modesList.sort(key = attrgetter('Resolution.width', 'Resolution.height'), reverse = True)\n\n return modesList",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return"
] | [
"0.78809065",
"0.66877264",
"0.5968293",
"0.5922285",
"0.584577",
"0.57545865",
"0.5584333",
"0.55589724",
"0.55572355",
"0.5537556",
"0.55370617",
"0.55359447",
"0.54873556",
"0.546913",
"0.54307616",
"0.5405919",
"0.53729117",
"0.5366049",
"0.53463566",
"0.53376895",
"0.53323615",
"0.5326829",
"0.53243214",
"0.53166157",
"0.5312421",
"0.53088915",
"0.5299852",
"0.52989644",
"0.52908266",
"0.5243963"
] | 0.74098223 | 1 |
Gets a Security mode by name [Example] ${resp} = Fusion Api Get Security ModeName | | | | | def fusion_api_get_security_modeName(self, uri=None, api=None, headers=None, param=''):
return self.security_standards.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_security_modes(self, uri=None, api=None, headers=None, param='/modes'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_current_security_mode(self, uri=None, api=None, headers=None, param='/modes/current-mode'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_security_protocols(self, uri=None, api=None, headers=None, param='/protocols'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def GetMajorModes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_modes(self):\n return self.__modes",
"def getSecurity(self):\n return self.client.get(self.name +\"/_security\").getBodyData()",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )",
"def test_mode_get(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n cgs = mocker.patch('pysds011.driver.SDS011.cmd_get_mode')\n runner = CliRunner()\n result = runner.invoke(main, ['mode'])\n cgs.assert_called_once_with(id=b'\\xff\\xff')\n\n assert result.exit_code == 0",
"def getDisplayModes(self, obj):\n modes = []\n return modes",
"def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes",
"def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())",
"def show_modes(var, wrapper, message):\n wrapper.pm(messages[\"available_modes\"].format(_get_gamemodes(var)))",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_modes_of_operation(self):\n return [\"Online\", \"Offline\"]",
"def selectable_services_modes():\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.org_service_mode\n query = (mtable.deleted == False)\n rows = db(query).select(mtable.id,\n mtable.name,\n )\n modes = {row.id: row.name for row in rows}\n return modes",
"def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]",
"def mac_security_tab_status(refrenceid):\n try:\n AppButtons = getAllObjects(refrenceid)\n DeviceStatus = AppButtons[25:29]\n Descriptions = []\n for device in DeviceStatus:\n Descriptionsofsettings = getApplicatontitle(device)\n Descriptions.append(Descriptionsofsettings)\n except Exception as er:\n return False\n return Descriptions",
"def fetch_switch_classic(url = SWITCHclassic_url):\n import urllib2\n # SWITCHclassis ACLs holen\n opener = urllib2.build_opener()\n acls_raw = opener.open(SWITCHclassic_url)\n acls_raw = acls_raw.readlines()\n classic_acls = []\n for line in acls_raw:\n line = line.strip()\n classic_acls.append(line.split(\" \"))\n return classic_acls",
"def fusion_api_change_security_mode(self, body, uri=None, api=None, headers=None, param='/modes/current-mode'):\n return self.security_standards.put(uri=uri, api=api, headers=headers, body=body, param=param)",
"def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]",
"def get_modes(self):\n return self.circuit.get_modes()",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return",
"def modes(self) -> List[str]:\n return [m.name for m in self._modes]",
"def _get_mode():\n return context.get_context('mode')",
"def getSupportedModes(self):\n (err, modesList) = NvCamera().getNvCameraTools().getSupportedModes()\n if (err != NvSuccess):\n raise NvCameraException(err, \"Couldn't get supported sensor modes!!\")\n\n modesList.sort(key = attrgetter('Resolution.width', 'Resolution.height'), reverse = True)\n\n return modesList",
"def get_modes(self, group):\n ret = self._transfer(TVGetModes(group=group))\n return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None",
"def cmd_mode (self, line):\r\n if line[1] in 'sS':\r\n # f == 'file'\r\n self.respond ('200 MODE S Ok')\r\n else:\r\n self.respond ('502 Unimplemented MODE type')"
] | [
"0.77332807",
"0.7017976",
"0.5843751",
"0.5707068",
"0.5608259",
"0.55467093",
"0.5505064",
"0.55039376",
"0.54651487",
"0.54621285",
"0.53904",
"0.5379105",
"0.5360107",
"0.5350867",
"0.5305736",
"0.53032374",
"0.5303141",
"0.52850384",
"0.5256461",
"0.5249061",
"0.5242854",
"0.523563",
"0.5213932",
"0.52067995",
"0.52067995",
"0.5203799",
"0.5194068",
"0.51920736",
"0.5177769",
"0.51765984"
] | 0.7043026 | 1 |
Update security protocols [Example] ${resp} = Fusion Api Update Security Protocols | | | | | | def fusion_api_update_security_protocols(self, body, uri=None, api=None, headers=None, param='/protocols'):
return self.security_standards.put(body=body, uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_firewall_rule_protocol(self):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.UpdateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--protocol', 'any'],\r\n {'protocol': None, })",
"def fusion_api_get_security_protocols(self, uri=None, api=None, headers=None, param='/protocols'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)",
"def test_protocols_updated(self):\n assert self.skill_config.protocols == {self.new_protocol_id}",
"def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected",
"def test_restricted_to_protocols_updated(self):\n assert self.connection_config.restricted_to_protocols == {self.new_protocol_id}",
"def fusion_api_change_security_mode(self, body, uri=None, api=None, headers=None, param='/modes/current-mode'):\n return self.security_standards.put(uri=uri, api=api, headers=headers, body=body, param=param)",
"def protocol_version_9():\n print('Setting protocol version to 9')\n upgrade('protocolversion', 'protocol_version', 9)",
"def fusion_api_edit_switch(self, body, uri, api=None, headers=None):\n return self.switch.update(body, uri, api, headers)",
"def protocol(self):\n controller = self._controller\n if flask.request.method == 'GET':\n result = controller.get()\n else:\n result = controller.put(flask.request.headers, flask.request.data)\n if isinstance(result, ProblemDetail):\n return result\n return make_response(*result)",
"def test_protocols_updated(self):\n assert self.agent_config.protocols == {self.new_protocol_id}",
"def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}",
"def update_sample_protocol_info(existing_protocols, experiment_protocol, protocol_url):\n\n if not \"protocol\" in experiment_protocol:\n return (existing_protocols, False)\n\n is_updated = False\n # Compare each entry in experiment protocol with the existing\n # protocols; if the entry is new, add it to exising_protocols.\n for new_protocol in experiment_protocol[\"protocol\"]:\n new_protocol_text = new_protocol.get(\"text\", \"\")\n new_protocol_text = ArrayExpressSurveyor.extract_protocol_text(new_protocol_text)\n\n # Ignore experiment-level protocols whose accession or text\n # field is unavailable or empty.\n if not new_protocol.get(\"accession\", \"\").strip() or not new_protocol_text:\n continue\n\n new_protocol_is_found = False\n for existing_protocol in existing_protocols:\n if (\n new_protocol.get(\"accession\", \"\") == existing_protocol[\"Accession\"]\n and new_protocol_text == existing_protocol[\"Text\"]\n and new_protocol.get(\"type\", \"\") == existing_protocol[\"Type\"]\n ):\n new_protocol_is_found = True\n break\n if not new_protocol_is_found:\n existing_protocols.append(\n {\n \"Accession\": new_protocol[\"accession\"],\n \"Text\": new_protocol_text,\n \"Type\": new_protocol.get(\"type\", \"\"), # in case 'type' field is unavailable\n \"Reference\": protocol_url,\n }\n )\n is_updated = True\n\n return (existing_protocols, is_updated)",
"def protocol(self, code: str) -> str:\n return 'https'",
"def fusion_api_update_security_compatibility_report(self, body, uri=None, api=None, headers=None, param='/compatibility-report?force=true'):\n return self.security_standards.post(uri=uri, api=api, headers=headers, body=body, param=param)",
"def updateResourceDef(url, user, pWd, resourceName, resJson):\n \n print(\"\\tupdating resource for catalog:-\" + url + \" resource=\" + \n resourceName + ' user=' + user)\n print(\"\\t\" + json.dumps(resJson))\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"} \n tResp = requests.put(apiURL, data=json.dumps(resJson), headers=header, \n auth=HTTPBasicAuth(user, pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n print(\"\\tyay - update resource worked...\")\n print(tResp)\n return tResp.status_code\n else:\n # not valid\n print(\"\\tdarn - update resource failed...\")\n print(tResp)\n return tResp.status_code",
"def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError",
"def test_update_firewall_policy(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })",
"def PatchConceptLanguages(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def fusion_api_patch_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.patch(body, uri, api, headers)",
"def update_libscapi(self):\n protocol_name = self.protocol_config['protocol']\n os.system('fab -f Execution/fabfile.py update_libscapi --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')",
"def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:\n response[\"choices\"][0][\"text\"] += stream_response[\"choices\"][0][\"text\"]\n response[\"choices\"][0][\"finish_reason\"] = stream_response[\"choices\"][0][\n \"finish_reason\"\n ]\n response[\"choices\"][0][\"logprobs\"] = stream_response[\"choices\"][0][\"logprobs\"]",
"def update_response(self, response):\r\n self.stri.update_response(response)",
"def update_response(self, response):\r\n self.stri.update_response(response)",
"def fusion_api_edit_lsg(self, body, uri, api=None, headers=None):\n return self.lsg.update(body, uri, api, headers)",
"def test_update_firewall(self):\r\n resource = 'firewall'\r\n cmd = firewall.UpdateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })",
"def update():\n return 'update api in put'",
"def update(cls, client, resource) :\n try :\n if type(resource) is not list :\n updateresource = nshttpprofile()\n updateresource.name = resource.name\n updateresource.dropinvalreqs = resource.dropinvalreqs\n updateresource.markhttp09inval = resource.markhttp09inval\n updateresource.markconnreqinval = resource.markconnreqinval\n updateresource.cmponpush = resource.cmponpush\n updateresource.conmultiplex = resource.conmultiplex\n updateresource.maxreusepool = resource.maxreusepool\n updateresource.dropextracrlf = resource.dropextracrlf\n updateresource.incomphdrdelay = resource.incomphdrdelay\n updateresource.websocket = resource.websocket\n updateresource.rtsptunnel = resource.rtsptunnel\n updateresource.reqtimeout = resource.reqtimeout\n updateresource.adpttimeout = resource.adpttimeout\n updateresource.reqtimeoutaction = resource.reqtimeoutaction\n updateresource.dropextradata = resource.dropextradata\n updateresource.weblog = resource.weblog\n updateresource.clientiphdrexpr = resource.clientiphdrexpr\n updateresource.maxreq = resource.maxreq\n updateresource.persistentetag = resource.persistentetag\n updateresource.spdy = resource.spdy\n updateresource.http2 = resource.http2\n updateresource.http2maxheaderlistsize = resource.http2maxheaderlistsize\n updateresource.http2maxframesize = resource.http2maxframesize\n updateresource.http2maxconcurrentstreams = resource.http2maxconcurrentstreams\n updateresource.http2initialwindowsize = resource.http2initialwindowsize\n updateresource.http2headertablesize = resource.http2headertablesize\n updateresource.reusepooltimeout = resource.reusepooltimeout\n updateresource.maxheaderlen = resource.maxheaderlen\n updateresource.minreusepool = resource.minreusepool\n return updateresource.update_resource(client)\n else :\n if (resource and len(resource) > 0) :\n updateresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n updateresources[i].name = resource[i].name\n updateresources[i].dropinvalreqs = resource[i].dropinvalreqs\n updateresources[i].markhttp09inval = resource[i].markhttp09inval\n updateresources[i].markconnreqinval = resource[i].markconnreqinval\n updateresources[i].cmponpush = resource[i].cmponpush\n updateresources[i].conmultiplex = resource[i].conmultiplex\n updateresources[i].maxreusepool = resource[i].maxreusepool\n updateresources[i].dropextracrlf = resource[i].dropextracrlf\n updateresources[i].incomphdrdelay = resource[i].incomphdrdelay\n updateresources[i].websocket = resource[i].websocket\n updateresources[i].rtsptunnel = resource[i].rtsptunnel\n updateresources[i].reqtimeout = resource[i].reqtimeout\n updateresources[i].adpttimeout = resource[i].adpttimeout\n updateresources[i].reqtimeoutaction = resource[i].reqtimeoutaction\n updateresources[i].dropextradata = resource[i].dropextradata\n updateresources[i].weblog = resource[i].weblog\n updateresources[i].clientiphdrexpr = resource[i].clientiphdrexpr\n updateresources[i].maxreq = resource[i].maxreq\n updateresources[i].persistentetag = resource[i].persistentetag\n updateresources[i].spdy = resource[i].spdy\n updateresources[i].http2 = resource[i].http2\n updateresources[i].http2maxheaderlistsize = resource[i].http2maxheaderlistsize\n updateresources[i].http2maxframesize = resource[i].http2maxframesize\n updateresources[i].http2maxconcurrentstreams = resource[i].http2maxconcurrentstreams\n updateresources[i].http2initialwindowsize = resource[i].http2initialwindowsize\n updateresources[i].http2headertablesize = 
resource[i].http2headertablesize\n updateresources[i].reusepooltimeout = resource[i].reusepooltimeout\n updateresources[i].maxheaderlen = resource[i].maxheaderlen\n updateresources[i].minreusepool = resource[i].minreusepool\n result = cls.update_bulk_request(client, updateresources)\n return result\n except Exception as e :\n raise e",
"def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers)",
"def fusion_api_update_from_group(self, uri, api=None, headers=None):\n param = '/compliance'\n return self.li.update(body=None, uri=uri, api=api, headers=headers, param=param)"
] | [
"0.55466896",
"0.54718137",
"0.5316541",
"0.51760364",
"0.51063836",
"0.50989",
"0.5069504",
"0.5057574",
"0.4968887",
"0.49183688",
"0.49088",
"0.48352388",
"0.48348862",
"0.4829088",
"0.48271632",
"0.4823801",
"0.48136333",
"0.48069957",
"0.4761026",
"0.47487354",
"0.47414017",
"0.4730852",
"0.4726672",
"0.4726672",
"0.47199726",
"0.47194213",
"0.4718824",
"0.46984348",
"0.4667034",
"0.46516642"
] | 0.67823476 | 0 |
Get a list of security protocols [Example] ${resp} = Fusion Api Get Security Protocols | | | | | def fusion_api_get_security_protocols(self, uri=None, api=None, headers=None, param='/protocols'):
return self.security_standards.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Get_AvailableProtocols(self, request, context: grpc.ServicerContext) \\\n -> Ot2Controller_pb2.Get_AvailableProtocols_Responses:\n # Run 'ls' command to collect the files.\n ssh_stdin, ssh_stdout, ssh_stderr = self.ssh.exec_command(\"ls \" + USER_STORAGE_DIR)\n output: str = ssh_stdout.readlines()\n\n protocol_list = []\n for line in output:\n line = line.strip()\n if line.endswith(\".py\"):\n protocol_list.append(silaFW_pb2.String(value=line))\n\n return Ot2Controller_pb2.Get_AvailableProtocols_Responses(AvailableProtocols=protocol_list)",
"def protocols(self):\n return list(self.query(Protocol))",
"def protocols(self) -> pulumi.Output['outputs.ServiceProtocols']:\n return pulumi.get(self, \"protocols\")",
"def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected",
"def fusion_api_get_security_modes(self, uri=None, api=None, headers=None, param='/modes'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval",
"def realtimestreaming_protocols(self, **kwargs):\n url_path = 'realtimestreaming/protocols'\n self.logger.debug(\"Get list of protocols\")\n return self._common_get(url_path, parameters=kwargs)",
"def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)",
"def get_protocols(self):\r\n\r\n return None",
"def protocols(self) -> Optional[pulumi.Input['ServiceProtocolsArgs']]:\n return pulumi.get(self, \"protocols\")",
"def protocols(self) -> Optional[pulumi.Input['ServiceProtocolsArgs']]:\n return pulumi.get(self, \"protocols\")",
"def protocols(self):\n if self._protocols is None:\n uri = \"/loadbalancers/protocols\"\n resp, body = self.method_get(uri)\n self._protocols = [proto[\"name\"] for proto in body[\"protocols\"]]\n return self._protocols",
"def winhttp_WinHttpQueryAuthSchemes(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpdwSupportedSchemes\", \"lpdwFirstScheme\", \"pdwAuthTarget\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"protocols\")",
"def getProtocol(self) -> str:\n ...",
"def _retrieve_fun(self):\n results = []\n for (dst, answer, ses) in self.mgr:\n encoded_pairs = ([], [])\n if answer:\n try:\n encoded_pairs = ses.decode_response(answer)\n\n except error.SNMPError:\n # SNMP errors lead to empty responses\n pass\n \n results.append(encoded_pairs)\n \n return results",
"def list(conn):\n try:\n return conn.get(url='/auth-providers')['providers']\n except SystemError as e:\n raise e",
"def list(options=None):\n if not options:\n r = requests.get('/', options)\n status_code = r.status_code\n response = RebrandlyResponse.raise_exception(status_code, r.json())\n if response == 'ok':\n return response['response']",
"def fusion_api_get_ls(self, uri=None, api=None, headers=None, param=''):\n return self.ls.get(uri=uri, api=api, headers=headers, param=param)",
"def action_GetProtocolInfo(self, extract_returns=True):\n arguments = { }\n\n out_params = self._proxy_call_action(\"GetProtocolInfo\", arguments=arguments)\n\n rtn_args = out_params\n if extract_returns:\n rtn_args = [out_params[k] for k in (\"Source\", \"Sink\",)]\n if len(rtn_args) == 1:\n rtn_args = rtn_args[0]\n\n return rtn_args",
"def protocol(self, code: str) -> str:\n return 'https'",
"def call_SLV_getAllControllers(url: str, authentication: tuple, format: str,\n write_file_to: str = \"\") -> Union[Tuple[requests.request, str], requests.request]:\n api_method = 'getAllControllers' # function which gets called on SLV server\n api_part = '/api/asset/' # where the function is on SLV server\n # setting up parameters\n param = MultiDict([('ser', format)])\n # checking format input\n if not (\n format == 'json' or format == 'xml'): # if format argument does not match expected input raises an error\n raise ValueError(\n \"wrong input parameters for APIFinal.call_SLV_getAllControllers function : format must be either 'xml' or 'json' \\n\")\n print('calling ' + api_method + '...')\n r = requests.get(url + api_part + api_method, params=param, auth=authentication) # call the request\n if write_file_to == \"\": # if asked, writes file\n file_name = api_method # the output file name if write_file is true\n write_request(r, param, write_file_to)\n return r, file_name\n return r",
"def gen_auth_resp(chall_list):\n return [\"%s%s\" % (chall.__class__.__name__, chall.domain)\n for chall in chall_list]",
"def domains_v2():\n # Is this public?\n configs = get_configs()\n if configs['api_requests'] == 'auth':\n # Auth token in headers\n try:\n auth_token = Token.query.filter_by(auth_token=request.headers.get('Authorization')).first()\n except:\n return {\"alternatives\" : \"Database Error with token!\"}\n if not auth_token:\n return {\"alternatives\": \"Unauthorized!\"}\n\n req_data = request.get_json()\n url = req_data['url']\n if not url:\n return {\"alternatives\" : 'None'}\n \n domain_data = check(url)\n alternatives = {\"alternatives\": domain_data['available_alternatives']}\n return alternatives",
"def fusion_api_get_security_compatibility_report(self, uri=None, api=None, headers=None, param='/compatibility-report'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)",
"def certificate_auth():\r\n url = 'https://www.12306.cn'\r\n response = requests.get(url, verify=False)\r\n print(response.status_code)\r\n print(response.text)",
"def fusion_api_get_certificate_info(self, uri=None, api=None, param='', headers=None):\n param = '/certificates/https/'\n return self.ic.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_switch_types(self, param='', api=None, headers=None):\n return self.swtypes.get(api=api, headers=headers, param=param)",
"def getProtocolPfn( self, pfnDict, withPort ):\n # pfnDict['Protocol'] = ''\n # pfnDict['Host'] = ''\n # pfnDict['Port'] = ''\n # pfnDict['WSUrl'] = ''\n res = pfnunparse( pfnDict )\n return res",
"def protocol_details(self) -> pulumi.Output['outputs.ServerProtocolDetails']:\n return pulumi.get(self, \"protocol_details\")"
] | [
"0.582647",
"0.5813553",
"0.56914395",
"0.55264306",
"0.55257183",
"0.5524292",
"0.548235",
"0.54644",
"0.545345",
"0.53170335",
"0.53170335",
"0.52891564",
"0.5145579",
"0.5117963",
"0.509841",
"0.5075175",
"0.50673515",
"0.5064264",
"0.50599957",
"0.50556576",
"0.49897924",
"0.49841008",
"0.49746266",
"0.49556792",
"0.4931513",
"0.4869385",
"0.4866362",
"0.48512897",
"0.48177767",
"0.48150975"
] | 0.7157788 | 0 |
Deletes a directory based on name OR uri. [Arguments] | def fusion_api_delete_directory(self, name=None, uri=None, api=None, headers=None):
return self.logindomain.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_directory(DirectoryId=None):\n pass",
"def rm(self, uri):\n path = osaka.utils.get_uri_path(uri)\n try:\n osaka.utils.LOGGER.debug(\"Removing {0} as a file\".format(uri))\n self.webdav.delete(path)\n except Exception as e:\n osaka.utils.LOGGER.debug(\n \"Removing {0} as a directory, file encountered error {1}\".format(uri, e)\n )\n self.webdav.rmdir(path)",
"def delete_directory_contents(conn_obj, path, device=\"dut\"):\n command = \"rm -rf {}/*\".format(path.rstrip(\"/\"))\n if device == \"dut\":\n st.config(conn_obj, command)\n else:\n conn_obj.execute_command(conn_obj, command)\n return True",
"def svn_fs_delete(*args):\r\n return _fs.svn_fs_delete(*args)",
"def delete_uri(\n self, uri: str, logger: Optional[logging.Logger] = default_logger\n ) -> int:\n local_dir = get_local_dir_from_uri(uri, self._resources_dir)\n local_dir_size = get_directory_size_bytes(local_dir)\n\n deleted = delete_package(uri, self._resources_dir)\n if not deleted:\n logger.warning(f\"Tried to delete nonexistent URI: {uri}.\")\n return 0\n\n return local_dir_size",
"def Delete_Dir(self,txn,filename):\n opid = self.new_opid()\n xaction = DeleteDir_Operation(os.path.join(self.home,filename),opid)\n self._add_operation(txn,xaction)",
"def rmdir (**kwargs):\n if 'params' in kwargs:\n params = kwargs['params']\n if 'path' in kwargs:\n path = kwargs['path']\n\n try:\n os.rmdir(path[0]+params[0])\n return ''\n except:\n return 'ERROR: Directory not empty or no such directory\\n'\n #except: #want to expand upon this later, better error handling\n #return 'Invalid Input: No such file or directory\\n'",
"def del_dir(name_del_path):\n\n try:\n os.rmdir(config_tools.full_dest+name_del_path)\n except OSError:\n print(f\"Удалить директорию {name_del_path} не удалось,каталог не найден или не является пустым.\")\n else:\n print(f\"Директория успешно удалена {name_del_path}\")",
"def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')",
"def remove_dir(site_name, directory): # untested - do I need/want this?\n # remove all documents from table site_documents where directory = given directory\n # we'd also need to remove the actual documents (I think no dml version does that yet)\n # if we add a per-site directories table also remove it from there\n raise NotImplementedError",
"def rm(args):\n args.delete = True\n return remove(args)",
"def svn_fs_delete_fs(*args):\r\n return _fs.svn_fs_delete_fs(*args)",
"def main_remove(args):\n return remove_command(args.directory, args.name)",
"def delete(self, uri, where, selectionArgs):\n pass",
"def delete_directory_config(DirectoryName=None):\n pass",
"def delete(self, uri, **kwargs):\n return self.session.delete(uri, **kwargs)",
"def rmdir(self, path: PathLike):",
"def delete(filename):\n storeapps = APP.config[\"storage\"]\n extension = os.path.basename(filename).split(\".\")[-1].upper()\n dirname = \".\".join(os.path.basename(filename).split(\".\")[:-1])\n directory = os.path.join(storeapps, extension, dirname)\n\n try:\n directory = directory.encode(\"utf-8\")\n except UnicodeDecodeError:\n pass\n\n if os.path.isdir(directory):\n shutil.rmtree(directory)\n if os.path.isdir(directory):\n return \"Unable to remove application (check server logs): %s\" % (filename), 500\n return \"Removed: %s\" % (filename), 200\n\n return \"File not found: %s\" % (filename), 404",
"def delete_dir(url_prefix, rse):\n try:\n endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url_prefix)\n bucket = _get_bucket(rse, endpoint, bucket_name)\n i = 0\n keys = []\n for key in bucket.list(prefix=key_name):\n keys.append(key.name)\n i += 1\n if i == 1000:\n ret = _delete_keys(bucket, keys)\n for ret_key in ret:\n if ret[ret_key]['status'] != 0:\n return ret[ret_key]['status'], ret[ret_key]['output']\n i = 0\n keys = []\n if len(keys):\n ret = _delete_keys(bucket, keys)\n for ret_key in ret:\n if ret[ret_key]['status'] != 0:\n return ret[ret_key]['status'], ret[ret_key]['output']\n return 0, None\n except:\n return -1, \"Failed to delete dir: %s, error: %s\" % (url_prefix, traceback.format_exc())",
"def rm(self, path):\n try:\n basedir, item = os.path.split(path)\n postdata = codecs.encode(json.dumps({ 'baseDir': basedir, 'items': [ item ] }), 'utf-8')\n self._urlopen('/api/fileops/delete', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to delete '{}'\".format(path))",
"def filedelete(fname):\n\n if os.path.exists(fname):\n try:\n if os.path.isdir(fname):\n # delete folder\n shutil.rmtree(fname)\n return\n else:\n # delete file\n os.remove(fname)\n return\n except:\n return\n else:\n return",
"def delete_dir(name):\n root_dir = get_data_dir()\n target_dir = root_dir / name\n if not is_relative_to(target_dir, root_dir) or target_dir == root_dir:\n return False\n try:\n shutil.rmtree(target_dir)\n return True\n except FileNotFoundError:\n return False",
"def rmdir(path):",
"def delete(self, prefix, paths):\n pass",
"def delete_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n if hasattr(backend_class, 'delete'):\n return backend_class.delete(parsed_uri, **kwargs)",
"def rm(self, s3uri, **kwargs):\n return self.exec_command('rm %s' % (s3uri), **kwargs)",
"def delete(node):\n try:\n if os.path.isdir(node):\n shutil.rmtree(node)\n else:\n os.unlink(node)\n except OSError as error:\n if error.errno not in [errno.ENOENT, errno.EPERM, errno.EACCES]:\n raise error",
"def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)",
"def delete(cls, uri):\n return cls._perform_request(uri, 'DELETE')",
"def remove(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()"
] | [
"0.66433924",
"0.65953314",
"0.6404519",
"0.6382594",
"0.6300447",
"0.62714815",
"0.6259328",
"0.62426025",
"0.6235144",
"0.6196587",
"0.6184736",
"0.61651444",
"0.61198676",
"0.6103151",
"0.60914314",
"0.60688454",
"0.6044329",
"0.6010868",
"0.6005834",
"0.5996815",
"0.5992961",
"0.59741265",
"0.5948654",
"0.59379643",
"0.5928138",
"0.5903396",
"0.59000385",
"0.589498",
"0.5878706",
"0.58777696"
] | 0.7201999 | 0 |
Update role assignment to a directory group. [Arguments] | def fusion_api_update_group_role_assignment(self, body, api=None, headers=None):
return self.LoginDomainsGroupToRoleMapping.update(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_role(self, group, role):\n self.permissions[group] = roles[role]",
"def set_role(userid, role, group, request=None):",
"def updateRole(role_name):\n\n if role_name == 'gsoc_mentor':\n updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')\n elif role_name == 'gsoc_org_admin':\n updater = RoleUpdater(\n GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')\n elif role_name == 'gsoc_student':\n updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')\n\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def update(self, role):\n self._router_request(\n self._make_request_data(\n 'updateAdminRole',\n data=dict(\n params=dict(\n uid=self.uid,\n name=self.name,\n role=role\n )\n )\n )\n )\n\n self.role = role\n\n return True",
"def update_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover",
"def assign_group_role_on_domain(self, domain_id, group_id, role_id):\n resp, body = self.put('domains/%s/groups/%s/roles/%s' %\n (domain_id, group_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)",
"def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")",
"def set_group(group_name):\n group_config = env.groups[group_name]\n set_role_defs(\n web=group_config['servers'][WEB_ROLE],\n db=group_config['servers'][DB_ROLE],\n )\n env.branch = group_config['branch']\n env.subdomain = group_config.get('subdomain', 'www')",
"def update(self, role):\n model = models.load('Role', role)\n model.account_id = self.account_id\n\n return self.client.update_role(model)",
"def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})",
"def update_targetgroup(self, group_id, **kwargs):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).update(**kwargs)\r\n self._db.commit()\r\n return result",
"def fusion_api_assign_roles_to_directory_group(self, body, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.create(body, api, headers)",
"def can_set_role(userid, role, group):",
"def test_update_team_user_group(client):\n group = client.update_team_user_group(TEAM_ID, GROUP_ID, {\n \"name\": \"Updated Python group\",\n \"is_reviewer\": False,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert group.name == \"Updated Python group\"\n assert group.permissions['is_admin']\n assert not group.permissions['is_reviewer']",
"def changeRole(self, node, role):",
"def upsert_group(self,\n group, # type: Group\n *options, # type: UpsertGroupOptions\n **kwargs # type: Any\n ):\n # This endpoint accepts application/x-www-form-urlencoded and requires the data be sent as form data.\n # The name/id should not be included in the form data.\n # Roles should be a comma separated list of strings.\n # If, only if, the role contains a bucket name then the rolename should be suffixed\n # with[<bucket_name>] e.g. bucket_full_access[default],security_admin.\n\n final_args = forward_args(kwargs, *options)\n final_args.update({k: v for k, v in group.as_dict.items() if k in {\n 'roles', 'description', 'ldap_group_reference'}})\n self._admin_bucket.group_upsert(group.name, **final_args)",
"def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response",
"def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)",
"def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def update(self, consistencygroup, **kwargs):\n if not kwargs:\n return\n\n body = {\"consistencygroup\": kwargs}\n\n return self._update(\"/consistencygroups/%s\" %\n base.getid(consistencygroup), body)",
"def assign_group_role_on_project(self, project_id, group_id, role_id):\n resp, body = self.put('projects/%s/groups/%s/roles/%s' %\n (project_id, group_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)",
"def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise",
"def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)",
"def update_tag_group_acl(session, tag_id=None, group_id=None,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False, allow_read=False,\n date_modified=datetime.now(), username='system_user'\n ):\n session = validate_session(session)\n group = None\n\n if group_id and tag_id:\n group = session.query(TagGroupAccess).\\\n filter(TagGroupAccess.group_id == group_id).\\\n filter(TagGroupAccess.tag_id == tag_id).first()\n if group:\n try:\n group.allow_install = allow_install\n group.allow_uninstall = allow_uninstall\n group.allow_reboot = allow_reboot\n group.allow_schedule = allow_schedule\n group.allow_wol = allow_wol\n group.allow_snapshot_creation = allow_snapshot_creation\n group.allow_snapshot_removal = allow_snapshot_removal\n group.allow_snapshot_revert = allow_snapshot_revert\n group.allow_tag_creation = allow_tag_creation\n group.allow_tag_removal = allow_tag_removal\n group.allow_read = allow_read\n group.date_modified = date_modified\n session.commit()\n return({\n 'pass': True,\n 'message': 'ACL for Group %s was modified for Tag %s' % \\\n (group_id, tag_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to modify ACL for Group %s on Tag %s' % \\\n (group_id, tag_id)\n })\n else:\n return({\n 'pass': False,\n 'message': 'Invalid group_id %s and or tag_id' % \\\n (group_id, tag_id)\n })",
"def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)",
"def ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)",
"def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)",
"async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))",
"async def async_update_mute(self):\n self._mute = await self._raumfeld.async_get_group_mute(self._rooms)",
"def manage_updateRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n self.updateRole(role_id, title, description)\n\n message = 'Role+updated'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?role_id=%s&'\n 'manage_tabs_message=%s' %\n (self.absolute_url(), role_id, message))"
] | [
"0.6839127",
"0.6786615",
"0.64544904",
"0.6158387",
"0.6040294",
"0.6031052",
"0.5998986",
"0.594505",
"0.5939835",
"0.5919381",
"0.5909824",
"0.5889286",
"0.58872354",
"0.5826187",
"0.5820612",
"0.5782797",
"0.5744905",
"0.5735358",
"0.57333267",
"0.5717252",
"0.5698031",
"0.56653017",
"0.564783",
"0.5646315",
"0.56460214",
"0.56311893",
"0.56167585",
"0.5587796",
"0.5526073",
"0.55096996"
] | 0.7115229 | 0 |
Delete Directory Group Role Assignment. [Arguments] | def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):
return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_del_group(dbsync, group):\n pass",
"def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')",
"def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})",
"def test_remove_learner_group_specific_for_coach_pt1(self):\n self.assertTrue(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[0]))",
"def delete_group(self, group):\n raise NotImplementedError('delete_group')",
"def delete_group(user):\n return 'do some magic!'",
"def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))",
"def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)",
"def test_delete_namespaced_role(self):\n pass",
"def test_delete_role(self):\n pass",
"def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)",
"async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)",
"def test_delete_resource_group(self):\n pass",
"def fusion_api_del_role_from_group(self, domain=None, group=None, api=None, headers=None):\n return self.roles.del_role_from_group(domain, group, api=api, headers=headers)",
"def security_group_rule_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_security_group_rule(**kwargs)",
"def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1",
"def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE",
"def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS",
"def remove_permissions(apps, schema_editor):\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n Group = apps.get_model(\"auth\", \"Group\")\n\n permission = Permission.objects.get(\n codename=\"can_approve_estimated_completion_date\",\n )\n\n admin_group = Group.objects.get(name=\"Administrator\")\n admin_group.permissions.remove(permission)\n permission.delete()\n\n print(\n 'Permission \"can_approve_estimated_completion_date\" removed from the \"Admin\" group.'\n )",
"def test_remove_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[1]))",
"def main_role_delete(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n id_ = opts[\"id\"]\n client.delete_role(opts[\"formation\"], id_)\n logger.info(f\"Deleted role with id=\\\"{id_}\\\"\")",
"def test_delete_group(self):\n pass",
"def test_delete_group(self):\n pass",
"def security_group_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_security_group(**kwargs)",
"def test_delete_namespaced_role_binding(self):\n pass",
"async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")",
"def remove_group(args):\n\n # check config file is valid first\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"OIDC config file not valid, please use the verify function to debug\")\n return 1 \n\n result_remove_config_file = remove_group_from_json(args)\n result_remove_from_config = remove_group_config_file(args)\n\n if result_remove_config_file != 0 and result_remove_from_config != 0:\n print(\"Error. Group {} does not exist in DynaFed\".format(args.group))\n return 1\n\n if result_remove_config_file != 0 or result_remove_from_config != 0:\n print(\"Error while removing config for {}. Check {} is missing group and {}.conf is missing to ensure full removal.\".format(args.group, args.file, args.group))\n return 1\n return 0",
"def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))",
"def delete_salary_group(db:Session):\n pass",
"def test_delete_cluster_role_binding(self):\n pass"
] | [
"0.6509207",
"0.6505571",
"0.6501011",
"0.6458564",
"0.6450716",
"0.63913774",
"0.6387865",
"0.63852173",
"0.6364289",
"0.63577217",
"0.6325496",
"0.63238084",
"0.62984717",
"0.6296125",
"0.6289966",
"0.6285411",
"0.6266704",
"0.6266172",
"0.6220471",
"0.62161833",
"0.619778",
"0.61835086",
"0.61835086",
"0.6158073",
"0.6141956",
"0.6140784",
"0.61177355",
"0.61168164",
"0.609904",
"0.6043632"
] | 0.7217537 | 0 |
Retrieve role assignments for directory group under a directory/all directories group or a specific group. [Arguments] | def fusion_api_get_group_role_assignment(self, uri=None, param='', api=None, headers=None):
return self.LoginDomainsGroupToRoleMapping.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_role_assignment_using_sourced_groups(self):\n test_plan = {\n # The default domain with 3 users, 3 groups, 3 projects,\n # plus 3 roles.\n 'entities': {'domains': {'id': CONF.identity.default_domain_id,\n 'users': 3, 'groups': 3, 'projects': 3},\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 2,\n 'effective': True},\n 'results': [{'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_list_role_assignment_using_inherited_sourced_groups(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'domain': 0,\n 'inherited_to_projects': True},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1,\n 'inherited_to_projects': True},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1.\n # We should see the inherited group assigned on the 3 projects\n # from domain 0, as well as the direct assignments.\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 2,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_get_roles_for_groups_on_domain(self):\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n group_list = []\n group_id_list = []\n role_list = []\n for _ in range(3):\n group = unit.new_group_ref(domain_id=domain1['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n group_id_list.append(group['id'])\n\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n\n # Assign the roles - one is inherited\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'])\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n domain_id=domain1['id'],\n role_id=role_list[2]['id'],\n inherited_to_projects=True)\n\n # Now get the effective roles for the groups on the domain project. We\n # shouldn't get back the inherited role.\n\n role_refs = self.assignment_api.get_roles_for_groups(\n group_id_list, domain_id=domain1['id'])\n\n self.assertThat(role_refs, matchers.HasLength(2))\n self.assertIn(role_list[0], role_refs)\n self.assertIn(role_list[1], role_refs)",
"def get_assign_permission(userid, group):",
"def test_get_roles_for_groups_on_project(self):\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n domain2 = unit.new_domain_ref()\n self.resource_api.create_domain(domain2['id'], domain2)\n project1 = unit.new_project_ref(domain_id=domain1['id'])\n self.resource_api.create_project(project1['id'], project1)\n project2 = unit.new_project_ref(domain_id=domain2['id'])\n self.resource_api.create_project(project2['id'], project2)\n group_list = []\n group_id_list = []\n role_list = []\n for _ in range(6):\n group = unit.new_group_ref(domain_id=domain1['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n group_id_list.append(group['id'])\n\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n\n # Assign the roles - one inherited and one non-inherited on Domain1,\n # plus one on Project1\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n project_id=project1['id'],\n role_id=role_list[2]['id'])\n\n # ...and a duplicate set of spoiler assignments to Domain2/Project2\n self.assignment_api.create_grant(group_id=group_list[3]['id'],\n domain_id=domain2['id'],\n role_id=role_list[3]['id'])\n self.assignment_api.create_grant(group_id=group_list[4]['id'],\n domain_id=domain2['id'],\n role_id=role_list[4]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[5]['id'],\n project_id=project2['id'],\n role_id=role_list[5]['id'])\n\n # With inheritance on, we should also get back the inherited role from\n # its owning domain.\n\n role_refs = self.assignment_api.get_roles_for_groups(\n group_id_list, project_id=project1['id'])\n\n self.assertThat(role_refs, matchers.HasLength(2))\n self.assertIn(role_list[1], role_refs)\n self.assertIn(role_list[2], role_refs)",
"def locate_group_users(self, group):\n return self.ldap_connection.search_s(\"ou=Groups,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, 'cn={0}'.format(group))",
"def doSearch(acl_tool, groupId):\n rolemakers = acl_tool.plugins.listPlugins(IRolesPlugin)\n group = acl_tool.getGroupById(groupId)\n allAssignedRoles = []\n for rolemaker_id, rolemaker in rolemakers:\n allAssignedRoles.extend(rolemaker.getRolesForPrincipal(group))\n return allAssignedRoles",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def test_inherited_role_grants_for_group(self):\n role_list = []\n for _ in range(4):\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n user1 = unit.new_user_ref(domain_id=domain1['id'])\n user1 = self.identity_api.create_user(user1)\n group1 = unit.new_group_ref(domain_id=domain1['id'])\n group1 = self.identity_api.create_group(group1)\n group2 = unit.new_group_ref(domain_id=domain1['id'])\n group2 = self.identity_api.create_group(group2)\n project1 = unit.new_project_ref(domain_id=domain1['id'])\n self.resource_api.create_project(project1['id'], project1)\n\n self.identity_api.add_user_to_group(user1['id'],\n group1['id'])\n self.identity_api.add_user_to_group(user1['id'],\n group2['id'])\n\n roles_ref = self.assignment_api.list_grants(\n user_id=user1['id'],\n project_id=project1['id'])\n self.assertEqual(0, len(roles_ref))\n\n # Create two roles - the domain one is not inherited\n self.assignment_api.create_grant(user_id=user1['id'],\n project_id=project1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group1['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'])\n\n # Now get the effective roles for the user and project, this\n # should only include the direct role assignment on the project\n combined_list = self.assignment_api.get_roles_for_user_and_project(\n user1['id'], project1['id'])\n self.assertEqual(1, len(combined_list))\n self.assertIn(role_list[0]['id'], combined_list)\n\n # Now add to more group roles, both inherited, to the domain\n self.assignment_api.create_grant(group_id=group2['id'],\n domain_id=domain1['id'],\n role_id=role_list[2]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group2['id'],\n domain_id=domain1['id'],\n role_id=role_list[3]['id'],\n inherited_to_projects=True)\n\n # Now get the effective roles for the user and project again, this\n # should now include the inherited roles on the domain\n combined_list = self.assignment_api.get_roles_for_user_and_project(\n user1['id'], project1['id'])\n self.assertEqual(3, len(combined_list))\n self.assertIn(role_list[0]['id'], combined_list)\n self.assertIn(role_list[2]['id'], combined_list)\n self.assertIn(role_list[3]['id'], combined_list)\n\n # TODO(henry-nash): The test above uses get_roles_for_user_and_project\n # which will, in a subsequent patch, be re-implemented to simply call\n # list_role_assignments (see blueprint remove-role-metadata).\n #\n # The test plan below therefore mirrors this test, to ensure that\n # list_role_assignments works the same. 
Once\n # get_roles_for_user_and_project has been re-implemented then the\n # manual tests above can be refactored to simply ensure it gives\n # the same answers.\n test_plan = {\n # A domain with a user and project, 2 groups, plus 4 roles.\n 'entities': {'domains': {'users': 1, 'projects': 1, 'groups': 2},\n 'roles': 4},\n 'group_memberships': [{'group': 0, 'users': [0]},\n {'group': 1, 'users': [0]}],\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'domain': 0},\n {'group': 1, 'role': 2, 'domain': 0,\n 'inherited_to_projects': True},\n {'group': 1, 'role': 3, 'domain': 0,\n 'inherited_to_projects': True}],\n 'tests': [\n # List all effective assignments for user[0] on project[0].\n # Should get one direct role and both inherited roles, but\n # not the direct one on domain[0], even though user[0] is\n # in group[0].\n {'params': {'user': 0, 'project': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'project': 0},\n {'user': 0, 'role': 2, 'project': 0,\n 'indirect': {'domain': 0, 'group': 1}},\n {'user': 0, 'role': 3, 'project': 0,\n 'indirect': {'domain': 0, 'group': 1}}]}\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def get_group(group):\n\n return ldapi.lookup(ld, 'cn', group, cfg['ldap_groups_base'])",
"def test_list_role_assignment_by_user_with_domain_group_roles(self):\n test_plan = {\n # A domain with 3 users, 3 groups, a spoiler domain\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3}, 1],\n 'roles': 3},\n # Users 1 & 2 are in the group 0, User 1 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 0},\n {'group': 1, 'role': 2, 'domain': 0},\n # ...and two spoiler assignments\n {'user': 1, 'role': 1, 'domain': 0},\n {'group': 2, 'role': 2, 'domain': 0}],\n 'tests': [\n # List all effective assignments for user[0].\n # Should get one direct user role and a user roles for each of\n # groups 0 and 1\n {'params': {'user': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}},\n {'user': 0, 'role': 2, 'domain': 0,\n 'indirect': {'group': 1}}\n ]},\n # Adding domain[0] as a filter should return the same data\n {'params': {'user': 0, 'domain': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}},\n {'user': 0, 'role': 2, 'domain': 0,\n 'indirect': {'group': 1}}\n ]},\n # Using domain[1] should return nothing\n {'params': {'user': 0, 'domain': 1, 'effective': True},\n 'results': []},\n # Using user[2] should return nothing\n {'params': {'user': 2, 'domain': 0, 'effective': True},\n 'results': []},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_list_role_assignment_using_sourced_groups_with_domains(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 1,\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_list_role_assignment_by_domain(self):\n test_plan = {\n # A domain with 3 users, 1 group, a spoiler domain and 2 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 1}, 1],\n 'roles': 2},\n # Users 1 & 2 are in the group\n 'group_memberships': [{'group': 0, 'users': [1, 2]}],\n # Assign a role for user 0 and the group\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 0}],\n 'tests': [\n # List all effective assignments for domain[0].\n # Should get one direct user role and user roles for each of\n # the users in the group.\n {'params': {'domain': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 1, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}},\n {'user': 2, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}}\n ]},\n # Using domain[1] should return nothing\n {'params': {'domain': 1, 'effective': True},\n 'results': []},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def dictGetHierarchy_granted_via_role(self, node=None):\n\n user_name = f\"user_{getuid()}\"\n role_name = f\"role_{getuid()}\"\n\n if node is None:\n node = self.context.node\n\n with user(node, f\"{user_name}\"), role(node, f\"{role_name}\"):\n\n with When(\"I grant the role to the user\"):\n node.query(f\"GRANT {role_name} TO {user_name}\")\n\n Suite(run=dictGetHierarchy_check,\n examples=Examples(\"privilege on grant_target_name user_name\", [\n tuple(list(row)+[role_name,user_name]) for row in dictGetHierarchy_check.examples\n ], args=Args(name=\"check privilege={privilege}\", format_name=True)))",
"def get_group_access(self, group):\n return self._access_lists.get_group_access(group)",
"def synchronize_group(self, group, prefix, blacklist):\n\n try:\n group_name = group[1]['cn'][0]\n group_members = group[1]['member']\n except Exception as e:\n self.logger.error(\"Failed to retrieve group name and members: {0}\".format(e))\n return False\n\n self.logger.debug(\n \"Group '{0}' has members: {1}\".format(\n group_name, group_members\n )\n )\n\n role_match = None\n role_match = re.search(\n '^{}(?P<role_name>[a-zA-Z0-9_]+)'.format(prefix), group_name\n )\n\n if role_match:\n role_name = role_match.groups('role_name')[0]\n else:\n self.logger.warning(\n \"Group '{0}' did not match the pattern, skipping...\".format(\n group_name\n )\n )\n return False\n\n if role_name in blacklist:\n self.logger.info(\n \"Skipping group '{0}' which is on the blacklist.\".format(\n group_name\n )\n )\n return False\n\n # First, ensure that the role exists\n try:\n self.psql_cur.execute(\n \"SELECT 1 FROM pg_roles WHERE rolname='{0}'\".format(role_name)\n )\n result = self.psql_cur.fetchone()\n except psycopg2.Error as e:\n self.logger.error(unicode(e.message).encode('utf-8'))\n return False\n\n if not result or result[0] == 0:\n self.logger.warning(\n \"Group {0} does not have a PG role, skipping...\".format(\n group_name\n )\n )\n return False\n\n # Second, extract each member from the list.\n try:\n authorized_users = self.extract_users(group_members)\n except Exception as e:\n self.logger.error(\n \"Failed to extract users from LDAP for {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n # Third, add authorized users to the role\n try:\n self.add_authorized_users(role_name, authorized_users)\n except Exception as e:\n self.logger.error(\n \"Failed to add users to the PG role for group {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n # Lastly, remove all users that are not on the list\n try:\n self.purge_unauthorized_users(role_name, authorized_users)\n except Exception as e:\n self.logger.error(\n \"Failed to remove unauthorized users from group {0}: {1}\".format(\n group_name, e\n )\n )\n return False\n\n return True",
"def GetGroupMembers(self, group):\n return []",
"def RetrieveAgentInGroup(**argd):\n flag, ret = CGateway.core.RetrieveAgentInGroup(argd[\"session\"], argd[\"name\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse({'return': ret})",
"def get_roles(role):",
"def list_group(self, groupname):\n return self.get_admin(\"groups/{}\".format(groupname))",
"def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list",
"def test_list_role_for_all_namespaces(self):\n pass",
"def test_list_domains_for_groups(self):\n domain_list = []\n group_list = []\n group_id_list = []\n for _ in range(3):\n domain = unit.new_domain_ref()\n self.resource_api.create_domain(domain['id'], domain)\n domain_list.append(domain)\n\n group = unit.new_group_ref(domain_id=domain['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n group_id_list.append(group['id'])\n\n role1 = unit.new_role_ref()\n self.role_api.create_role(role1['id'], role1)\n\n # Assign the roles - one is inherited\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain_list[0]['id'],\n role_id=role1['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain_list[1]['id'],\n role_id=role1['id'])\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n domain_id=domain_list[2]['id'],\n role_id=role1['id'],\n inherited_to_projects=True)\n\n # Now list the domains that have roles for any of the 3 groups\n # We shouldn't get back domain[2] since that had an inherited role.\n\n domain_refs = (\n self.assignment_api.list_domains_for_groups(group_id_list))\n\n self.assertThat(domain_refs, matchers.HasLength(2))\n self.assertIn(domain_list[0], domain_refs)\n self.assertIn(domain_list[1], domain_refs)",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def test_list_projects_for_groups(self):\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n domain2 = unit.new_domain_ref()\n self.resource_api.create_domain(domain2['id'], domain2)\n project1 = unit.new_project_ref(domain_id=domain1['id'])\n project1 = self.resource_api.create_project(project1['id'], project1)\n project2 = unit.new_project_ref(domain_id=domain1['id'])\n project2 = self.resource_api.create_project(project2['id'], project2)\n project3 = unit.new_project_ref(domain_id=domain1['id'])\n project3 = self.resource_api.create_project(project3['id'], project3)\n project4 = unit.new_project_ref(domain_id=domain2['id'])\n project4 = self.resource_api.create_project(project4['id'], project4)\n group_list = []\n role_list = []\n for _ in range(7):\n group = unit.new_group_ref(domain_id=domain1['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n\n # Assign the roles - one inherited and one non-inherited on Domain1,\n # plus one on Project1 and Project2\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n project_id=project1['id'],\n role_id=role_list[2]['id'])\n self.assignment_api.create_grant(group_id=group_list[3]['id'],\n project_id=project2['id'],\n role_id=role_list[3]['id'])\n\n # ...and a few of spoiler assignments to Domain2/Project4\n self.assignment_api.create_grant(group_id=group_list[4]['id'],\n domain_id=domain2['id'],\n role_id=role_list[4]['id'])\n self.assignment_api.create_grant(group_id=group_list[5]['id'],\n domain_id=domain2['id'],\n role_id=role_list[5]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[6]['id'],\n project_id=project4['id'],\n role_id=role_list[6]['id'])\n\n group_id_list = [group_list[1]['id'], group_list[2]['id'],\n group_list[3]['id']]\n\n # With inheritance on, we should also get back the Project3 due to the\n # inherited role from its owning domain.\n project_refs = (\n self.assignment_api.list_projects_for_groups(group_id_list))\n\n self.assertThat(project_refs, matchers.HasLength(3))\n self.assertIn(project1, project_refs)\n self.assertIn(project2, project_refs)\n self.assertIn(project3, project_refs)",
"def listdirs(self):\n return self.list_groups()",
"def get_memberships(self, kwargs):\n account = kwargs[\"account\"]\n recursive = kwargs.get(\"recursive\", False)\n\n already_printed = set()\n\n def lookup_groups(dn, leading_sp, already_treated):\n results = self.engine.query(self.engine.DISTINGUISHED_NAME(dn), [\"memberOf\", \"primaryGroupID\"])\n for result in results:\n if \"memberOf\" in result:\n for group_dn in result[\"memberOf\"]:\n if group_dn not in already_treated:\n print(\"{g:>{width}}\".format(g=group_dn, width=leading_sp + len(group_dn)))\n already_treated.add(group_dn)\n lookup_groups(group_dn, leading_sp + 4, already_treated)\n\n if \"primaryGroupID\" in result and result[\"primaryGroupID\"]:\n pid = result[\"primaryGroupID\"]\n results = list(self.engine.query(self.engine.PRIMARY_GROUP_ID(pid)))\n if results:\n already_treated.add(results[0][\"dn\"])\n\n return already_treated\n\n results = self.engine.query(self.engine.ACCOUNT_IN_GROUPS_FILTER(account), [\"memberOf\", \"primaryGroupID\"])\n for result in results:\n if \"memberOf\" in result:\n for group_dn in result[\"memberOf\"]:\n print(group_dn)\n if recursive:\n already_printed.add(group_dn)\n s = lookup_groups(group_dn, 4, already_printed)\n already_printed.union(s)\n\n # for some reason, when we request an attribute which is not set on an object,\n # ldap3 returns an empty list as the value of this attribute\n if \"primaryGroupID\" in result and result[\"primaryGroupID\"] != []:\n pid = result[\"primaryGroupID\"]\n results = list(self.engine.query(self.engine.PRIMARY_GROUP_ID(pid)))\n if results:\n print(results[0][\"dn\"])",
"def get_membersof(self, kwargs):\n group = kwargs[\"group\"]\n verbose = kwargs.get(\"verbose\", False)\n\n results = list(self.engine.query(self.engine.GROUP_DN_FILTER(group), [\"distinguishedName\", \"objectSid\"]))\n if results:\n group_dn = results[0][\"distinguishedName\"]\n else:\n error(\"Group {group} does not exists\".format(group=group))\n\n primary_group_id = results[0][\"objectSid\"].split('-')[-1]\n results = self.engine.query(self.engine.ACCOUNTS_IN_GROUP_FILTER(primary_group_id, group_dn))\n self.display(results, verbose)",
"def test_get_role_by_user_and_project_with_user_in_group(self):\n user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)\n user_ref = self.identity_api.create_user(user_ref)\n\n project_ref = unit.new_project_ref(\n domain_id=CONF.identity.default_domain_id)\n self.resource_api.create_project(project_ref['id'], project_ref)\n\n group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)\n group_id = self.identity_api.create_group(group)['id']\n self.identity_api.add_user_to_group(user_ref['id'], group_id)\n\n role_ref_list = []\n for i in range(2):\n role_ref = unit.new_role_ref()\n self.role_api.create_role(role_ref['id'], role_ref)\n role_ref_list.append(role_ref)\n\n self.assignment_api.add_role_to_user_and_project(\n user_id=user_ref['id'],\n tenant_id=project_ref['id'],\n role_id=role_ref['id'])\n\n role_list = self.assignment_api.get_roles_for_user_and_project(\n user_ref['id'],\n project_ref['id'])\n\n self.assertEqual(set([r['id'] for r in role_ref_list]),\n set(role_list))",
"def test_list_role_assignment_fails_with_userid_and_source_groups(self):\n group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.create_group(group)\n self.assertRaises(exception.UnexpectedError,\n self.assignment_api.list_role_assignments,\n effective=True,\n user_id=self.user_foo['id'],\n source_from_group_ids=[group['id']])"
] | [
"0.5716845",
"0.5665079",
"0.56083006",
"0.54704577",
"0.5459695",
"0.54367876",
"0.5423686",
"0.5395236",
"0.537699",
"0.5319862",
"0.52708954",
"0.5258562",
"0.52357024",
"0.5224857",
"0.5221974",
"0.5152498",
"0.5134285",
"0.5104248",
"0.5071822",
"0.50717807",
"0.5041352",
"0.5028388",
"0.50195086",
"0.49851096",
"0.49693298",
"0.49440536",
"0.4922358",
"0.49174717",
"0.48846966",
"0.4874286"
] | 0.6416643 | 0 |
Validate Group and Roles [Arguments] | def fusion_api_validate_group_and_roles(self, body, api=None, headers=None):
return self.LoginDomainsGroupToRoleMapping.validate(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _ValidateRoles(dataproc, pools):\n if not pools:\n # The backend will automatically create the default pool.\n return\n seen_roles = set()\n for pool in pools:\n for role in pool.roles:\n if role in seen_roles:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Multiple pools contained the same role \"%s\".' % role)\n else:\n seen_roles.add(role)\n\n default = dataproc.messages.GkeNodePoolTarget.RolesValueListEntryValuesEnum(\n 'DEFAULT')\n if default not in seen_roles:\n raise exceptions.InvalidArgumentException(\n '--pools',\n 'If any pools are specified, then exactly one must have the '\n '\"default\" role.')",
"def validate(self, data):\n groups = data['groups']\n if all(self.context['request'].user.has_perm(\"change_group\", group) for group in groups) or set(groups) <= set(\n self.context['request'].user.groups.all()):\n return data\n else:\n raise PermissionDenied()",
"def _validate_roles(roles: Dict[str, Any]) -> Tuple[bool, str]:\n # check type\n if not isinstance(roles, dict):\n return (\n False,\n \"Invalid type for roles. Expected dict. Found '{}'.\".format(type(roles)),\n )\n\n # check number of roles\n if not 1 <= len(roles) <= 2:\n return (\n False,\n \"There must be either 1 or 2 roles defined in this dialogue. Found {}\".format(\n len(roles)\n ),\n )\n\n # check each role's format\n for role in roles:\n if not _is_valid_regex(ROLE_REGEX_PATTERN, role):\n return (\n False,\n \"Invalid name for role '{}'. Role names must match the following regular expression: {} \".format(\n role, ROLE_REGEX_PATTERN\n ),\n )\n\n return True, \"Dialogue roles are valid.\"",
"def can_set_role(userid, role, group):",
"def clean_role():",
"def group_required(*groups):\n\n def decorator(func):\n @wraps(func)\n def check_auth(*args, **kwargs):\n check_user_group(*groups)\n return func (*args, **kwargs)\n return check_auth\n return decorator",
"def group_required(*group_names):\n\n def in_groups(current_user):\n if not settings.ENABLE_PERMISSIONS:\n return True\n if current_user.is_authenticated:\n if current_user.groups.filter(name__in=group_names).exists():\n return True\n return False\n\n return user_passes_test(in_groups)",
"def _check_groups_support(self, groups=()):\n available_groups = set(self.df[self.col_group].unique())\n for group in groups:\n assert group in available_groups, \"Group %s is not in the dataset provided\" % group",
"def validate_role():\n\n target_country = DEF_ROLE_TARGET_COUNTRY\n pairs = DEF_ROLE_COUNTRY_PAIRS\n\n def wrapper(self):\n \"\"\"Wrapper method.\n \"\"\"\n\n cleaned_data = self.cleaned_data\n\n for country_field, state_field in pairs:\n country = cleaned_data.get(country_field)\n state = cleaned_data.get(state_field)\n\n if country is None or state is None:\n continue\n\n if country != target_country:\n continue\n\n if len(state) != 2:\n raise forms.ValidationError(DEF_2_LETTER_STATE_FMT % target_country)\n\n return cleaned_data\n\n return wrapper",
"def test_verify_that_you_can_create_a_new_group():",
"def test_group_by_roles(self):\n self._test_group_by('Roles', [1, 5])",
"def at_least_a_group(exp, mesh, mod):\n is_valid = True\n if not exp.find_groups(mesh):\n mess = \"At least a group needs to be defined on the selected object\"\n mod.launch(GC.ERROR, mess)\n is_valid = False\n return is_valid",
"def group_required(*group_names):\n\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)",
"def test_list_role_assignment_fails_with_userid_and_source_groups(self):\n group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.create_group(group)\n self.assertRaises(exception.UnexpectedError,\n self.assignment_api.list_role_assignments,\n effective=True,\n user_id=self.user_foo['id'],\n source_from_group_ids=[group['id']])",
"def validate_mutually_exclusive(namespace, required, param1, param2):\n value1 = getattr(namespace, param1, None)\n value2 = getattr(namespace, param2, None)\n\n message = None\n if not value1 and not value2 and required:\n message = \"One of the following arguments are required: \\n\"\n elif value1 and value2:\n message = (\"The follow arguments are mutually \"\n \"exclusive and cannot be combined: \\n\")\n if message:\n missing = ','.join([arg_name(param1), arg_name(param2)])\n message += missing\n raise ValueError(message)",
"def _check_group(self):\n if len(self.groups) != 2:\n raise ValueError(\"There have to be two groups!\")\n\n # Check the number of atoms in each group is the same\n n_group1 = 0\n for key, value in self.groups[0].items():\n n_group1 += value\n\n n_group2 = 0\n for key, value in self.groups[1].items():\n n_group2 += value\n\n if n_group1 != n_group2:\n f1 = self._group2formula(self.groups[0])\n f2 = self._group2formula(self.groups[1])\n msg = \"The two groups have to have the same number of atoms.\\n\"\n msg += \"Group 1: {} Group 2: {}\".format(f1, f2)\n raise ValueError(msg)",
"def group_required(*group_names):\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)",
"def _groups_validation(groups: torch.Tensor, num_groups: int) -> None:\n if torch.max(groups) > num_groups:\n raise ValueError(\n f\"The largest number in the groups tensor is {torch.max(groups)}, which is larger than the specified\",\n f\"number of groups {num_groups}. The group identifiers should be ``0, 1, ..., (num_groups - 1)``.\",\n )\n if groups.dtype != torch.long:\n raise ValueError(f\"Excpected dtype of argument groups to be long, not {groups.dtype}.\")",
"def group_required(*group_names):\n\tdef in_groups(u):\n\t\tif u.is_authenticated():\n\t\t\tif bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n\t\t\t\treturn True\n\t\treturn False\n\treturn user_passes_test(in_groups, login_url='/')",
"def test_add_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.add_learner_group', self.classrooms[1]))",
"def create_predefined_roles(sender, **kwargs):\n from django.contrib.contenttypes.models import ContentType\n from w2s.defaults import TaskDefaults\n from users.models import Roles\n\n if ContentType.objects.filter(app_label='auth', model='group').exists() and ContentType.objects.filter(app_label='users', model='roles').exists():\n predefined_roles = TaskDefaults.get_predefined_roles()\n for role_alias, role_name in predefined_roles.items():\n group_model = ContentType.objects.filter(app_label='auth', model='group')[0].model_class()\n\n if not group_model.objects.filter(name=role_name).exists():\n access_specifiers = TaskDefaults.get_predefined_role_access_specifiers(role_alias=role_alias)\n allowed_permissions_sets = [\n TaskDefaults.get_access_specifier_permissions(specifier)[0] for specifier in access_specifiers]\n allowed_permissions = list(set([item for sublist in allowed_permissions_sets for item in sublist]))\n\n # Creating Group\n group_instance = group_model.objects.create(name=role_name)\n group_instance.permissions.set(allowed_permissions)\n if group_instance.save() is None:\n print('\\033[0;37;42m Generated new role \"%s\", Applying details... \\033[0m' % role_alias)\n\n # Creating Role detail\n role_instance = Roles.objects.create(\n group = group_instance,\n alias = role_alias,\n accesses = ','.join(access_specifiers),\n description = 'Predefined role for %s' % role_alias\n )\n\n if role_instance.save() is None:\n print('\\033[0;37;42m Details applied for role: %s \\033[0m' % role_alias)\n else:\n print('---- Error while generating predefined roles ---')\n print(' -Either auth.group or users.roles model does not exists !!!')",
"def _SplitRoles(dataproc, arg_roles, support_shuffle_service=False):\n roles = []\n support_shuffle_service = _GkeNodePoolTargetParser._ARG_ROLE_TO_API_ROLE\n if support_shuffle_service:\n defined_roles = _GkeNodePoolTargetParser._ARG_ROLE_TO_API_ROLE.copy()\n defined_roles.update({'shuffle-service': 'SHUFFLE_SERVICE'})\n for arg_role in arg_roles.split(';'):\n if arg_role.lower() not in defined_roles:\n raise exceptions.InvalidArgumentException(\n '--pools', 'Unrecognized role \"%s\".' % arg_role)\n roles.append(\n dataproc.messages.GkeNodePoolTarget.RolesValueListEntryValuesEnum(\n defined_roles[arg_role.lower()]))\n return roles",
"def test_get_roles_for_groups_on_domain(self):\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n group_list = []\n group_id_list = []\n role_list = []\n for _ in range(3):\n group = unit.new_group_ref(domain_id=domain1['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n group_id_list.append(group['id'])\n\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n\n # Assign the roles - one is inherited\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'])\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n domain_id=domain1['id'],\n role_id=role_list[2]['id'],\n inherited_to_projects=True)\n\n # Now get the effective roles for the groups on the domain project. We\n # shouldn't get back the inherited role.\n\n role_refs = self.assignment_api.get_roles_for_groups(\n group_id_list, domain_id=domain1['id'])\n\n self.assertThat(role_refs, matchers.HasLength(2))\n self.assertIn(role_list[0], role_refs)\n self.assertIn(role_list[1], role_refs)",
"def _check_groups_docker():\n if not _user_belongs_to('docker'):\n _raise_group_error('docker')",
"def validate_schema(self, data, **kwargs):\n if \"role\" not in data and \"visible\" not in data:\n raise ValidationError(_(\"Missing fields 'role' and/or 'visible'.\"))",
"def validate_arguments(self,args):\n\t\tif args.org == None:\n\t\t\tprint('Please specify Organization name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.repo == None:\n\t\t\tprint('Please specify Repositories name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == None:\n\t\t\tprint('Please specify type of the event. Exiting.')\n\t\t\tsys.exit(0)",
"def test_subroles(self):\n def check_roles(r):\n dev_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[2]\n mem_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[3]\n assert 'All users in Admin group' in dev_holder.text\n assert 'All users in Developer group' in mem_holder.text\n\n r = self.app.get('/admin/groups/')\n\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n # test that subroles are intact after user added\n with audits('add user test-user to Admin'):\n r = self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)\n # test that subroles are intact after user deleted\n with audits('remove user test-user from Admin'):\n r = self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)",
"def group_required(group_names):\n\ttry:\n\t\tuser = CrequestMiddleware.get_request().user\n\t\tif user.is_authenticated():\n\t\t\ttest = user.groups.filter(name=group_names).exists()\n\texcept (AttributeError):\n\t\ttest = False\n\n\n\treturn user_passes_test(test)",
"def tests_names_roles(verbose=True):\n # good names\n names = []\n with open(\"tests/good_names.txt\") as f:\n for line in f.readlines():\n line = line.replace('\\n', '')\n if line and not line.startswith(\"#\"):\n names.append(line)\n for name in names:\n if verbose:\n print(f\"'{name}' --> '{sanitize_name(name)}' of group '{extract_role(name)}'\")\n # assert check_name(name)\n role = extract_role(name)\n assert check_role(role) and role != \"TODO\"\n\n # bad names\n names = []\n with open(\"tests/bad_names.txt\") as f:\n for line in f.readlines():\n line = line.replace('\\n', '')\n if line and not line.startswith(\"#\"):\n names.append(line)\n for name in names:\n if verbose:\n print(f\"'{name}' --> '{sanitize_name(name)}' of group '{extract_role(name)}'\")\n # assert check_name(name)\n role = extract_role(name)\n assert (not check_name(name)) or (role == \"TODO\")",
"def group(description, *funcs, **kwargs):\n def _argument_group(parser):\n if kwargs.get('mutually_exclusive'):\n kwargs.pop('mutually_exclusive')\n g = parser.add_mutually_exclusive_group(**kwargs)\n elif kwargs:\n raise UserWarning(\n \"Unrecognized kwargs: %s\" % str(list(kwargs.keys())))\n else:\n g = parser.add_argument_group(description)\n for f in funcs:\n f(g)\n return _argument_group"
] | [
"0.63680756",
"0.625113",
"0.5933205",
"0.5872939",
"0.5811343",
"0.57347214",
"0.5650826",
"0.5630459",
"0.5623351",
"0.5616661",
"0.55777454",
"0.5577008",
"0.5567598",
"0.55302215",
"0.5528641",
"0.55236584",
"0.55154485",
"0.54752195",
"0.54318565",
"0.5424925",
"0.5424221",
"0.54145116",
"0.54089487",
"0.5386309",
"0.5382081",
"0.53623873",
"0.53621495",
"0.53174275",
"0.5312352",
"0.52818257"
] | 0.678552 | 0 |
Get Login Domains Global Settings for specified appliance [Arguments] | def fusion_api_get_login_domains_global_settings(self, api=None, headers=None, param=''):
return self.domain_settings.get(api, headers, param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_account_settings():\n pass",
"def get_ipa_conf():\n\n parser = RawConfigParser()\n parser.read(paths.IPA_DEFAULT_CONF)\n result = dict()\n for item in ['basedn', 'realm', 'domain', 'server', 'host', 'xmlrpc_uri']:\n if parser.has_option('global', item):\n value = parser.get('global', item)\n else:\n value = None\n if value:\n result[item] = value\n\n return result",
"def get_client_settings_env(**_):\r\n username = os.environ.get('SL_USERNAME')\r\n api_key = os.environ.get('SL_API_KEY')\r\n proxy = os.environ.get('https_proxy')\r\n\r\n config = {'proxy': proxy}\r\n if username and api_key:\r\n config['auth'] = BasicAuthentication(username, api_key)\r\n return config",
"def get_config(site='self'):\n path='/sites/%s/configuration' % (site)\n return _api_request('GET', path)",
"def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def _config(request):\n return request.environ['adminish']",
"def config(gvar):\n\n mandatory = []\n required = []\n optional = ['-cc', '-ckv', '-CSEP', '-CSV', '-g', '-H', '-h', '-NV', '-ok', '-r', '-s', '-V', '-VC', '-v', '-x509', '-xA']\n\n if gvar['retrieve_options']:\n return mandatory + required + optional\n\n # Check for missing arguments or help required.\n form_data = check_keys(\n gvar,\n mandatory,\n required,\n optional,\n key_map=KEY_MAP)\n\n # List the current defaults. If the form_data contains any optional fields,\n # those values will be updated before the list is retrieved.\n response = requests(\n gvar,\n '/server/config/',\n form_data\n )\n \n if response['message']:\n print(response['message'])\n\n # Print report\n show_active_user_groups(gvar, response)\n\n show_table(\n gvar,\n response['config_list'],\n [\n 'category/Category,k',\n 'config_key/Config Key,k',\n 'config_type/Type',\n 'config_value/Value',\n ],\n title=\"Server Configuration\",\n )",
"def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()",
"def fusion_api_get_global_settings(self, uri=None, api=None, headers=None, param=''):\n return self.settings.get(uri, api, headers, param)",
"def global_value(config, default):\n for opt in config.permit_root_login:\n if (opt.in_match is None or opt.in_match[0].lower() == 'all'):\n return opt.value\n return default",
"def fusion_api_edit_login_domains_global_settings(self, body, param='', api=None, headers=None):\n return self.domain_settings.put(body, param, api, headers)",
"def get(self, **kwargs):\r\n # groups = kwargs.get('groups')\r\n return {\r\n 'app_fullname': main_config.app_name,\r\n 'app_name': main_config.package_name,\r\n 'app_version': main_config.app_version\r\n }",
"def client_settings():\n return CLIENT_SETTINGS",
"def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))",
"def get_client_settings(**kwargs):\r\n all_settings = {}\r\n for setting_method in SETTING_RESOLVERS:\r\n settings = setting_method(**kwargs)\r\n if settings:\r\n settings.update((k, v) for k, v in all_settings.items() if v)\r\n all_settings = settings\r\n if all_settings.get('auth'):\r\n break\r\n return all_settings",
"def appcfg_login(app):\n if not _GAE_SDK_PATH:\n raise ValueError('Call setup_gae_sdk first')\n if os.path.exists(_appcfg_oauth2_tokens()):\n os.remove(_appcfg_oauth2_tokens())\n # HACK: Call a command with no side effect to launch the flow.\n subprocess.call([\n sys.executable,\n os.path.join(_GAE_SDK_PATH, 'appcfg.py'),\n '--application', app.app_id,\n '--noauth_local_webserver',\n 'list_versions',\n ], cwd=app.app_dir)",
"def googledrive_config_get(node_addon, auth, **kwargs):\n return {\n 'result': serialize_settings(node_addon, auth.user),\n }",
"def service_config():\n global _service_config\n if not _service_config:\n r = requests.get('https://tech.lds.org/mobile/ldstools/config.json')\n r.raise_for_status()\n _service_config = r.json()\n return _service_config",
"def auth_domain(request):\n return request.registry.settings.get('h.auth_domain', request.domain)",
"def GetAllSettings(appname, section):\n settings = _OptionsDB(appname)\n return settings.getAll(section)",
"def get_details():\n if not hasattr(env, \"site_name\"):\n env.site_name = prompt(\"Enter site domain name:\")\n env.site_is_secure = confirm(\"Do you need SSL? (Yes/No)\", default=False)\n env.app_server = prompt(\"Enter app server you wish to use (apache/uwsgi/gunicorn):\")\n if env.site_is_secure:\n env.ip_address = prompt(\"Enter server IP address:\")\n else:\n env.ip_address = \"0.0.0.0\"\n\n # Find out project name\n project_name = env.site_name.split('.')\n try:\n if project_name[1] == 'com':\n # Sample case - abc.com\n env.project_name = project_name[0]\n else:\n # Sample case - shop.abc.com\n env.project_name = project_name[1]\n except IndexError:\n env.project_name = env.site_name",
"def _get_config_data(self, cr, uid):\n\n model_conf = self.pool.get('customer.support.settings')\n args = [('selected', '=', True)] \n ids = model_conf.search(cr, uid, args)\n config = model_conf.browse(cr, uid, ids[0])\n\n return {\n 'tor_api_key': config.tor_api_key,\n 'tor_domain': config.tor_domain,\n 'company': config.company\n }",
"def app_settings():\n return {\n 'app_wksp_path': os.path.join(App.get_app_workspace().path, ''),\n 'threddsdatadir': App.get_custom_setting(\"thredds_path\"),\n 'threddsurl': App.get_custom_setting(\"thredds_url\"),\n 'logfile': os.path.join(App.get_app_workspace().path, 'workflow.log')\n }",
"def cli(ctx):\n return ctx.gi.cannedvalues.get_values()",
"def config_parse_file():\n global ANGELCO_EMAIL, ANGELCO_PASSWORD\n\n print(\"Parsing the config file...\")\n config = configparser.ConfigParser()\n with open('dwh.cfg') as configfile:\n config.read_file(configfile)\n\n ANGELCO_EMAIL = config.get('ANGELCO', 'EMAIL')\n ANGELCO_PASSWORD = config.get('ANGELCO', 'PASSWORD')",
"def settings():\n return _get_settings()[1]",
"async def dashboard(request):\n return [\n {'name': 'application config', 'value': {k: str(v) for k, v in app.cfg}},\n {'name': 'request headers', 'value': dict(request.headers)},\n ]",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def getconfig(self):\n self.cmdargs.parse_args(self.args)\n config = self._getconfig(self.sources)\n\n if self.needlogin:\n config.credentials = { \n k: getattr(config, self.credentialKey[k].name)\n for k in self.authenticatorInfo.getCredentialKeys(config.auth)\n }\n\n config._freeze_varnames()\n return (self.client, config)",
"def default_user_settings(self) -> pulumi.Input['DomainUserSettingsArgs']:\n return pulumi.get(self, \"default_user_settings\")"
] | [
"0.5933216",
"0.58953714",
"0.5745298",
"0.5708011",
"0.56591547",
"0.5479282",
"0.54645914",
"0.5458572",
"0.5451832",
"0.54214597",
"0.5353748",
"0.5334436",
"0.53038144",
"0.5299923",
"0.5270244",
"0.5248232",
"0.524819",
"0.5247961",
"0.5243685",
"0.52108777",
"0.5208614",
"0.5201351",
"0.5195228",
"0.5187297",
"0.51596504",
"0.5155616",
"0.5137186",
"0.5110137",
"0.5107498",
"0.50961196"
] | 0.7268448 | 0 |
Put Login Domains Global Settings for specified appliance [Arguments] | def fusion_api_edit_login_domains_global_settings(self, body, param='', api=None, headers=None):
return self.domain_settings.put(body, param, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_config(self, args):\n if args.set == \"store_password\":\n put_config_value(\"store_password\", True if args.value.lower() == \"yes\" else False)\n elif args.set == \"password\":\n put_config_value(\"password\", args.value)\n elif args.set == \"username\":\n put_config_value(\"username\", args.value)\n else:\n print(\"Invalid option\")",
"def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n remote.update_config(_get_current_project_name(), settings)",
"def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)",
"def appcfg_login(app):\n if not _GAE_SDK_PATH:\n raise ValueError('Call setup_gae_sdk first')\n if os.path.exists(_appcfg_oauth2_tokens()):\n os.remove(_appcfg_oauth2_tokens())\n # HACK: Call a command with no side effect to launch the flow.\n subprocess.call([\n sys.executable,\n os.path.join(_GAE_SDK_PATH, 'appcfg.py'),\n '--application', app.app_id,\n '--noauth_local_webserver',\n 'list_versions',\n ], cwd=app.app_dir)",
"def _update_site_configuration(self):\n self.site.configuration.site_values = {'THIRD_PARTY_AUTH_ONLY_DOMAIN': self.email_domain_name}\n self.site.configuration.save()",
"def setprivileged(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def fusion_api_get_login_domains_global_settings(self, api=None, headers=None, param=''):\n return self.domain_settings.get(api, headers, param)",
"def init(args):\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n cfgfile.close()",
"def configure_aaa_auth_proxy(device, server_grp):\n try:\n device.configure([\n f\"aaa authorization auth-proxy default group {server_grp}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA auth proxy'\n )",
"def configure_aaa_authentication_login(device,auth_list,auth_type, group_name=''):\n logger.info(f\"Configuring aaa authentication login\")\n\n configs=f\"aaa authentication login {auth_list} {auth_type}\"\n\t\n if group_name:\n configs+=f' group {group_name}'\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(f\"Could not configure aaa authentication login. Error:\\n{e}\")",
"def __main__(*args):\n config = parse_args(args)\n validate_config(config)\n apply_registration_settings(config)",
"def set_config(app):\n # set config from config.py\n app.config.from_object('config')\n\n # override config from secret conf files\n pi_home = os.path.dirname(app.config['ENVPATH']) # /home/pi\n secret_conf_dir = os.path.join(pi_home, 'CONFIG_CHAUDIERE') # /home/pi/CONFIG_CHAUDIERE\n secret_conf_com_file = 'chaudiere_secret_config.py'\n secret_conf_com = secret_conf_dir+'/'+secret_conf_com_file\n try:\n with open(secret_conf_com) as f:\n json_config = json.load(f)\n for conf in ['Common', app.config['ENVNAME']]:\n app.config.update(json_config[conf])\n except IOError as e:\n print('IOError loading conf file (file not existing?): ' + secret_conf_com + str(e))\n except ValueError as e:\n print('ValueError loading JSON : ' + secret_conf_com + ' ' + str(e))\n\n #app.config['USERS_EMAILS'] = list(map(lambda x: x+'@gmail.com', app.config['USERS'])) \n # app.logger.error('test error') # <-- This works !!! ",
"def set_up():\n domain = request.args.get('domain', None)\n #TODO: Check domain is valid and user is admin in apps\n client = Client.get_instance()\n admin_email = users.get_current_user().email()\n if not client:\n #If there is no client object, create it\n Client(id=1, primary_domain_name=domain,\n administrators=[admin_email], reply_to=admin_email).put()\n\n return redirect(url_for('start_oauth2_dance'))",
"def setrestricted(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()",
"def main_configure_ad():\n\n print \"------------- AD Configuration Started --------------\"\n network_settings()\n enable_ad()\n create_user_domain()\n create_groups()\n print \"------------- Setup Finished...Please Check idrac_conf.log for any failures --------------\"",
"def set_api_keys(args):\n if not args:\n raise click.ClickException('no arguments')\n elif len(args) % 2:\n raise click.ClickException('odd number of arguments')\n api_keys = dict(zip(args[::2], args[1::2]))\n keys_path = current_app.config['API_KEYS_PATH']\n\n # If file exists, add new keys to old keys, updating them if necessary\n if os.path.exists(keys_path):\n with open(keys_path, 'r') as f:\n try:\n old_keys = json.load(f)\n except json.decoder.JSONDecodeError:\n pass\n else:\n old_keys.update(api_keys)\n api_keys = old_keys\n\n with open(keys_path, 'w') as f:\n json.dump(api_keys, f)\n current_app.config['API_KEYS'] = api_keys\n click.echo('Keys saved')",
"def main(global_config, **settings):\n\n auth_policy = AuthenticationStackPolicy()\n policy_array = []\n\n main_policy = AuthTktAuthenticationPolicy(settings['auth.main.secret'], timeout=1800 * 60,\n cookie_name=settings['auth.main.cookie'])\n auth_policy.add_policy('main', main_policy)\n policy_array.append({'name': 'main', 'policy': main_policy})\n\n assistant_policy = AuthTktAuthenticationPolicy(settings['auth.assistant.secret'], timeout=1800 * 60,\n cookie_name=settings['auth.assistant.cookie'])\n auth_policy.add_policy('assistant', assistant_policy)\n policy_array.append({'name': 'assistant', 'policy': assistant_policy})\n\n # authn_policy = AuthTktAuthenticationPolicy(settings['auth.secret'], cookie_name='formshare_auth_tkt')\n authz_policy = ACLAuthorizationPolicy()\n config = Configurator(settings=settings, authentication_policy=auth_policy,\n authorization_policy=authz_policy)\n\n apppath = os.path.dirname(os.path.abspath(__file__))\n\n config.include('.models')\n # Load and configure the host application\n load_environment(settings, config, apppath, policy_array)\n return config.make_wsgi_app()",
"def enable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n\n cmd = f'aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa authentication login:\\n{e}'\n )",
"def main():\n parser = argparse.ArgumentParser(description='Creates a Mist site within your organization')\n parser.add_argument('config', metavar='config_file', type=argparse.FileType(\n 'r'), help='file containing all the configuration information')\n args = parser.parse_args()\n configs = json.load(args.config)\n\n claim_ap(configs)",
"def configure_aaa_local_auth(device):\n try:\n device.configure([\n \"aaa authentication dot1x default local\",\n \"aaa local authentication default authorization default\",\n \"aaa authorization network default local\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA local auth'\n )",
"def __init__(self, domain, email, password, app):\n self.client = EmailSettingsClient(domain=domain)\n self.client.ClientLogin(email=email, password=password,\n source=app)",
"def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION))",
"def set_credentials():",
"def do_genconfig(args):\n\n print(\"========= DEFAULT ========\")\n debug = utils.get_input(\n \"Enable agent in debug mode [y/N]: \") or 'n'\n retry_interval = utils.get_input(\n \"Type the polling interval in seconds for daemon to manage the nodes: \")\n batch_publishing_interval = utils.get_input(\n \"Type the publishing interval in seconds for daemon to push the metrics: \")\n refresh_interval = utils.get_input(\n \"Type the polling interval in seconds to get health status directly from OneView: \")\n scmb_certificate_dir = utils.get_input(\n \"Type the certificates directory to register in OneView SCMB [/var/run/oneview-monasca]: \")\n auth_retry_limit = utils.get_input(\n \"Type the maximum number of attempts to try authenticate in REST API: \")\n\n debug = 'false' if debug == 'n' else 'true'\n retry_interval = retry_interval if retry_interval else \"300\"\n refresh_interval = refresh_interval if refresh_interval else \"180\"\n batch_publishing_interval = batch_publishing_interval if batch_publishing_interval else \"60\"\n\n auth_retry_limit = auth_retry_limit if auth_retry_limit else \"5\"\n scmb_certificate_dir = scmb_certificate_dir if scmb_certificate_dir else \"/var/run/oneview-monasca\"\n\n scmb_certificate_dir = os.path.realpath(os.path.expanduser(scmb_certificate_dir))\n utils.makedirs(scmb_certificate_dir)\n\n print(\"========= Openstack =========\")\n auth_url = utils.get_input(\"Type the Keystone url for authentication: \")\n auth_user = utils.get_input(\"Type the name of your OpenStack user: \")\n auth_password = getpass.getpass(\"Type the password for your OpenStack user: \")\n auth_tenant_name = utils.get_input(\"Type the tenant name that the OpenStack user will be authenticated: \")\n monasca_api_version = utils.get_input(\"Type a version of Monasca API that you want to use [2_0]: \")\n\n monasca_api_version = monasca_api_version if monasca_api_version else \"2_0\"\n\n print(\"========= OneView =========\")\n oneview_manager_url = utils.get_input(\"Type the manager_url for the OneView services: \")\n oneview_username = utils.get_input(\"Type your OneView username: \")\n oneview_password = getpass.getpass(\"Type your OneView user's password: \")\n oneview_insecure = utils.get_input(\"Would you like to allow insecure connections to OneView? [Y/n]: \") or \"Y\"\n max_polling_attempts = utils.get_input(\"Max polling attempts OneView requests: \")\n tls_cacert_file = utils.get_input(\"Path to your CA OneView certificate file, if any: \")\n\n oneview_host = utils.extract_domain_from_service_url(oneview_manager_url)\n oneview_insecure = \"true\" if oneview_insecure.lower() == 'y' else \"false\"\n max_polling_attempts = max_polling_attempts if max_polling_attempts else \"15\"\n\n fault_tolerance_enable = False\n group_name = coordinator_url = None\n while True:\n create = utils.get_input(\"Would you like to enable fault tolerance in the agent? 
[Y/n] \") or 'y'\n\n if create.lower() == 'y':\n print(\"========= Tooz =========\")\n\n group_name = utils.get_input(\"The group name for tooz configuration: \")\n coordinator_url = utils.get_input(\"The coordinator url for tooz configuration: \")\n fault_tolerance_enable = True\n break\n elif create.lower() == 'n':\n break\n else:\n print(\"Invalid option.\\n\")\n\n config_drivers = {}\n try:\n names = utils.list_names_driver(const.NAMESPACE_DISCOVERY_NODES, log=False)\n except Exception as ex:\n print('\\nCannot load installed drivers - Error caused by %s\\n' % str(ex))\n names = []\n\n for name in names:\n try:\n conf = utils.load_class_by_alias(\n const.NAMESPACE_DISCOVERY_NODES, name, log=False).genconfig()\n\n config_drivers[name.split('_')[-1]] = conf\n except Exception as ex:\n print('\\nCannot generating config file session to driver: %s - Error caused by %s\\n' % (name, str(ex)))\n\n # Write Configuration file #\n config = ConfigParser()\n config.set(\"DEFAULT\", \"debug\", debug)\n config.set(\"DEFAULT\", \"retry_interval\", retry_interval)\n config.set(\"DEFAULT\", \"periodic_refresh_interval\", refresh_interval)\n config.set(\"DEFAULT\", \"batch_publishing_interval\", batch_publishing_interval)\n\n config.set(\"DEFAULT\", \"auth_retry_limit\", auth_retry_limit)\n config.set(\"DEFAULT\", \"scmb_certificate_dir\", scmb_certificate_dir)\n\n if fault_tolerance_enable:\n config.add_section(\"tooz\")\n config.set(\"tooz\", \"group_name\", group_name)\n config.set(\"tooz\", \"coordinator_url\", coordinator_url)\n\n config.add_section(\"openstack\")\n config.set(\"openstack\", \"auth_url\", auth_url)\n config.set(\"openstack\", \"auth_user\", auth_user)\n config.set(\"openstack\", \"auth_password\", auth_password)\n config.set(\"openstack\", \"auth_tenant_name\", auth_tenant_name)\n config.set(\"openstack\", \"monasca_api_version\", monasca_api_version)\n\n config.add_section(\"oneview\")\n config.set(\"oneview\", \"host\", oneview_host)\n config.set(\"oneview\", \"manager_url\", oneview_manager_url)\n config.set(\"oneview\", \"username\", oneview_username)\n config.set(\"oneview\", \"password\", oneview_password)\n config.set(\"oneview\", \"allow_insecure_connections\", oneview_insecure)\n config.set(\"oneview\", \"max_polling_attempts\", max_polling_attempts)\n config.set(\"oneview\", \"tls_cacert_file\", tls_cacert_file)\n\n for driver in config_drivers:\n config.add_section(driver)\n for option, value in config_drivers[driver].items():\n config.set(driver, option, value)\n\n if not args.config_file:\n args.config_file = '~' + os.path.sep + 'oneview_monasca.conf'\n\n filename = utils.get_input(\n \"Type the path of the new configuration file [%s]: \" % args.config_file) or args.config_file\n full_filename = os.path.realpath(os.path.expanduser(filename))\n\n config_dir = os.path.dirname(full_filename)\n utils.makedirs(config_dir)\n\n with open(full_filename, 'w') as configfile:\n config.write(configfile)\n print(\"======\\nFile created successfully on '%s'!\\n======\" % filename)",
"def __init__ (self, email, domain, password):\n\n self.gd_client = gdata.apps.service.AppsService()\n self.gd_client.email = email\n self.gd_client.domain = domain\n self.gd_client.password = password\n self.gd_client.ProgrammaticLogin()",
"def manage_params(args):\n # Socrata API\n with open(\"secret/builtby-socrata.yaml\", 'r') as f:\n try:\n socrata_api_credentials = yaml.load(f)\n except yaml.YAMLError as exc:\n print(exc)\n\n socrata_app_token = socrata_api_credentials['app_token']\n\n # base params\n params = {\n '$$app_token': socrata_app_token\n }\n # remove null attributes\n args = {k: v for k, v in args.items() if v is not None}\n # add args to params\n params.update(args) # inplace\n\n return params",
"def deploy():\n local('appcfg.py --no_cookies [email protected] update .',\n capture=False)",
"def configure(username, password, domain):\n art = r'''\nWelcome! __ ___. .__\n_____ ____ ____ ____ __ __ _____/ |______ \\_ |__ | | ____\n\\__ \\ _/ ___\\/ ___\\/ _ \\| | \\/ \\ __\\__ \\ | __ \\| | _/ __ \\\n / __ \\\\ \\__\\ \\__( <_> ) | / | \\ | / __ \\| \\_\\ \\ |_\\ ___/\n(____ /\\___ >___ >____/|____/|___| /__| (____ /___ /____/\\___ >\n \\/ \\/ \\/ \\/ \\/ \\/ \\/\n '''\n click.secho(art, fg='blue')\n Config(username=username, password=password, domain=domain)",
"def config_war(alternate=False):\n with lcd(env.projectroot):\n sudo(\"mkdir -p /etc/lagrummet.se\")\n if alternate:\n put(\"manage/sysconf/%(target)s/alternate/lagrummet.se-config.groovy\" % env, \"/etc/lagrummet.se\",\n use_sudo=True)\n else:\n put(\"manage/sysconf/%(target)s/etc/lagrummet.se/lagrummet.se-config.groovy\" % env, \"/etc/lagrummet.se\",\n use_sudo=True)"
] | [
"0.59719497",
"0.5872565",
"0.57970774",
"0.55599004",
"0.54469526",
"0.54291654",
"0.54122066",
"0.5368892",
"0.5367906",
"0.53531665",
"0.53369606",
"0.5290121",
"0.5286907",
"0.52539235",
"0.5224465",
"0.5185917",
"0.5123865",
"0.509597",
"0.5063615",
"0.50545233",
"0.5033412",
"0.5031019",
"0.5010219",
"0.49927148",
"0.49842268",
"0.49804243",
"0.49788135",
"0.49779928",
"0.496546",
"0.49353403"
] | 0.6536116 | 0 |
Login to the appliance as the specified user by certificate [Arguments] | def fusion_api_two_factor_login_appliance(self, host, cert, headers=None):
return self.loginsession.login_by_cert(host, cert, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def login(username, password, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run([\n 'devpi', 'login', '--clientdir', clientdir,\n username, '--password', password])",
"def do_login(cs, args):\n resp = cs.users.login(args.username, args.password, cs.baseurl)\n if resp.status_code == 200:\n print(\"Successfully login, session id: %s\" %\n resp.cookies.get('beegosessionID'))\n else:\n print(\"Failed to login! Please re-check your username and password\")",
"def loginUser(SID, username, userpass):\n return call(\"loginUser\", SID, username, userpass)",
"def login(self, **kwargs):\n\tusername = kwargs.get('username', self.username)\n\tif not username:\n\t raise RuntimeError, 'no username provided'\n\n\tpassword = kwargs.get('password', self.password)\n\tif not password:\n\t raise RuntimeError, 'no password provided'\n\tself.call('login', username=username, password=password)",
"def do_login(user, password):\n return um.do_login(user, password)",
"def do_login(self, backend, user):",
"def login(args, syn):\n syn.login(args.synapseUser, args.synapsePassword, rememberMe=args.rememberMe)",
"def login(email, password):\n rino.login.login(email, password)",
"def login(email, password):\n rino.login.login(email, password)",
"def __login(self, args = []):\n\n try:\n \n # Send username and wait for an ACK\n self.__cm.send(p.T_USER, [self.__username])\n reply = self.__cm.receive()\n \n if (reply.type != p.T_ACK):\n raise Exception, \"Unable to login!\"\n\n # Send password and wait for an ACK\n self.__cm.send(p.T_PASS, [self.__password])\n reply = self.__cm.receive()\n \n if (reply.type != p.T_ACK):\n raise Exception, \"Invalid credentials!\"\n\n except Exception,e:\n self.__handleError('Authenticate', e)",
"def login_action(login_page, request, driver):\n login_page.login(request.config.getoption(\"--username\"), request.config.getoption(\"--password\"))",
"def login(self, username, password):\n\t\turl = \"https://habitica.com/api/v3/user/auth/local/login\"\n\t\tpayload = {\"username\": username, \"password\": password}\n\t\treturn(postUrl(url, self.credentials, payload))",
"def signin(self, username=None, password=None):\n try:\n loadAPI_key(username, password)\n databases.checkUser(username, password)\n print(\"API KEY\")\n signing_key = cherrypy.session['signing_key']\n\n pubkey_hex = signing_key.verify_key.encode(encoder=nacl.encoding.HexEncoder)\n pubkey_hex_str = pubkey_hex.decode('utf-8')\n\n message_bytes = bytes(pubkey_hex_str + username, encoding='utf-8')\n signed = signing_key.sign(message_bytes, encoder=nacl.encoding.HexEncoder)\n signature_hex_str = signed.signature.decode('utf-8')\n\n addPubkey(pubkey_hex_str, signature_hex_str)\n\n error = authoriseUserLogin(pubkey_hex_str)\n\n headers = {\n 'X-username': username,\n 'X-apikey': cherrypy.session['api_key'],\n 'Content-Type': 'application/json; charset=utf-8',\n }\n\n loginserver_record_get = requests.get(url=\"http://cs302.kiwi.land/api/get_loginserver_record\", headers=headers).json()\n loginserver_record = loginserver_record_get[\"loginserver_record\"]\n\n print(error)\n if error != 1:\n cherrypy.session['pubkey_hex_str'] = pubkey_hex_str\n cherrypy.session['signature_hex_str'] = signature_hex_str\n cherrypy.session['loginserver_record'] = loginserver_record\n getListAPI()\n userList = listUsers()\n requests.get(url=\"http://cs302.kiwi.land/api/check_pubkey\", headers=headers)\n ping()\n raise cherrypy.HTTPRedirect('/')\n else:\n raise cherrypy.HTTPRedirect('/login?bad_attempt=1')\n except:\n raise cherrypy.HTTPRedirect('/index')",
"def login(username=None, password=None):\n username = username or os.environ.get('LDS_USERNAME')\n password = password or os.environ.get('LDS_PASSWORD')\n\n s = requests.Session()\n url = service_config()['auth-url']\n data = {'username': username, 'password': password}\n r = s.post(url, data, allow_redirects=False)\n if r.status_code != 200:\n raise AuthenticationError(r)\n return s, fetch_current_user_detail(s)",
"def login(self, username, password):\n return self.app.post('/login', data = dict(\n username = username,\n password = password\n ), follow_redirects = True)",
"def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])",
"def login(self, username, password):\n\n cred = {\"email\": username, \"passwd\": password}\n\n return self.post(\"login\", cred)",
"def login():",
"def login():",
"def login(**kwargs):\n root_commands.cmd_login(**kwargs)",
"def login_with_credentials(username,password):\r\n cr=chrome.Chrome(username=username,password=password,disable_default_apps=False,autotest_ext=True)\r\n return cr",
"def perform_login(self, user_name, user_pass):\n if self.api_type == 'real':\n self.input_user_name(user_name)\n self.input_user_pass(user_pass)\n self.click_login_button()",
"def login(username=None, password=None):\n session = requests.Session()\n if username and password:\n user_credentials = {'username': username,\n 'password': password,\n 'language': 'UA'}\n else:\n user_credentials = get_user_credentials()\n response = perform_post_request(session,\n Urls.LOGIN.value,\n user_credentials,\n get_headers())\n return response",
"def main():\n parser = optparse.OptionParser('-u username ' + '-p password')\n parser.add_option('-u', dest='username', type='string', help=\"Game username- enter within '' marks\")\n parser.add_option('-p', dest='password', type='string', help=\"Game password- enter within '' marks\")\n (options, args) = parser.parse_args()\n u_name = options.username\n p_word = options.password\n # if no username or password is given, print out the usage\"\n if u_name is None or p_word is None:\n print(parser.usage)\n exit(0)\n\n login(u_name, p_word)",
"def login(username, password, store=True):\r\n url = '{}/login'.format(USGS_API_ENDPOINT)\r\n payload = {\r\n \"jsonRequest\": payloads.login(username, password)\r\n }\r\n logger.debug(\"API call URL: {}\".format(url))\r\n logger.debug(\"API call payload hidden.\")\r\n resp = requests.post(url,payload)\r\n if resp.status_code is not 200:\r\n raise USGSError(resp.text)\r\n response = resp.json()\r\n logger.debug(\"Received response:\\n{}\".format(json.dumps(response, indent=4)))\r\n apiKey = response[\"data\"]\r\n\r\n if apiKey is None:\r\n raise USGSError(response[\"error\"])\r\n \r\n if store:\r\n logger.debug(\"Writing API key to file {}\".format(KEY_FILE))\r\n with open(KEY_FILE, \"w\") as f:\r\n f.write(apiKey)\r\n \r\n return response",
"def login(self, username, password):\n return self.app.post('/login', data=dict(\n username=username,\n password=password\n ), follow_redirects=True)",
"def fusion_api_login_appliance(self, host, creds, headers=None):\n # logger._log_to_console_and_log_file(\"Logging into appliance\")\n return self.loginsession.login(host, creds, headers)",
"def initiateAuthentication(identity_url, return_to=None):",
"def login(args, config):\n \n api = config['API']\n username = args.__dict__.get('user')\n if not username:\n username = input('Enter username: ')\n password = getpass.getpass('Enter password: ')\n # send POST request to login with username and password\n pld = {'username': username, 'password': password}\n h = {'blank': 'header'}\n r = Request(api['login'], data=urlencode(pld).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() == 401:\n print('Your account is not authorized for this action')\n if e.getcode() == 406: # unacceptable\n print('406 mate, you sent something bad')\n print('Bad login detected. Please check your username/password.')\n return\n hdr_in = {}\n for i in resp.getheaders():\n hdr_in[i[0]] = i[1] # create dict from list of tuples \n token = hdr_in.get('token')\n exp = hdr_in.get('exp') # expiration\n _ex = datetime.fromtimestamp(int(exp))\n ex = _ex.strftime('%Y-%m-%dT%H:%M:%S')\n # write JWT to local tempfile--can be overwritten with new JWTs\n # TODO make tempfile ~/.jwt or something\n tmp = 'jwt.tmp'\n pth = os.getcwd()\n with open(os.path.join(pth, tmp), 'w+') as _jwt:\n _jwt.write(token) # write token to file\n expr = ' Your session will expire at {} '.format(ex)\n m = '\\n{:*^80}\\n{:*^80}\\n'.format(' Welcome to FLEET, {} '.format(username), expr)\n print(m)",
"def login(userID, robotID, password): #@NoSelf"
] | [
"0.63482815",
"0.62892044",
"0.6265031",
"0.6240665",
"0.6207639",
"0.61395174",
"0.611391",
"0.5990242",
"0.5990242",
"0.5975603",
"0.5896115",
"0.5886969",
"0.58474463",
"0.58398",
"0.58342856",
"0.5804081",
"0.578703",
"0.5779355",
"0.5779355",
"0.5769054",
"0.5764178",
"0.5756839",
"0.57391304",
"0.5723616",
"0.571848",
"0.57083786",
"0.5706201",
"0.5705755",
"0.56992316",
"0.56834966"
] | 0.6567106 | 0 |
Returns the list of active user sessions. You can use Fusion Api Switch Active User to any of these users. [Example] ${resp} = Fusion Api Get Active Sessions | def fusion_api_get_active_sessions(self):
return self.loginsession.get_active_sessions() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_active_user_sessions(self, param='', api=None, headers=None):\n return self.usersessions.get(api=api, headers=headers, param=param)",
"def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions",
"def sessions(self):\n return utils.listItems(self, '/status/sessions')",
"def get_active_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == True).fetch()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200",
"def get_in_active_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == False).fetch()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200",
"def return_active_users():\n return json.dumps(app.active_users)",
"def get_current_users(self):\n active_sessions = Session.objects.filter(expire_date__gte=timezone.now())\n user_id_list = []\n for session in active_sessions:\n data = session.get_decoded()\n user_id_list.append(data.get('_auth_user_id', None))\n # Query all logged in users based on id list\n return self.filter(id__in=user_id_list)",
"def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()",
"def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body",
"def active_sessions(self):\n skey = self.r_key('active_sessions')\n sessions_to_expire = []\n for user_id in self.r_server.smembers(skey):\n ukey = self.r_key('session', user_id)\n if self.r_server.exists(ukey):\n yield user_id, self.load_session(user_id)\n else:\n sessions_to_expire.append(user_id)\n\n # clear empty ones\n for user_ids in sessions_to_expire:\n self.r_server.srem(skey, user_id)",
"async def get_in_active_users_async(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == False).fetch_async().get_result()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200",
"def get_or_create_sessions(self):\n\t\tpath = f'{self.BIKE_ENDPOINT}user/current/session?{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response",
"def active_users(self, *args, **kwargs):\r\n return self._get('ActiveUsers', *args, **kwargs)",
"def get_all_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query().fetch()]\n message: str = 'successfully retrieved active users'\n return jsonify({'status': True, 'payload': users_list, 'message': message}), 200",
"def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])",
"async def get_active_users_async(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == True).fetch_async().get_result()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200",
"def get(self):\n\n response = openvidu().list_sessions()\n\n if response.status_code == 200:\n return response.json()[\"content\"]\n abort(response)",
"def list(self, request, *args, **kwargs):\n self.check_authentication(request)\n serializer = SessionSerializer(\n context={\"request\": request, \"view\": self},\n instance=[_Session(request)],\n many=True,\n )\n return Response(serializer.data)",
"def get_sessions(url: str, token: str) -> List[Session]:\n sessions_url = f'{url}api/sessions'\n response = requests.get(sessions_url, params={'token': token})\n assert(response.status_code == 200)\n sessions_raw = json.loads(response.text)\n sessions = []\n for session_raw in sessions_raw:\n session = Session(\n path = session_raw['path'],\n last_activity = dateutil.parser.isoparse(session_raw['kernel']['last_activity']),\n execution_state = session_raw['kernel']['execution_state']\n )\n assert(session['execution_state'] in valid_execution_states)\n sessions.append(session)\n\n sessions.sort(key=lambda session: session['last_activity'], reverse=True)\n return sessions",
"def active_users(request):\n user_id = BhagirathSession.objects.all().filter(logout_timestamp=None).values('user').distinct(true_or_false=True)\n \n dict = {}\n list = []\n for i in user_id:\n k = User.objects.get(pk=i['user'])\n j = BhagirathSession.objects.all().filter(user=k,logout_timestamp=None)\n dict['username'] = k.username\n dict['login_timestamp'] = j[0].login_timestamp\n list.append(dict)\n data = {\n 'active_users_list':list,\n 'count':len(list)\n }\n return render_to_response('my_admin_tools/menu/active_users.html',data,context_instance=RequestContext(request))",
"def get_sessions(self):\n return self.current_sessions",
"def users():\n access_token = session['access_token']\n return \"%s\" % list_users(access_token)",
"def get_sessions(self):\n\n return self.all_sessions",
"def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions",
"def list(self, status: Optional[str] = None) -> SessionList:\n filter = {\"status\": status} if status else None\n return self._list(list_cls=SessionList, resource_cls=Session, method=\"GET\", filter=filter)",
"def users_active(self):\n return self.users(\"inactive == NO\")",
"def get_user_sessions(base_url, group_id, token, user_id):\n url = base_url + route_user_sessions.format(user_id=user_id)\n response = requests.get(url, headers=headers(group_id, token))\n return response",
"def active_sessions(request, order_number):\n account = get_object_or_404(User, username=request.user)\n order = get_object_or_404(Order, order_number=order_number)\n\n # CHECK FOR ACTIVE SESSIONS\n lineitems = OrderLineItem.objects.filter(order=order)\n session = None\n\n current_ts = datetime.datetime.now(tz=pytz.timezone('UTC'))\n\n for item in lineitems:\n seconds_until = (item.start_datetime - current_ts).total_seconds()\n # IF WITHIN 5 MIN OF SESSION START TIME, OR CURRENT TIME IS START TIME,\n # OR CURRENT TIME IS BETWEEN START AND END TIME\n if (seconds_until < 300 or current_ts == item.start_datetime or\n current_ts > item.start_datetime and current_ts < item.end_datetime):\n session = item\n if session:\n context = {\n 'account': account,\n 'order': order,\n 'session': session,\n }\n return render(request, 'active_sessions/active_sessions.html', context)\n else:\n return redirect('/profile')",
"def fusion_api_get_session_info(self, api=None, headers=None, param='', sessionID=None):\n return self.sessions.get(api, headers, param, sessionID)"
] | [
"0.7700086",
"0.6694936",
"0.6650115",
"0.66037244",
"0.65487003",
"0.6505804",
"0.6464903",
"0.6437998",
"0.6437205",
"0.64119065",
"0.6211409",
"0.6160088",
"0.614697",
"0.6115182",
"0.6084786",
"0.6073577",
"0.6058707",
"0.5989116",
"0.5967596",
"0.59588283",
"0.5904886",
"0.5899066",
"0.5895072",
"0.58795005",
"0.58612335",
"0.5858451",
"0.5852692",
"0.5824005",
"0.5802777",
"0.5802417"
] | 0.72672766 | 1 |
Returns the current active user [Example] ${resp} = Fusion Api Get Active User | def fusion_api_get_active_user(self):
return self.loginsession.get_active_user() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user():\n userdict = jsonify2(current_user.db_user, 'User')\n return current_app.bitjws.create_response(userdict)",
"def get_user():\n\treturn '1', 200",
"def getCurrentUser():\n return jsonify({\n 'username': g.user\n })",
"def fusion_api_get_user(self, uri=None, param='', api=None, headers=None):\n return self.user.get(uri=uri, api=api, headers=headers, param=param)",
"def get_current_user():\n token = request.headers['token']\n decoded_token = decode_token(token)\n userId = decoded_token[\"userId\"]\n for user_obj in users_table:\n if user_obj.userId == userId:\n return {\"userId\": userId, \"isAdmin\": user_obj.isAdmin}",
"def get():\n return prepare_response(get_user_info())",
"def get_user(self):\n\n r = requests.get(\n self._url('/usermanagement/userinfo'),\n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n user = r.json()\n log.info('user {:s} currently logged in'.format(user['login']))\n\n return user",
"def get(self):\n\n user = None\n if self.request.headers.get('X-Pp-User'):\n user = self.request.headers['X-Pp-User']\n\n result_json = {\n \"user\": user\n }\n\n self.success(result_json)",
"def authcheck():\n user = get_user()\n return jsonify({'current_identity': user.username})",
"def request_user_info():\n session = requests.Session()\n session.headers = {\n 'Authorization': f'Bearer {current_access_token}',\n }\n retries = Retry(\n total=5, connect=3, read=3, status=3,\n status_forcelist=[408, 500, 502, 503, 504],\n backoff_factor=0.2,\n respect_retry_after_header=True,\n )\n base_url = current_app.config['AUTH0_BASE_URL']\n adapter = requests.adapters.HTTPAdapter(max_retries=retries)\n session.mount(base_url, adapter)\n\n info_request = session.get(base_url + '/userinfo', timeout=3.0)\n\n info_request.raise_for_status()\n user_info = info_request.json()\n return user_info",
"def get_current_user(no_auth, app):\n\n if no_auth:\n logging.info(\"NO AUTH enabled. get_current_user\")\n accounts = app.data.driver.db['user']\n user = accounts.find_one({\"last_name\": \"Doe\"})\n else:\n user = app.auth.get_request_auth_value()\n\n return user",
"def get_user():\n filters = make_filters(FilterType.AND, request.json)\n user = user_service.get_user(filters)\n if not user:\n response = {\n \"status\": False,\n \"message\": \"No se encontro al usuario que intentas buscar\",\n }\n return make_response(jsonify(response), 404)\n response = {\"status\": True, \"user\": user}\n resp = make_response(dumps(response), 200)\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp",
"def getUser():\n\n if 'token' in session:\n return \"Authenticated\"\n else:\n return \"Unauthenticated\"",
"def get_current_user(request):\n if 'user-id' in request.session and hasattr(request, 'dbsession'):\n return request.dbsession.query(User).filter(and_(User.id == request.session['user-id'],\n User.status == 'active')).first()\n return None",
"def getUser():\n\t\tuser = users.get_current_user()\n\t\tuserList = db.GqlQuery(\"SELECT * FROM AppUser WHERE id = :1 LIMIT 1\",\n\t\t\t\t\t\t\tuser).fetch(1)\n\t\tif userList == []:\t\t# Wasn't found\n\t\t\treturn AppUser.registerUser()\n\t\treturn userList[0]",
"def fetch_current_user_detail(s):\n url = service_config()['current-user-detail']\n r = s.get(url)\n r.raise_for_status()\n return r.json()",
"def get_current(self):\n auth_token = session.get(\"auth_token\")\n print(auth_token)\n if not auth_token:\n return None\n user = db.user.find_one({\"auth_token\":auth_token})\n\n return user",
"async def get_user_account(self):\n uri = \"/fapi/v1/account\"\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error",
"def current_user_info():\n\n return current_user",
"def api_get_current_user(**kwargs):\n response = jsonify({\n 'currentUser': kwargs['current_user'].to_dict(),\n })\n response.status_code = 200\n return response",
"def get_current_user():\n handler = FakeRequest()\n return handler.get_current_user()",
"def user_info(self):\n return self.auth.get_user_by_session()",
"def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()",
"def getUser(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n return user",
"def get(self):\n response = users_db.get_user_by_id(get_jwt_identity())\n return Response(dumps(response), mimetype='application/json')",
"def user_return():\n\n #take csrf-token from cookies\n token = request.cookies.get('token')\n #decoding token\n uuid = jwt.decode(token, app.config['SECRET_KEY'], algorithms=[\"HS256\"])['user_id']\n #get current user\n user = User.query.filter_by(uuid=uuid).first()\n return user",
"def get_user():\n try:\n if 'gauth_token' in session:\n response = authenticate_with_users_service(\n session['gauth_token'])\n if response.status_code == 201:\n return response.json()\n return None # Not signed in\n except requests.exceptions.ConnectionError:\n return None # Can't connect to users service",
"def get_logged_info():\n user = current_identity\n return make_response(dumps({\"status\": True, \"user\": user}), 200)",
"def get_user(username):\n return jsonify(admin.get_user_info(current_app.scoped_session(), username))",
"def get(self):\n user = get_current_user()\n\n if user is None:\n context = {\n 'authenticated': False,\n }\n else:\n context = {\n 'authenticated': True,\n 'user': user,\n }\n\n return self.respond(context)"
] | [
"0.7438861",
"0.72328675",
"0.68916947",
"0.68579465",
"0.6833786",
"0.6794887",
"0.6662057",
"0.6657314",
"0.6634899",
"0.65766335",
"0.65574354",
"0.65446764",
"0.6523515",
"0.65150505",
"0.64918685",
"0.6485048",
"0.64533794",
"0.64223474",
"0.64085007",
"0.6406101",
"0.6401062",
"0.63886917",
"0.63773715",
"0.6350684",
"0.6345705",
"0.63430786",
"0.63405055",
"0.63377625",
"0.6333036",
"0.6331714"
] | 0.7599319 | 0 |
Set the given sessionId as current active sessionID. [Arguments] | def fusion_api_set_active_session(self, sessionId):
return self.loginsession.set_active_session(sessionId) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_session_id(self, context):\n context.set_session_id(b\"abc\")",
"def session_id(self, session_id):\n\n self._session_id = session_id",
"def setSession( self, name, value, REQUEST=None, cookie=None ):\n SetSessionValue( self, name, value, REQUEST, cookie )",
"def do_SetSessionName (self, line):\r\n OpensslTracking.session = line",
"def current_session(self, session):\n if self._session is None:\n self._session = session\n else:\n if session is None or self._session.session_id != session.session_id:\n self._session.active = False\n self._session = session",
"def set_session(context, key, value):\n session_manager = getToolByName(context, 'session_data_manager')\n session = session_manager.getSessionData()\n session[key] = value",
"def test_set_session_id_unicode(self, context):\n pytest.deprecated_call(context.set_session_id, \"abc\")",
"def set_user_id(uid):\n local.user_id = uid",
"def vscr_ratchet_group_session_set_my_id(self, ctx, my_id):\n vscr_ratchet_group_session_set_my_id = self._lib.vscr_ratchet_group_session_set_my_id\n vscr_ratchet_group_session_set_my_id.argtypes = [POINTER(vscr_ratchet_group_session_t), vsc_data_t]\n vscr_ratchet_group_session_set_my_id.restype = None\n return vscr_ratchet_group_session_set_my_id(ctx, my_id)",
"def set_session_cookie(self):\n self.driver.get('{domain}/home/learn/index#/{cid}/go'.format(domain=domain,cid=cid))\n for subCookie in self.driver.get_cookies():\n self.session.cookies.set(subCookie[u'name'], self.driver.get_cookie(subCookie[u'name'])['value'])\n if config.DEBUG:\n print \"session cookies :: \\n{}\".format(self.session.cookies)",
"def set_sessid(sessid):\n filename = path.join(path.expanduser('~'), '.profrc')\n config = configparser.ConfigParser()\n config.read(filename)\n config.set('DEFAULT', 'Session', sessid)\n with open(filename, 'w') as configfile:\n print(\"write a new sessid\")\n config.write(configfile)",
"def for_session(self, session_id):\n if not isinstance(session_id, str):\n raise TypeError('Session Id must be a string')\n\n self.token['sessionId'] = session_id\n\n return self",
"def set(self, session):\n raise InvalidSessionException('Need to be implemented')",
"def test_modify_anonymous_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='123456'\")\r\n self.assertEqual('baz', cursor.fetchone()[0])",
"async def set_session(self,ctx,stype,*,text): \n if stype == \"main\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.main.set(text)\n elif stype == \"red\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.red.set(text)\n elif stype == \"amber\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.amber.set(text)\n elif stype == \"green\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.green.set(text)\n else:\n await ctx.send(\"invalid team\")",
"def set_login_session(self, session_id=None):\r\n meta = self.get_meta()\r\n old_login = meta.get('session_id', None)\r\n if old_login:\r\n SessionStore(session_key=old_login).delete()\r\n meta['session_id'] = session_id\r\n self.set_meta(meta)\r\n self.save()",
"def set_user_cookie_id():\n #new fresh user\n if not request.cookies.get(config.COOKIE_ADSABS2_NAME):\n if current_user.is_anonymous():\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()\n #the user has already visited the web site\n else:\n if current_user.is_anonymous():\n #if the cookie is a valid UUID it's ok\n curr_cookie = request.cookies.get(config.COOKIE_ADSABS2_NAME)\n try:\n uuid.UUID(curr_cookie)\n g.user_cookie_id = curr_cookie\n #otherwise the app generates a new one\n except ValueError:\n g.user_cookie_id = unicode(uuid.uuid4())\n else:\n g.user_cookie_id = current_user.get_id()",
"def sid(self, sid):\n self._sid = sid",
"def set_current_user(self, user):\n self.session['u'] = user.get().key.urlsafe()",
"def on_session_started(session_started_request, session):\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])",
"def do_session(self, name):\n if name != \"\":\n self.session = name\n else:\n print('\\n'+self.session+'\\n')",
"def set_session(session):\n\n global session_\n session_ = session\n import observatory.api.server.api as api\n\n api.session_ = session",
"def setSessionParameters(self,\n url=None,\n origin=None,\n protocols=None,\n useragent=None,\n headers=None,\n proxy=None):",
"def setIdentity(self) -> None:\n ...",
"def test_set_session_id_fail(self, context):\n with pytest.raises(Error) as e:\n context.set_session_id(b\"abc\" * 1000)\n\n assert e.value.args[0][0] in [\n # 1.1.x\n (\n \"SSL routines\",\n \"SSL_CTX_set_session_id_context\",\n \"ssl session id context too long\",\n ),\n # 3.0.x\n (\n \"SSL routines\",\n \"\",\n \"ssl session id context too long\",\n ),\n ]",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def session(self, value: ClientSession):\r\n self._session = value"
] | [
"0.69700724",
"0.6181397",
"0.5990021",
"0.58904654",
"0.5849663",
"0.58051276",
"0.5730266",
"0.5649466",
"0.56187475",
"0.5562823",
"0.5540941",
"0.5515735",
"0.5503887",
"0.5453762",
"0.5404824",
"0.5399669",
"0.5393314",
"0.53604776",
"0.53512084",
"0.535011",
"0.53450435",
"0.53140455",
"0.53139246",
"0.53073376",
"0.53066325",
"0.5277577",
"0.5277577",
"0.5277577",
"0.5277577",
"0.5268753"
] | 0.7790086 | 0 |
Updates a Managed SAN. [Arguments] | def fusion_api_update_managed_san(self, body, uri, api=None, headers=None):
return self.ms.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_edit_san_manager(self, body, uri, api=None, headers=None):\n return self.dm.update(body, uri, api, headers)",
"def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)",
"def test_update_nas_share(self):\n pass",
"def vm_update(args):\n ip1 = args.ip1\n flavor = args.flavor\n numcpus = args.numcpus\n memory = args.memory\n plan = args.plan\n autostart = args.autostart\n noautostart = args.noautostart\n dns = args.dns\n host = args.host\n domain = args.domain\n cloudinit = args.cloudinit\n template = args.template\n net = args.network\n information = args.information\n iso = args.iso\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for name in names:\n if dns:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n if net is not None:\n nets = [net]\n else:\n nets = k.vm_ports(name)\n if nets and domain is None:\n domain = nets[0]\n if not nets:\n return\n else:\n k.reserve_dns(name=name, nets=nets, domain=domain, ip=ip1)\n elif ip1 is not None:\n common.pprint(\"Updating ip of vm %s to %s...\" % (name, ip1))\n k.update_metadata(name, 'ip', ip1)\n elif cloudinit:\n common.pprint(\"Removing cloudinit information of vm %s\" % name)\n k.remove_cloudinit(name)\n return\n elif plan is not None:\n common.pprint(\"Updating plan of vm %s to %s...\" % (name, plan))\n k.update_metadata(name, 'plan', plan)\n elif template is not None:\n common.pprint(\"Updating template of vm %s to %s...\" % (name, template))\n k.update_metadata(name, 'template', template)\n elif memory is not None:\n common.pprint(\"Updating memory of vm %s to %s...\" % (name, memory))\n k.update_memory(name, memory)\n elif numcpus is not None:\n common.pprint(\"Updating numcpus of vm %s to %s...\" % (name, numcpus))\n k.update_cpus(name, numcpus)\n elif autostart:\n common.pprint(\"Setting autostart for vm %s...\" % name)\n k.update_start(name, start=True)\n elif noautostart:\n common.pprint(\"Removing autostart for vm %s...\" % name)\n k.update_start(name, start=False)\n elif information:\n common.pprint(\"Setting information for vm %s...\" % name)\n k.update_descrmation(name, information)\n elif iso is not None:\n common.pprint(\"Switching iso for vm %s to %s...\" % (name, iso))\n k.update_iso(name, iso)\n elif flavor is not None:\n common.pprint(\"Updating flavor of vm %s to %s...\" % (name, flavor))\n k.update_flavor(name, flavor)\n elif host:\n common.pprint(\"Creating Host entry for vm %s...\" % name)\n nets = k.vm_ports(name)\n if not nets:\n return\n if domain is None:\n domain = nets[0]\n k.reserve_host(name, nets, domain)",
"def update(self, code, *args, **kwargs):\n\n if not args and not kwargs:\n raise Exception('attributes for CostCenter are missing')\n\n attributes = args[0] if args else kwargs\n attributes = dict((k, v) for k, v in attributes.items())\n attributes.update({'service': self.SERVICE})\n _, _, cost_center = self.http_client.put(\"/costcenters/{code}\".format(code=code), body=attributes)\n return cost_center",
"def fusion_api_update_server_certificate(self, aliasname, body, api=None, headers=None):\n return self.server_certificate.put(aliasname, body, api, headers)",
"def fusion_api_edit_datacenter(self, body, uri, api=None, headers=None):\n return self.dc.update(body, uri, api, headers)",
"def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)",
"def test_update_nas_share_by_nas(self):\n pass",
"def update(domain_name=None, primary_ns=None, admin_mail=None, refresh=None,\n retry=None, expire=None, default_ttl=None, patch=False, **kwargs):\n url = 'https://api.cloudns.net/dns/modify-soa.json'\n\n params = Parameters({\n 'domain-name': domain_name,\n 'primary-ns': primary_ns,\n 'admin-mail': admin_mail,\n 'refresh': {\n 'value': refresh,\n 'min_value': 1200,\n 'max_value': 43200,\n },\n 'retry': {\n 'value': retry,\n 'min_value': 180,\n 'max_value': 2419200,\n },\n 'expire': {\n 'value': expire,\n 'min_value': 1209600,\n 'max_value': 2419200,\n },\n 'default-ttl': {\n 'value': default_ttl,\n 'min_value': 60,\n 'max_value': 2419200,\n },\n })\n\n return requests.post(url, params=params.to_dict())",
"def put(self, *args, **kwargs):\n\n addr = EtherAddress(args[0])\n\n if 'desc' in kwargs:\n self.service.update(addr, kwargs['desc'])\n else:\n self.service.update(addr)",
"def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):",
"def sync_device_to_ralph3(data):\n dca = ImportedObjects.get_object_from_old_pk(DataCenterAsset, data['id'])\n if 'hostname' in data:\n dca.hostname = data['hostname']\n if 'management_ip' in data:\n _handle_management_ip(\n dca, data.get('management_ip'), data.get('management_hostname')\n )\n if 'service' in data and 'environment' in data:\n dca.service_env = _get_service_env(data)\n if 'venture_role' in data:\n dca.configuration_path = _get_configuration_path_from_venture_role(\n venture_role_id=data['venture_role']\n )\n dca.save()\n if 'custom_fields' in data:\n for field, value in data['custom_fields'].items():\n dca.update_custom_field(field, value)\n if 'ips' in data:\n _handle_ips(dca, data['ips'])",
"def update(\n self,\n Enabled=None,\n InternalRootPathCost=None,\n Mac=None,\n PortPriority=None,\n Priority=None,\n UpdateRequired=None,\n VlanId=None,\n ):\n # type: (bool, int, str, int, str, bool, int) -> Vlan\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def update_volume( opencloud_volume ):\n\n client = connect_syndicate()\n\n vol_name = opencloud_volume.name\n vol_description = opencloud_volume.description\n vol_private = opencloud_volume.private\n vol_archive = opencloud_volume.archive\n vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )\n\n try:\n rc = client.update_volume( vol_name,\n description=vol_description,\n private=vol_private,\n archive=vol_archive,\n default_gateway_caps=vol_default_gateway_caps )\n\n if not rc:\n raise Exception(\"update_volume(%s) failed!\" % vol_name )\n\n except Exception, e:\n # transort or method error \n logger.exception(e)\n return False\n\n else:\n return True",
"def updatesid(dn, sid, l):\n mod_attrs = [(ldap.MOD_REPLACE, 'sambaSID', sid )]\n l.modify_s(dn, mod_attrs)",
"def _subject_member_update(context, memb_ref, values, session=None):\n _drop_protected_attrs(models.SubjectMember, values)\n values[\"deleted\"] = False\n values.setdefault('can_share', False)\n memb_ref.update(values)\n memb_ref.save(session=session)\n return memb_ref",
"async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def test_update_member(self):\r\n resource = 'member'\r\n cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })",
"def update_stcs(hostname, username, password, src: str, dst: str = 'ITM.ITE.DEV.PROCLIB', member_mask: str = None,\n members: str = Union[str, List[str]],\n libraries: Union[str, List[str]] = None):\n z = zOSMFConnector(hostname, username, password)\n zftp = ZFTP(hostname, username, password)\n if member_mask is not None:\n list_job = z.list(src, member_pattern=member_mask)\n elif isinstance(members, str):\n list_job = [members]\n else:\n list_job = members\n if isinstance(libraries, str):\n libs = [libraries]\n else:\n libs = libraries\n for job in list_job:\n data = z.read_ds(f'{src}({job})')\n for library in libs:\n if library not in data:\n name_lib = re.findall(r'(?<=\\.\\w)(\\w+)(?!\\.)', library)[-1]\n if data.find('&BASEHLEV.'+name_lib):\n if 'APF' in job:\n entries = re.findall(rf'//\\s+DSNAME=&BASEHLEV.{name_lib},', data)\n start=0\n for entry in entries:\n index_start = re.search(entry, data[start:]).start() + start\n data = data[:index_start] + f'// DSNAME={library},SMS\\n// SETPROG APF,ADD,\\n' + data[index_start:]\n start=re.search(entry, data).end()\n else:\n entries = re.findall(rf'//\\s+DSN=&BASEHLEV.{name_lib}\\s', data)\n start=0\n for entry in entries:\n index_start = re.search(entry, data[start:]).start() + start\n data = data[:index_start] + f'// DSN={library}\\n// DD DISP=SHR,\\n' + data[index_start:]\n start = re.search(entry, data).end()\n count = 0\n for library in libs:\n if library in data:\n count += 1\n logger.info(f\"{library} added to {job}\")\n if count == 0:\n logger.info(f\"{job} doesn't have libraries\")\n elif count > 0:\n logger.info(f\"Writing {dst}({job})\")\n zftp.upload_ds(text=data, dest=f'{dst}({job})')",
"def do_put(args):\n session = BMC(server=args.server, username=args.username, password=args.password)\n if session.put(\"{0}/attr/{1}\".format(args.path, args.attr), args.value):\n do_get(args)",
"def catalog_update_request(table_name: str, stac_item_key: str):\n\n get_client(\"dynamodb\").put_item(\n TableName=table_name,\n Item={\n \"stacitem\": {\"S\": stac_item_key},\n \"datetime\": {\"S\": str(datetime.datetime.now())},\n },\n )",
"def edit_device(\n self,\n address: Any = None,\n duty1: Any = None,\n duty2: Any = None,\n freq1: Any = None,\n freq2: Any = None\n ) -> requests.Response:\n params = {\n 'address': address,\n 'duty1': duty1,\n 'duty2': duty2,\n 'freq1': freq1,\n 'freq2': freq2\n }\n return self._call('PATCH', '/devices', params=params)",
"def update_cert(c, stack_name, domain_name, profile, create=False):\n action = 'create' if create else 'update'\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-cert',\n '--template-body', f'file://cert.yaml',\n '--parameters',\n f'ParameterKey=DomainName,ParameterValue={domain_name}',\n f'--profile', f'{profile}')\n # Cert also needs adding to us-east-1 to be used by CloudFront\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-cert',\n '--template-body', f'file://cert.yaml',\n '--parameters',\n f'ParameterKey=DomainName,ParameterValue={domain_name}',\n f'--profile', f'{profile}',\n '--region', 'us-east-1')",
"def update_entity(self, device_id, data):\n url = '{}/ngsi-ld/v1/entities/{}/attrs'.format(self.url, device_id)\n return self.post(url, data=data, headers=self.headers_ld)",
"def update(domain_id, name, sensitive):\n domain = get(domain_id)\n domain.name = name\n domain.sensitive = sensitive\n database.update(domain)",
"async def do_update(self, data):\n old = await self.config()\n\n new = old.copy()\n new.update(data)\n\n verrors = ValidationErrors()\n\n for attr, minlen, maxlen in (\n ('access_key', 5, 20),\n ('secret_key', 8, 40),\n ):\n curlen = len(new.get(attr, ''))\n if curlen < minlen or curlen > maxlen:\n verrors.add(\n f's3_update.{attr}', f'Attribute should be {minlen} to {maxlen} in length'\n )\n\n if not new['storage_path']:\n verrors.add('s3_update.storage_path', 'Storage path is required')\n else:\n await check_path_resides_within_volume(\n verrors, self.middleware, 's3_update.storage_path', new['storage_path']\n )\n\n if not verrors:\n if new['storage_path'].rstrip('/').count('/') < 3:\n verrors.add(\n 's3_update.storage_path',\n 'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'\n )\n else:\n # If the storage_path does not exist, let's create it\n if not os.path.exists(new['storage_path']):\n os.makedirs(new['storage_path'])\n\n if new['certificate']:\n verrors.extend((await self.middleware.call(\n 'certificate.cert_services_validation', new['certificate'], 's3_update.certificate', False\n )))\n\n if new['bindip'] not in await self.bindip_choices():\n verrors.add('s3_update.bindip', 'Please provide a valid ip address')\n\n if verrors:\n raise verrors\n\n new['disks'] = new.pop('storage_path')\n\n await self._update_service(old, new)\n\n if (await self.middleware.call('filesystem.stat', new['disks']))['user'] != 'minio':\n await self.middleware.call(\n 'filesystem.setperm',\n {\n 'path': new['disks'],\n 'mode': str(775),\n 'uid': (await self.middleware.call('dscache.get_uncached_user', 'minio'))['pw_uid'],\n 'gid': (await self.middleware.call('dscache.get_uncached_group', 'minio'))['gr_gid'],\n 'options': {'recursive': True, 'traverse': False}\n }\n )\n\n return await self.config()",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def put(self, did):\n dev = Device.query.filter(Device.id == did).one_or_none()\n if dev is None:\n lista = request.get_json(force=True)\n dev.name = lista['name']\n dev.marca = lista['marca']\n dev.model = lista['model']\n dev.serial_number = lista['serial_number']\n dev.description = lista['description']\n dev.system = lista['system']\n dev.teamviwer = lista['teamviwer']\n dev.location = lista['location']\n dev.type_device = lista['type_device']\n dev.active = lista['active']\n try:\n dev.update()\n except Exception as e:\n print(e)\n return {'error': 'Lo sentimos un error a ocurrido!'}, 500\n return dev.json()\n else:\n return None, 404",
"def update_monitoring(c, stack_name, subdomain, profile, cert_arn=None, create=False):\n action = 'create' if create else 'update'\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-monitoring',\n '--template-body', f'file://monitoring.yaml',\n '--capabilities', 'CAPABILITY_NAMED_IAM',\n '--parameters',\n f'ParameterKey=Subdomain,ParameterValue={subdomain}',\n f'ParameterKey=CertificateArn,ParameterValue={cert_arn if cert_arn else \"\"}',\n f'--profile', f'{profile}')"
] | [
"0.5492186",
"0.5166736",
"0.5111948",
"0.50958365",
"0.50700927",
"0.50695395",
"0.505297",
"0.49638778",
"0.49325725",
"0.4920512",
"0.4913717",
"0.4898047",
"0.48851603",
"0.48770708",
"0.4857135",
"0.48554",
"0.48477212",
"0.4836657",
"0.47670794",
"0.47574732",
"0.47472918",
"0.47305927",
"0.4724632",
"0.46780202",
"0.46768457",
"0.46712705",
"0.46466026",
"0.4646482",
"0.46365783",
"0.4621512"
] | 0.6193381 | 0 |
Gets a default or paginated collection of Managed SANs. [Arguments] | def fusion_api_get_managed_san(self, uri=None, param='', api=None, headers=None):
return self.ms.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_san_manager(self, uri=None, param='', api=None, headers=None):\n return self.dm.get(uri=uri, api=api, headers=headers, param=param)",
"def list(self, args):\n try:\n cloud = self._context.getCloudService()\n vdcs = cloud.listVirtualDatacenters()\n pprint_vdcs(vdcs)\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()",
"def list_smsa(self, kwargs):\n\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"sAMAccountName\", \"msDS-HostServiceAccountBL\"]\n entries = self.engine.query(self.engine.SMSA_FILTER(), attributes)\n\n if verbose:\n self.display(entries, verbose)\n else:\n for entry in entries:\n sam = entry['sAMAccountName']\n for host in entry[\"msDS-HostServiceAccountBL\"]:\n print(f'{sam}:{host}')",
"def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)",
"def get_all(self, marker=None, limit=None,\n sort_key='name', sort_dir='asc'):\n\n services = self._get_services(marker,\n limit,\n sort_key,\n sort_dir)\n\n return ServicesCollection.convert_with_links(services, limit,\n sort_key=sort_key,\n sort_dir=sort_dir)",
"def _scoped_servers(self):\n\n # If project scoped explicitly set the project list\n projects = None if utils.all_projects() else [pecan.request.token.project_id]\n\n # Must do a detailed search here as it returns the tenant_id field\n servers = self.compute.servers.list(search_opts={'all_tenants': 'True'})\n\n servers = Scope.filter(servers, projects=projects)\n return utils.paginate(servers, pecan.request.GET.get('marker'),\n pecan.request.GET.get('limit'))",
"def ls():\n return dynamodb.ls(OrganizationModel)",
"def list_servers(self, request, paginate):\n raise NotImplementedError",
"def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', default)",
"def find_vms(self, name):\n script = (\n 'Get-SCVirtualMachine -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVirtualMachine(system=self, raw=vm_data) for vm_data in data]\n return [SCVirtualMachine(system=self, raw=data)]",
"def get_service_list():\n service_dict = requests.get('http://consul:8500/v1/catalog/services').json()\n service_list = []\n for s in service_dict:\n service_list.append(s)\n return service_list",
"def top_sources_male(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\"$project\": {\"outlet\": 1.0, \"sourcesMale\": 1.0}},\n {\"$unwind\": {\"path\": \"$sourcesMale\", \"preserveNullAndEmptyArrays\": False}},\n {\"$group\": {\"_id\": \"$sourcesMale\", \"count\": {\"$sum\": 1.0}}},\n {\"$sort\": {\"count\": args[\"sort\"]}},\n {\"$limit\": args[\"limit\"]},\n ]\n return query",
"def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])",
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def list_datacenters(conn=None, call=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"The list_datacenters function must be called with -f or --function.\"\n )\n\n datacenters = []\n\n if not conn:\n conn = get_conn()\n\n for item in conn.list_datacenters()[\"items\"]:\n datacenter = {\"id\": item[\"id\"]}\n datacenter.update(item[\"properties\"])\n datacenters.append({item[\"properties\"][\"name\"]: datacenter})\n\n return {\"Datacenters\": datacenters}",
"def list(self, request):\n directors = Director.objects.all()\n\n # Note the additional `many=True` argument to the\n # serializer. It's needed when you are serializing\n # a list of objects instead of a single object.\n serializer = DirectorSerializer(\n directors, many=True, context={'request': request})\n return Response(serializer.data)",
"def station_list(request):\n center=request.session.__getitem__('center')\n ctrs = connection.Station.find({'cn': center.__unicode__()})\n return render(request, 'list_station.html',\n {'ctrs': ctrs}, content_type=\"text/html\")",
"def collection(self):\n return self.connection.smembers(self.collection_key)",
"def list(per_page=None, page=None):\n # Comprehension dict are not supported in Python 2.6-. You can use this commented line instead of the current\n # line when you drop support for Python 2.6.\n # pagination = {key: value for (key, value) in [('page', page), ('per_page', per_page)] if value}\n pagination = dict((key, value) for (key, value) in [('page', page), ('per_page', per_page)] if value)\n\n http_client = HttpClient()\n response, _ = http_client.get(routes.url(routes.CUSTOMER_RESOURCE, pagination=pagination))\n return resources.APIResourceCollection(resources.Customer, **response)",
"def list_vms(connection: str = None) -> list:\n with libvirt.open(connection) as conn:\n return conn.listAllDomains()",
"def Run(self, args):\n orgs_client = organizations.Client()\n return orgs_client.List(limit=args.limit, page_size=args.page_size)",
"def get(self):\n return GenericGet().get_catalogs()",
"def _list(self, account, page):\n response = self.client.get(self.get_url(account), data={\"page\": page})\n return [\n DomainResource(**item) for item in response['data']\n ], response['pagination']",
"def get_services(self):\n collection_list = []\n try:\n services = self.client.discover_services()\n if services:\n for service in services:\n if 'collection' in service.type.lower():\n for eachone in self.get_collection(service.address):\n collection_list.append({'name': eachone.name})\n break\n except Exception as e:\n demisto.error(\"Failed to fetch collections, exception:{}\".format(e))\n raise e\n\n return collection_list",
"def managed_devices(self):\n if \"managedDevices\" in self._prop_dict:\n return ManagedDevicesCollectionPage(self._prop_dict[\"managedDevices\"])\n else:\n return None",
"def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list",
"def testCMSNametoList(self):\n result = self.mySiteDB.cmsNametoList(\"T1_US*\", \"SE\")\n self.assertItemsEqual(result, [u'cmsdcadisk01.fnal.gov'])",
"def list(self, detailed=False, search_opts=None, paginate_opts=None):\n if search_opts is None:\n search_opts = {}\n\n if paginate_opts is None:\n paginate_opts = {}\n\n qparams = {}\n\n for opt, val in search_opts.iteritems():\n if val:\n qparams[opt] = val\n\n for opt, val in paginate_opts.iteritems():\n if val:\n qparams[opt] = val\n\n query_string = \"?%s\" % urllib.urlencode(qparams) if qparams else \"\"\n\n detail = \"\"\n if detailed == 'detail_filter_and_sort':\n detail = \"/detail_filter_and_sort\"\n elif detailed:\n detail = \"/detail\"\n\n ret = self._list(\"/osds%s%s\" % (detail, query_string),\n \"osds\")\n return ret",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()"
] | [
"0.5484862",
"0.5384177",
"0.52866125",
"0.5137014",
"0.5131903",
"0.5116556",
"0.50167024",
"0.49888143",
"0.4976278",
"0.4908851",
"0.487563",
"0.48422065",
"0.4831081",
"0.48209724",
"0.48115847",
"0.4811006",
"0.480229",
"0.48014426",
"0.47716543",
"0.4765974",
"0.4758089",
"0.47313544",
"0.47231632",
"0.47212172",
"0.4718719",
"0.47161922",
"0.47092888",
"0.4700175",
"0.46996558",
"0.4676456"
] | 0.5991173 | 0 |
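For context: the positive document above delegates to a pre-built `ms` REST client. A minimal, self-contained sketch of the same GET follows; the base URL, the '/rest/fc-sans/managed-sans' path, and the header names are illustrative assumptions, not values taken from this record.

import requests

# Hypothetical stand-in for the wrapped client call; the endpoint path and
# 'Auth' session-token header are assumptions, not a verified OneView API.
def get_managed_sans(base_url, session_token, param=''):
    headers = {'Auth': session_token, 'X-Api-Version': '1200'}
    resp = requests.get(f'{base_url}/rest/fc-sans/managed-sans{param}',
                        headers=headers, verify=False)
    resp.raise_for_status()
    return resp.json()

# Usage sketch: pass a query string as `param` for a paginated collection,
# e.g. get_managed_sans(url, token, param='?start=0&count=32')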
Updates the metrics configuration with the new values. Overwrites the existing configuration. [Arguments] | def fusion_api_update_metrics_configuration(self, body, api=None, headers=None):
return self.metrics.update(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_metrics(self):",
"def update_metrics(self, metrics, predictions, labels):\n return",
"def metrics(self, metrics):\n\n self._metrics = metrics",
"def set_metrics(self, metrics: List[Callable]) -> None:\n self.metrics = metrics",
"def update_state(self, **kwargs):\n\n for name in self.metrics:\n\n metric = self.metrics[name]\n\n argspec = inspect.getfullargspec(metric.update_state)\n\n kwargs_to_pass = {k: kwargs[k] for k in kwargs if k in argspec.args}\n\n metric.update_state(**kwargs_to_pass)",
"def conf_update(self):\n pass",
"def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)",
"def update_config(config, args):\n if args.cfg:\n _update_config_from_file(config, args.cfg)\n config.defrost()\n if args.dataset:\n config.DATA.DATASET = args.dataset\n if args.batch_size:\n config.DATA.BATCH_SIZE = args.batch_size\n config.DATA.BATCH_SIZE_EVAL = args.batch_size\n if args.batch_size_eval:\n config.DATA.BATCH_SIZE_EVAL = args.batch_size_eval\n if args.image_size:\n config.DATA.IMAGE_SIZE = args.image_size\n if args.accum_iter:\n config.TRAIN.ACCUM_ITER = args.accum_iter\n if args.data_path:\n config.DATA.DATA_PATH = args.data_path\n if args.output:\n config.SAVE = args.output\n if args.eval:\n config.EVAL = True\n if args.pretrained:\n config.MODEL.PRETRAINED = args.pretrained\n if args.resume:\n config.MODEL.RESUME = args.resume\n if args.last_epoch:\n config.TRAIN.LAST_EPOCH = args.last_epoch\n if args.amp: # only for training\n config.AMP = not config.EVAL\n config.freeze()\n return config",
"def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config",
"def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config",
"def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)",
"def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count",
"def gpu_metrics(self, gpu_metrics: List[ClaraGpuUtilization]):\r\n self._gpu_metrics = gpu_metrics",
"def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())",
"def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass",
"def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self",
"def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset",
"def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()",
"def update_global_config(self, config, **kwargs):\n pass",
"def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))",
"def endpoint_metrics_set(self, endpoint_name=None, metrics=None):\n if metrics is None:\n raise Exception(\"Metrics required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/metrics', 'POST', body=metrics)\n else:\n self.request('/v1.1/endpoints/%s/metrics' % endpoint_name, 'POST', body=metrics)",
"def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)",
"def setPerfMetrics(self, perf_metrics):\n for event in perf_metrics.metric:\n attr_name = '%s_%s_%s' % (frontendConfig.glidein_perfmetric_prefix,\n perf_metrics.name, event)\n self.adParams[attr_name] = perf_metrics.event_lifetime(event)",
"def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))",
"def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])",
"def put_bucket_metrics_configuration(Bucket=None, Id=None, MetricsConfiguration=None):\n pass",
"def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)",
"def update_log_config(self, monitor_name, log_config):\n pass",
"def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],\n metric_time: datetime.datetime, *args, **kwargs):\n\n raise NotImplementedError",
"def update_config_with_cli(args):\n parser = make_config_parser()\n default = parser[\"CLI\"]\n\n ## Update config\n global config\n\n # Handle the *_quality flags. These determine the section to read\n # and are stored in 'camera_config'. Note the highest resolution\n # passed as argument will be used.\n quality = _determine_quality(args)\n section = parser[quality if quality != constants.DEFAULT_QUALITY else \"CLI\"]\n\n # Loop over low quality for the keys, could be any quality really\n config.update({opt: section.getint(opt) for opt in parser[\"low_quality\"]})\n\n # The -r, --resolution flag overrides the *_quality flags\n if args.resolution is not None:\n if \",\" in args.resolution:\n height_str, width_str = args.resolution.split(\",\")\n height, width = int(height_str), int(width_str)\n else:\n height = int(args.resolution)\n width = int(16 * height / 9)\n config.update({\"pixel_height\": height, \"pixel_width\": width})\n\n # Handle the -c (--background_color) flag\n if args.background_color is not None:\n try:\n background_color = colour.Color(args.background_color)\n except AttributeError as err:\n logger.warning(\"Please use a valid color.\")\n logger.error(err)\n sys.exit(2)\n else:\n background_color = colour.Color(default[\"background_color\"])\n config[\"background_color\"] = background_color\n\n config[\"use_js_renderer\"] = args.use_js_renderer or default.getboolean(\n \"use_js_renderer\"\n )\n config[\"js_renderer_path\"] = args.js_renderer_path or default.get(\n \"js_renderer_path\"\n )\n\n # Set the rest of the frame properties\n config[\"frame_height\"] = 8.0\n config[\"frame_width\"] = (\n config[\"frame_height\"] * config[\"pixel_width\"] / config[\"pixel_height\"]\n )\n config[\"frame_y_radius\"] = config[\"frame_height\"] / 2\n config[\"frame_x_radius\"] = config[\"frame_width\"] / 2\n config[\"top\"] = config[\"frame_y_radius\"] * constants.UP\n config[\"bottom\"] = config[\"frame_y_radius\"] * constants.DOWN\n config[\"left_side\"] = config[\"frame_x_radius\"] * constants.LEFT\n config[\"right_side\"] = config[\"frame_x_radius\"] * constants.RIGHT\n\n # Handle the --tex_template flag, if the flag is absent read it from the config.\n if args.tex_template:\n tex_fn = os.path.expanduser(args.tex_template)\n else:\n tex_fn = default[\"tex_template\"] if default[\"tex_template\"] != \"\" else None\n\n if tex_fn is not None and not os.access(tex_fn, os.R_OK):\n # custom template not available, fallback to default\n logger.warning(\n f\"Custom TeX template {tex_fn} not found or not readable. 
\"\n \"Falling back to the default template.\"\n )\n tex_fn = None\n config[\"tex_template_file\"] = tex_fn\n config[\"tex_template\"] = (\n TexTemplateFromFile(filename=tex_fn) if tex_fn is not None else TexTemplate()\n )\n\n ## Update file_writer_config\n fw_config = {}\n\n if config[\"use_js_renderer\"]:\n fw_config[\"disable_caching\"] = True\n\n if not hasattr(args, \"subcommands\"):\n fw_config[\"input_file\"] = args.file if args.file else \"\"\n fw_config[\"scene_names\"] = (\n args.scene_names if args.scene_names is not None else []\n )\n fw_config[\"output_file\"] = args.output_file if args.output_file else \"\"\n\n # Note ConfigParser options are all strings and each needs to be converted\n # to the appropriate type.\n for boolean_opt in [\n \"preview\",\n \"show_in_file_browser\",\n \"leave_progress_bars\",\n \"write_to_movie\",\n \"save_last_frame\",\n \"save_pngs\",\n \"save_as_gif\",\n \"write_all\",\n \"disable_caching\",\n \"flush_cache\",\n \"log_to_file\",\n ]:\n attr = getattr(args, boolean_opt)\n fw_config[boolean_opt] = (\n default.getboolean(boolean_opt) if attr is None else attr\n )\n # for str_opt in ['media_dir', 'video_dir', 'tex_dir', 'text_dir']:\n for str_opt in [\"media_dir\"]:\n attr = getattr(args, str_opt)\n fw_config[str_opt] = os.path.relpath(default[str_opt]) if attr is None else attr\n attr = getattr(args, \"log_dir\")\n fw_config[\"log_dir\"] = (\n os.path.join(fw_config[\"media_dir\"], default[\"log_dir\"])\n if attr is None\n else attr\n )\n dir_names = {\n \"video_dir\": \"videos\",\n \"images_dir\": \"images\",\n \"tex_dir\": \"Tex\",\n \"text_dir\": \"texts\",\n }\n for name in dir_names:\n fw_config[name] = os.path.join(fw_config[\"media_dir\"], dir_names[name])\n\n # the --custom_folders flag overrides the default folder structure with the\n # custom folders defined in the [custom_folders] section of the config file\n fw_config[\"custom_folders\"] = args.custom_folders\n if fw_config[\"custom_folders\"]:\n fw_config[\"media_dir\"] = parser[\"custom_folders\"].get(\"media_dir\")\n for opt in [\"video_dir\", \"images_dir\", \"tex_dir\", \"text_dir\"]:\n fw_config[opt] = parser[\"custom_folders\"].get(opt)\n\n # Handle the -s (--save_last_frame) flag: invalidate the -w flag\n # At this point the save_last_frame option has already been set by\n # both CLI and the cfg file, so read the config dict directly\n if fw_config[\"save_last_frame\"]:\n fw_config[\"write_to_movie\"] = False\n\n # Handle the -t (--transparent) flag. This flag determines which\n # section to use from the .cfg file.\n section = parser[\"transparent\"] if args.transparent else default\n for opt in [\"png_mode\", \"movie_file_extension\", \"background_opacity\"]:\n fw_config[opt] = section[opt]\n\n # Handle the -n flag. Read first from the cfg and then override with CLI.\n # These two are integers -- use getint()\n for opt in [\"from_animation_number\", \"upto_animation_number\"]:\n fw_config[opt] = default.getint(opt)\n if fw_config[\"upto_animation_number\"] == -1:\n fw_config[\"upto_animation_number\"] = float(\"inf\")\n nflag = args.from_animation_number\n if nflag is not None:\n if \",\" in nflag:\n start, end = nflag.split(\",\")\n fw_config[\"from_animation_number\"] = int(start)\n fw_config[\"upto_animation_number\"] = int(end)\n else:\n fw_config[\"from_animation_number\"] = int(nflag)\n\n # Handle the --dry_run flag. This flag determines which section\n # to use from the .cfg file. 
All options involved are boolean.\n # Note this overrides the flags -w, -s, -a, -g, and -i.\n if args.dry_run:\n for opt in [\n \"write_to_movie\",\n \"save_last_frame\",\n \"save_pngs\",\n \"save_as_gif\",\n \"write_all\",\n ]:\n fw_config[opt] = parser[\"dry_run\"].getboolean(opt)\n if not fw_config[\"write_to_movie\"]:\n fw_config[\"disable_caching\"] = True\n # Read in the streaming section -- all values are strings\n fw_config[\"streaming\"] = {\n opt: parser[\"streaming\"][opt]\n for opt in [\n \"live_stream_name\",\n \"twitch_stream_key\",\n \"streaming_protocol\",\n \"streaming_ip\",\n \"streaming_protocol\",\n \"streaming_client\",\n \"streaming_port\",\n \"streaming_port\",\n \"streaming_console_banner\",\n ]\n }\n\n # For internal use (no CLI flag)\n fw_config[\"skip_animations\"] = fw_config[\"save_last_frame\"]\n fw_config[\"max_files_cached\"] = default.getint(\"max_files_cached\")\n if fw_config[\"max_files_cached\"] == -1:\n fw_config[\"max_files_cached\"] = float(\"inf\")\n # Parse the verbosity flag to read in the log level\n verbosity = getattr(args, \"verbosity\")\n verbosity = default[\"verbosity\"] if verbosity is None else verbosity\n fw_config[\"verbosity\"] = verbosity\n logger.setLevel(verbosity)\n\n # Parse the ffmpeg log level in the config\n ffmpeg_loglevel = parser[\"ffmpeg\"].get(\"loglevel\", None)\n fw_config[\"ffmpeg_loglevel\"] = (\n constants.FFMPEG_VERBOSITY_MAP[verbosity]\n if ffmpeg_loglevel is None\n else ffmpeg_loglevel\n )\n\n # Parse the progress_bar flag\n progress_bar = getattr(args, \"progress_bar\")\n if progress_bar is None:\n progress_bar = default.getboolean(\"progress_bar\")\n fw_config[\"progress_bar\"] = progress_bar\n\n global file_writer_config\n file_writer_config.update(fw_config)"
] | [
"0.6480207",
"0.6306653",
"0.61963147",
"0.60742277",
"0.5940462",
"0.5824282",
"0.57890254",
"0.5690043",
"0.56871504",
"0.5668699",
"0.5658537",
"0.562513",
"0.5568342",
"0.55321926",
"0.5525956",
"0.55169654",
"0.54595894",
"0.54588157",
"0.5418729",
"0.53867775",
"0.53706414",
"0.5356964",
"0.53456104",
"0.5342505",
"0.5315193",
"0.53028786",
"0.5291599",
"0.52859217",
"0.5263467",
"0.5250716"
] | 0.6502667 | 0 |
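Since the query stresses that this call overwrites the existing configuration, here is a hedged sketch of the equivalent raw PUT; the '/rest/metrics-configuration' path, the header names, and the body shape are illustrative assumptions.

import requests

# Hedged sketch: a PUT replaces the whole metrics relay configuration.
# Endpoint path, header names, and body keys below are assumptions.
def update_metrics_configuration(base_url, session_token, body):
    headers = {'Auth': session_token, 'Content-Type': 'application/json'}
    resp = requests.put(f'{base_url}/rest/metrics-configuration',
                        json=body, headers=headers, verify=False)
    resp.raise_for_status()
    return resp.json()

# Example with an assumed payload shape: relay enclosure metrics every 300 s.
# update_metrics_configuration(url, token, {'sourceTypeList': [
#     {'sourceType': '/rest/enclosures', 'sampleIntervalInSeconds': '300'}]})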
Fetches the list of resource types and supported metrics that OneView is capable of relaying. [Example] ${resp} = Fusion Api Get Metrics Capability | | | def fusion_api_get_metrics_capability(self, api=None, headers=None):
return self.metrics.get(api=api, headers=headers, param='/capability') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def supported_metrics(cls) -> List[str]:\n ...",
"async def get_capability_report(self):\n if self.query_reply_data.get(\n PrivateConstants.CAPABILITY_QUERY) is None:\n await self._send_sysex(PrivateConstants.CAPABILITY_QUERY, None)\n while self.query_reply_data.get(\n PrivateConstants.CAPABILITY_RESPONSE) is None:\n await asyncio.sleep(self.sleep_tune)\n return self.query_reply_data.get(PrivateConstants.CAPABILITY_RESPONSE)",
"async def get_capability_report(self):\n if self.query_reply_data.get(\n PrivateConstants.CAPABILITY_QUERY) is None:\n await self._send_sysex(PrivateConstants.CAPABILITY_QUERY, None)\n while self.query_reply_data.get(\n PrivateConstants.CAPABILITY_RESPONSE) is None:\n await asyncio.sleep(self.sleep_tune)\n return self.query_reply_data.get(PrivateConstants.CAPABILITY_RESPONSE)",
"async def handle_metrics(request: Request):\n content, http_headers = aioprometheus.render(\n app.registry, [request.headers.get(\"accept\")]\n )\n return Response(content, headers=http_headers)",
"def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def list_metrics(self):\n pass",
"def get_metrics(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the response data frame\n self.response = self.model.metrics_df\n \n # Send the reponse table description to Qlik\n if self.model.validation == \"hold-out\":\n if self.model.estimator_type == \"classifier\":\n self._send_table_description(\"metrics_clf\")\n elif self.model.estimator_type == \"regressor\":\n self._send_table_description(\"metrics_reg\")\n elif self.model.validation in [\"k-fold\", \"timeseries\"]:\n if self.model.estimator_type == \"classifier\":\n self._send_table_description(\"metrics_clf_cv\")\n elif self.model.estimator_type == \"regressor\":\n self._send_table_description(\"metrics_reg_cv\")\n else:\n err = \"Metrics are not available. Make sure the machine learning pipeline includes K-fold cross validation or hold-out testing.\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def list_metrics(ctx, wrap):\n config = ctx.obj[\"CONFIG\"]\n\n if not exists(config):\n handle_no_cache(ctx)\n\n from wily.commands.list_metrics import list_metrics\n\n list_metrics(wrap)",
"def get_metric_list(config):\n metric_list = []\n url = config[\"OPENTSDB_URL\"] + \"/api/suggest?type=metrics&q=\"\n response = requests.get(url)\n if response.status_code == 200:\n metric_list = response.json()\n logger.debug(\"Get metric list from opentsdb: \" + str(metric_list))\n return metric_list",
"def GetMetricTypes(self, request, context):\n LOG.debug(\"GetMetricTypes called\")\n try:\n metrics = self.plugin.update_catalog(ConfigMap(pb=request.config))\n return MetricsReply(metrics=[m.pb for m in metrics])\n except Exception as err:\n msg = \"message: {}\\n\\nstack trace: {}\".format(\n err, traceback.format_exc())\n return MetricsReply(metrics=[], error=msg)",
"def test_get_hyperflex_capability_info_list(self):\n pass",
"def metrics(self) -> pulumi.Output['outputs.RuntimeMetricsResponse']:\n return pulumi.get(self, \"metrics\")",
"def list_definition(self):\n return self._get(path='metrics')",
"def get_prom_metrics(self):\n base_url = self.get_config().get(\"prometheus_endpoint\", PROM_BASE_URL).rstrip(\"/\")\n\n url = \"%s%slabel/__name__/values\" % (base_url, PROM_API_PATH)\n\n self.debug(\"Getting url: \", url)\n r = requests.get(url)\n\n assert r.status_code == 200, \"Prometheus server returned http code: \" + str(r.status_code)\n\n try:\n data = r.json()\n except:\n raise Exception(\"Failed to parse Prometheus JSON response\")\n\n self.debug(\"Got reponse data: \", data)\n\n assert (\"status\" in data and data[\"status\"] == \"success\"), \"Prometheus server did not return status success\"\n assert \"data\" in data, \"Prometheus server did not return data in output\"\n assert len(data[\"data\"]) > 0, \"Prometheus server returned no metrics\"\n\n known_metrics = data[\"data\"]\n assert isinstance(known_metrics, list)",
"def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results",
"def get_metrics() -> Response:\n\n try:\n with get_cursor(db_creds, commit=False) as cur:\n data = get_sensors_data(cur)\n return jsonify(status_code=200, data=data)\n except psycopg2.Error as e:\n return jsonify(\n message=f\"Psycopg2 driver error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def metrics(self, request):\n return OtterMetrics(self.store).app.resource()",
"def metrics(self) -> list:\n my_metrics = [\n FramesMetric(\"frames\"),\n FPSMetric(\"fps\"),\n EpisodeRewardMetric('PMM:episode_rewards'),\n EpisodeRewardMetricQuantile('P09:episode_rewards', quantile=0.9),\n EpisodeRewardMetricQuantile('P01:episode_rewards', quantile=0.1),\n EpisodeLengthMetric(\"episode_length\")\n ]\n\n return my_metrics + self.algo.metrics() + self.env_roller.metrics()",
"def capability_response(self, data):\n self.capability_query_results = data",
"def get():\n\n l2ca_info = caps.l2ca_info()\n\n res = {\n 'cache_size': l2ca_info['cache_size'],\n 'cw_size': l2ca_info['cache_way_size'],\n 'cw_num': l2ca_info['cache_ways_num'],\n 'clos_num': l2ca_info['clos_num'],\n 'cdp_supported': l2ca_info['cdp_supported'],\n 'cdp_enabled': l2ca_info['cdp_enabled']\n }\n return res, 200",
"def get_metric_list(self) -> List[str]:\n ...",
"def get_capabilities(self):\n\n service = self.__get_service()\n capability = self.__get_capability()\n contents = {\"service\" : service, \"capability\" : capability}\n return contents, self.params['format']",
"def query_upgrade_capability(self):\n self.response = self.request('GET', self.capability_endpoint, \"\")\n self.log.debug(self.response.status)\n response = self.response.read()\n capability_schema = ControllerUpgradeCapabilitySchema()\n capability_schema.set_data(response, self.accept_type)\n return capability_schema",
"def test_result_fields_with_metrics(cbcsdk_mock):\n api = cbcsdk_mock.api\n result = Result(api, initial_data=GET_RUN_RESULTS_RESP_1)\n metrics = result.metrics_\n assert metrics._info == {\"cpu\": 24.3, \"memory\": 8.0}",
"def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']",
"def metrics(env):\n envs = environments()\n check_env(env, envs)\n\n metrics = get_or_abort(puppetdb._query, 'mbean')\n return render_template('metrics.html',\n metrics=sorted(metrics.keys()),\n envs=envs,\n current_env=env)",
"def metrics_get(period):\n return flask.jsonify({\"message\": \"noop\"}), 200",
"def get_capabilities(self, array_id=None):\n return_response = list()\n response = self.common.get_resource(\n category=WLP, resource_level='capabilities',\n resource_type=SYMMETRIX)\n if array_id:\n for wlp_info in response.get('symmetrixCapability'):\n if wlp_info.get('symmetrixId') == array_id:\n return_response = [wlp_info]\n break\n else:\n return_response = response.get('symmetrixCapability', list())\n\n return return_response",
"def resources(self) -> pulumi.Output[Sequence['outputs.MachineExtensionResponse']]:\n return pulumi.get(self, \"resources\")"
] | [
"0.6440414",
"0.6186461",
"0.6186461",
"0.6145166",
"0.61392456",
"0.5982165",
"0.5919442",
"0.5864791",
"0.58446157",
"0.58444005",
"0.5822578",
"0.578648",
"0.57689685",
"0.5727299",
"0.5675176",
"0.56600803",
"0.56364053",
"0.5615627",
"0.5610349",
"0.55961037",
"0.55608296",
"0.55278045",
"0.5484432",
"0.54675937",
"0.54651475",
"0.5463547",
"0.5431445",
"0.54158866",
"0.5414188",
"0.53617144"
] | 0.69907576 | 0 |
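A matching hedged sketch for the capability lookup: the '/capability' suffix mirrors the `param` used by the positive document above, while the base path and header names remain assumptions.

import requests

# Hedged sketch of the capability GET; only the '/capability' suffix is taken
# from the record above -- the base path and header names are assumptions.
def get_metrics_capability(base_url, session_token):
    headers = {'Auth': session_token}
    resp = requests.get(f'{base_url}/rest/metrics-configuration/capability',
                        headers=headers, verify=False)
    resp.raise_for_status()
    return resp.json()  # resource types and the metrics each can relay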
Fetches the current configuration for which metrics are being relayed. [Example] ${resp} = Fusion Api Get Metrics Configuration | | | def fusion_api_get_metrics_configuration(self, api=None, headers=None):
return self.metrics.get(api=api, headers=headers, param='/configuration') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config(req):\n #try:\n # user_id = req.user\n #except KeyError as e:\n # msg = req.get_error_msg(e)\n # return send_error_response(msg)\n try:\n config = tools_config_get_config(req)\n except Exception:\n raise http_exc.HTTPClientError()\n else:\n return Response(json_body=json.dumps(config), content_type='application/json')",
"def config(self) -> pulumi.Output['outputs.ConfigResponse']:\n return pulumi.get(self, \"config\")",
"def get(self, request, format=None):\n return Response({k: getattr(config, k) for k in list(dir(config))})",
"def processGetConfig(self, msg):\r\n resp = MsgHelper.createResponse(Messages.RSP_GET_CONFIG, msg)\r\n resp[RunInto] = self.runInto\r\n resp[ExecDelay] = self.execDelay\r\n resp[ByStep] = self.stepByStep\r\n return resp",
"def fusion_api_get_configuration(self, uri=None, param='', api=None, headers=None):\n return self.configuration.get(uri=uri, api=api, headers=headers, param=param)",
"def config(self) -> 'outputs.DeviceConfigResponse':\n return pulumi.get(self, \"config\")",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get_configuration() -> Response: # noqa: E501\n config = rabbitMQ_manager.get_configuration()\n if config is not None:\n return Response(\n json.dumps(config),\n status=200\n )\n return Response(\n status=500\n )",
"def get_config(site='self'):\n path='/sites/%s/configuration' % (site)\n return _api_request('GET', path)",
"def get(self, session: Session = None) -> Response:\n return jsonify(self.manager.config)",
"def _get_config():\n resp = requests.get(TRAEFIK_API_URL)\n if not resp.ok:\n raise Exception(\n \"Bad traefik response: %s %s\" % (resp.status_code, resp.text)\n )\n return resp.json()",
"def get(self) -> dict:\n return Config.get()",
"def getCampaignConfig(docName, url=reqmgr_url):\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"application/json\"}\n conn = make_x509_conn(url)\n url = '/reqmgr2/data/campaignconfig/%s' % docName\n conn.request(\"GET\", url, headers=headers)\n r2 = conn.getresponse()\n data = json.loads(r2.read())\n return data['result']",
"def logging_config(self) -> 'outputs.LoggingConfigResponse':\n return pulumi.get(self, \"logging_config\")",
"async def get_config(desired_config: ConfigName):\n redis = app.state.redis\n if desired_config == \"server\":\n return orjson.loads(await redis.get_key(\"influxdb_server\"))\n if desired_config == \"organizations\":\n return orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if desired_config == \"buckets\":\n return orjson.loads(await redis.get_key(\"influxdb_buckets\"))\n if desired_config == \"measurements\":\n return orjson.loads(await redis.get_key(\"influxdb_measurements\"))",
"async def get_current_configuration(\n self,\n keep_empty_params=False\n ):\n http_method = \"get\".upper()\n api_url = format_url(f\"\"\"\n {self._base_url}\n /api/v1/threats/configuration\n \"\"\")\n\n body = {}\n headers = {}\n form = {}\n\n request, error = await self._request_executor.create_request(\n http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params\n )\n\n if error:\n return (None, None, error)\n\n response, error = await self._request_executor\\\n .execute(request, ThreatInsightConfiguration)\n\n if error:\n return (None, response, error)\n\n try:\n result = ThreatInsightConfiguration(\n self.form_response_body(response.get_body())\n )\n except Exception as error:\n return (None, response, error)\n return (result, response, None)",
"def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder",
"def get_configuration():\r\n if not hasattr(CURRENT_REQUEST_CONFIGURATION, 'data'):\r\n return {}\r\n\r\n return CURRENT_REQUEST_CONFIGURATION.data",
"def config_get():\n server_config = db.get().server_config_get()\n\n if not server_config:\n return flask.jsonify({\n \"message\": \"Netmet server has not been setup yet\"}), 404\n\n return flask.jsonify(server_config), 200",
"def get_config():\n return CONFIG",
"def GetConfig(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def monitoring_config(self) -> 'outputs.MonitoringConfigResponse':\n return pulumi.get(self, \"monitoring_config\")",
"def configuration_info(self) -> Optional['outputs.ConfigurationInfoResponse']:\n return pulumi.get(self, \"configuration_info\")",
"def health_check_configuration(self) -> pulumi.Output['outputs.ServiceHealthCheckConfiguration']:\n return pulumi.get(self, \"health_check_configuration\")",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config"
] | [
"0.66372234",
"0.6462638",
"0.6452565",
"0.6445916",
"0.6443163",
"0.6286106",
"0.6253097",
"0.62112087",
"0.61938095",
"0.6167818",
"0.6142554",
"0.6061876",
"0.6059525",
"0.6024392",
"0.6022809",
"0.59841913",
"0.5945762",
"0.5941983",
"0.59398097",
"0.5911986",
"0.5910538",
"0.5900406",
"0.5880849",
"0.5873109",
"0.58529276",
"0.58529276",
"0.58529276",
"0.58529276",
"0.58529276",
"0.58529276"
] | 0.70904857 | 0 |
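Rather than repeat the GET helper pattern, a short sketch of inspecting the configuration such a call returns; 'sourceTypeList' and the per-source keys are an assumed payload shape, not documented fields.

# Assumed response shape, for illustration only; key names are not verified.
def summarize_relayed_metrics(config):
    for source in config.get('sourceTypeList', []):
        metrics = ', '.join(source.get('enabledMetrics', []))
        interval = source.get('sampleIntervalInSeconds', '?')
        print(f"{source.get('sourceType')}: every {interval}s -> {metrics}")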
Creates a Network Set [Arguments] | def fusion_api_create_network_set(self, body, api=None, headers=None):
return self.network_set.create(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_network_set(name, networkUris=[]):\n\n return {\n 'name': name,\n 'type': 'network-set',\n 'nativeNetworkUri': None,\n 'networkUris': networkUris[:],\n 'connectionTemplateUri': None}",
"def _create(self, name):\n command = [\n 'ipset create -exist ' + name + ' hash:net family inet maxelem 536870912',\n ]\n self.__run(command)",
"def createAddressSet(self) -> ghidra.program.model.address.AddressSet:\n ...",
"def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()",
"def createIpSetList(set_list_name):\n result = subprocess.Popen(\"/usr/sbin/ipset list\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if \"Name: %s\" % set_list_name in result:\n # Flush existing set.\n #result = subprocess.Popen(\"/usr/sbin/ipset flush %s 2>&1\" % set_list_name, shell=True, stdout=subprocess.PIPE).stdout.read()\n result = \"\"\n else:\n # Create new set.\n result = subprocess.Popen(\"/usr/sbin/ipset -N %s hash:net 2>&1\" % set_list_name, shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not create ipset %s. Error: %s.\" % (set_list_name, result))\n sys.exit(255)",
"def test_create_network():\n _network = Network()",
"def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set",
"def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)",
"def create_network(layers):\r\n return NeuronNetwork(layers)",
"def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])",
"def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)",
"def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])",
"def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)",
"def _createNetwork(self,verbose):\n filename,n,rulesList = self.filename,self.n,self.rulesList\n if self.verbose:\n mult = 2\n if self.MichaelisMenten:\n mult = 4\n start,startWall = cpuTime(),wallTime()\n print(\"\")\n print(\"Creating network with \"+str(n)+\" activation sites\")\n print(\" and \"+str(len(rulesList))+\" additional rules (\" \\\n +str(mult*(n+len(rulesList)))+\" parameters).\")\n \n namesList = writeBNGL.writeBNGLnetwork(n,rulesList,filename, \\\n MichaelisMenten=self.MichaelisMenten)\n self._runBNGLfile(filename)\n \n if self.verbose:\n print(\"Network creation took \"+bothTimeStr(start,startWall))\n \n return namesList",
"def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()",
"def newChemAtomSet(self, **attrlinks):\n return ChemAtomSet(self, **attrlinks)",
"def get_network(name):\n _register()\n if name not in __sets:\n raise KeyError('Unknown dataset: {}'.format(name))\n net = __sets[name].setup()\n return net",
"def create(data):\n \n return Setlist(\n list_id = data['id'],\n name = data['name'],\n items = data['num_sets'])",
"def _create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G",
"def _build_network(self):\n pass",
"def __init__(self, input, output, options, local=False):\n super().__init__(\n \"create_training_set\",\n None,\n input,\n output,\n local,\n \"multi_training_set.snakefile\",\n )\n self.options = options",
"def networks_argparse(parser):\n # First of all, we store action value\n subparsers = parser.add_subparsers(help='Action', dest='action')\n\n # All action value are listed here\n # - list: list all item in networks\n # - create: create a new network\n # - update: modify a existing network. All value are not mutable\n # - delete: destroy a network\n # - show: show detail of a specific network\n # - add: add a ip address\n # - remove: remove a ip address\n # - display: display all entries in a address\n # - include: include a entry in a address\n # - exclude: exclude a entry in a address\n subparsers.add_parser('list', help='list all networks')\n create = subparsers.add_parser('create', help='create new network')\n update = subparsers.add_parser('update', help='update network information')\n delete = subparsers.add_parser('delete', help='delete a network')\n show = subparsers.add_parser('show', help='show detail of a specific network')\n add = subparsers.add_parser('add', help='add a address on a network')\n remove = subparsers.add_parser('remove', help='remove a address on a network')\n display = subparsers.add_parser('display', help='display NS entries in a address')\n include = subparsers.add_parser('include', help='include a NS entry in a address')\n exclude = subparsers.add_parser('exclude', help='exclude a NS entry in a address')\n\n # To create a network, we need a network name, a network address and prefix,\n # and optionaly\n # - description: a description of the network\n # - gateway: the network gateway\n # - contact: a contact email for the network\n # - dns-master: the DNS master of reverse resolution\n # - dhcp: the DHCP server for the network\n # - vlan: the VLAN id\n create.add_argument('network', help='network name')\n create.add_argument('--address', help='network address', required=True)\n create.add_argument('--prefix', help='network prefix', required=True)\n create.add_argument('--description', help='a description of the network')\n create.add_argument('--gateway', help='the network gateway address')\n create.add_argument('--contact', help='a contact email for the network')\n create.add_argument('--dns-master', help='DNS master address for reverse DNS')\n create.add_argument('--dhcp', help='DHCP server address')\n create.add_argument('--radius', help='Radius server address')\n create.add_argument('--vlan', help='VLAN id')\n\n # To delete a network, we just need to know the name\n delete.add_argument('network', help='network name')\n\n # To update network information, we need the network name and the following value\n # are mutable\n # - description: a description of the network\n # - gateway: the network gateway\n # - contact: a contact email for the network\n # - dns-master: the DNS master of reverse resolution\n # - dhcp: the DHCP server for the network\n # - vlan: the VLAN id\n update.add_argument('network', help='network name')\n update.add_argument('--description', help='a description of the network')\n update.add_argument('--gateway', help='the network gateway address')\n update.add_argument('--contact', help='a contact email for the network')\n update.add_argument('--dns-master', help='DNS master address for reverse DNS')\n update.add_argument('--dhcp', help='DHCP server address')\n update.add_argument('--radius', help='Radius server address')\n update.add_argument('--vlan', help='VLAN id')\n\n # To have detail of a specific network, we just need the network name\n show.add_argument('network', help='network you want to show')\n\n # To add a new ip we need the 
network name and the following optionals value\n add.add_argument('network', help='network name')\n add.add_argument('--ip-address', help='IP address')\n add.add_argument('--default-name', help='Default DNS name')\n\n # To remove a ip address, we need to now the network and ip address\n remove.add_argument('network', help='network name')\n remove.add_argument('--ip-address', help='IP address', required=True)\n\n # To include a entry in ip address, we need network, address and a fqdn\n display.add_argument('network', help='network name')\n display.add_argument('address', help='address IP')\n\n # To include a entry in ip address, we need network, address and a fqdn\n include.add_argument('network', help='network name')\n include.add_argument('address', help='address IP')\n include.add_argument('fqdn', help='Full Qualified Domain Name')\n include.add_argument('--type', help='NS type')\n\n # To exclude a entry in ip address, we need network, address and a fqdn\n exclude.add_argument('network', help='network name')\n exclude.add_argument('address', help='address IP')\n exclude.add_argument('fqdn', help='Full Qualified Domain Name')\n exclude.add_argument('--type', help='NS type')",
"def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()",
"def create_network_bulk(self, tenant_id, network_list, sync=False):",
"def __init__(self, name: str, *args, size: int = 1024, network: 'base_network.Network' = None):\n self.name = name\n self._network = network if network is not None else defaults.network\n self._network.add_subnet(self)\n self._max_size = size\n self._ip_range = self._network.get_subnet_range(self._max_size)\n self._hosts = list(self._ip_range.hosts())\n\n self._nodes_dict = {}\n self.started = False\n self.loaded = False\n\n for node in utils.args.list_from_args(args):\n self.add_node(node)",
"def initialise_network(self):\n raise NotImplementedError",
"def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()",
"def test_create_cluster_network(self):\n pass",
"def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network",
"def create_network(address=None, **options):\n return NetworkDefinition(address, **options)"
] | [
"0.7187186",
"0.6550994",
"0.6196915",
"0.61925775",
"0.60246813",
"0.5957251",
"0.59493995",
"0.5948319",
"0.59389955",
"0.59347576",
"0.59132177",
"0.5890726",
"0.5858046",
"0.58464783",
"0.5839262",
"0.5808722",
"0.58069295",
"0.57550144",
"0.57415485",
"0.5733353",
"0.57094055",
"0.5704139",
"0.56911385",
"0.56866753",
"0.5683468",
"0.5655763",
"0.5655729",
"0.562632",
"0.56250554",
"0.55997616"
] | 0.6844055 | 1 |
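The first negative above (`make_network_set`) already shows a plausible payload; here is a hedged sketch combining that body with the POST the positive document performs. The '/rest/network-sets' path and header names are assumptions.

import requests

# Hedged sketch: build the body as in make_network_set() above, then POST it.
# The endpoint path and header names are assumptions for illustration.
def create_network_set(base_url, session_token, name, network_uris=()):
    body = {'name': name, 'type': 'network-set', 'nativeNetworkUri': None,
            'networkUris': list(network_uris), 'connectionTemplateUri': None}
    headers = {'Auth': session_token, 'Content-Type': 'application/json'}
    resp = requests.post(f'{base_url}/rest/network-sets',
                         json=body, headers=headers, verify=False)
    resp.raise_for_status()
    return resp.json()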
Updates a Network Set [Arguments] | def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):
return self.network_set.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)",
"def perform_set(self, nodes=[]):\n\n # Input validation\n try:\n # Works for XGNodeDict input\n set_nodes = nodes.get_updates()\n except (AttributeError, TypeError):\n # Assume list instead\n set_nodes = nodes\n if not isinstance(set_nodes, list):\n raise ValueError('Expecting nodes to be of type list')\n else:\n for x in set_nodes:\n if not isinstance(x, XGNode):\n raise ValueError('Invalid node: {0}'.format(x.__class__))\n\n req = cinder.volume.drivers.violin.vxg.core.request.XGSet(set_nodes)\n resp = self.send_request(req)\n try:\n # Works for XGNodeDict input, clear the tracked modifications\n nodes.clear_updates()\n except (AttributeError, TypeError):\n pass\n return resp.as_action_result()",
"def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)",
"def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)",
"def UpdateNetworkID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _SetNodes(self, nodes: int) -> None:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'update', self.name)\n cmd.flags['nodes'] = nodes\n cmd.Issue(raise_on_failure=True)",
"def update_net(self) -> None:\n self.units.update_net()",
"def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)",
"def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)",
"def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()",
"def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net",
"def alter_network(self, add=[], remove=[]):\n\n self.network.edges.add_many(add)\n self.network.edges.remove_many(remove)\n return self.score_network()",
"def set_network(self, network: str) -> None:\n return self.add_value(self._network_attribute, network)",
"def update(cls, *lst, **dct):\n cls.runtime.set_set(lst, dct)\n return UpdateQuery(cls.runtime)",
"def network_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n networks = self.client.networks.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n continue\n\n if len(networks) == 0:\n Console.info(\"No network exist\" + host['Ip'])\n continue\n\n for networkm in networks:\n network = networkm.__dict__['attrs']\n network['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(network)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n r = Rest.delete('Network', filter)\n r = Rest.post('Network', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))",
"def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None",
"def updateDict(self,strSet):\n\tself.createAdjList(strSet,\"remove\")",
"def update_one_set_inventory(set_num):\n set_inv = reapi.pull_set_inventory(set_num)",
"def network_ids(self, network_ids):\n\n self._network_ids = network_ids",
"def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))",
"def update_sets(check_update=1):\n\n set_list = reapi.pull_set_catalog()\n secondary_sets.add_sets_to_database(set_list, update=check_update)",
"def _val_modify_network_config(args):\n if set(args) != set(['rel', 'old', 'chg']):\n return False\n if not isinstance(args['rel'], list):\n return False\n for elt in args['rel']:\n if not isinstance(elt, basestring):\n return False\n return True",
"def assign_networks(cls, instance, networks):\n instance.assigned_networks_list = networks\n db().flush()",
"def update(self, dict=None, **kwargs):\n data = {}\n if dict:\n data.update(dict, **kwargs)\n else:\n data.update(**kwargs)\n self.multi_set(data)",
"def command_update(arguments):\n global current_name\n tag = arguments[0]\n if (len(arguments) == 2):\n old_target, new_target = (...), arguments[1]\n else:\n old_target, new_target = arguments[1:]\n\n to_replace = network[current_name, tag, old_target]\n if not len(to_replace):\n return '\"' + tag + ': ' + old_target + '\" - no such link for this entity'\n if len(to_replace) > 1:\n return 'Sorry, tag \"' + tag + '\" is ambiguous.'\n inverse_tag = to_replace[0].inverse_tag\n to_replace.unlink()\n network.addlink(current_name, tag, new_target, inverse_tag)\n\n return 'Updated link from \"' + tag + ': ' + old_target + '\" to \"' + tag + ': ' + new_target + '\"'",
"def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)",
"def change(self, ids, **kwargs):\n args = {}\n for key, value in kwargs.iteritems():\n argument = make_rpc_name(key)\n (arg, val) = argument_value_convert('torrent-set'\n , argument, value, self.rpc_version)\n args[arg] = val\n\n if len(args) > 0:\n self._request('torrent-set', args, ids, True)\n else:\n ValueError(\"No arguments to set\")",
"def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)",
"def Set(*args):\n return _XCAFDoc.XCAFDoc_GraphNode_Set(*args)",
"def test_networking_project_network_update(self):\n pass"
] | [
"0.62092996",
"0.5808159",
"0.57929206",
"0.57902926",
"0.56701416",
"0.56625813",
"0.5598704",
"0.55421346",
"0.5522033",
"0.5504297",
"0.54240537",
"0.54162216",
"0.54075015",
"0.5389637",
"0.53532296",
"0.53340536",
"0.5332989",
"0.53147435",
"0.53050166",
"0.5304132",
"0.5288086",
"0.52614146",
"0.5216872",
"0.5181463",
"0.5173543",
"0.517243",
"0.51479363",
"0.5147067",
"0.51028013",
"0.5091397"
] | 0.678865 | 0 |
Deletes a Network Set from the appliance based on name OR uri [Arguments] | def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):
return self.network_set.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)",
"def delete_network(self, network):\r\n return self.delete(self.network_path % (network))",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete_set(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.delete_set\")\n\n flg.info(\"Set to delete: {}\".format(set_name))\n\n if mc.objExists(set_name):\n mc.select(set_name)\n old_objects = mc.ls(selection=True)\n flg.debug(\"Old Objects:\")\n for o in old_objects:\n flg.debug(o)\n ref_objects = mc.ls(selection=True, referencedNodes=True)\n\n ref_del_queue = []\n if len(ref_objects) > 0:\n flg.debug(\"Old Reference Nodes:\")\n for o in ref_objects:\n flg.debug(o)\n for o in ref_objects:\n flg.debug(\"Queuing {} for reference removal\".format(o))\n top = mc.referenceQuery(o, referenceNode=True)\n ref_del_queue.append(top)\n if len(ref_del_queue):\n for o in ref_del_queue:\n flg.debug(\"Removing reference: {}\".format(o))\n ref_file = mc.referenceQuery(o, filename=True)\n mc.file(ref_file, removeReference=True)\n for o in old_objects:\n try:\n flg.debug(\"Deleting {}\".format(o))\n mc.delete(o)\n except ValueError as e:\n flg.debug(\"Unable to delete {0}. Error: {1}\".format(o, e))\n flg.debug(\"Deleting set: {}\".format(set_name))\n mc.delete(set_name)",
"def delete(log, session, args):\n log('imageset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete imageset command coming soon.')",
"def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)",
"def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)",
"def delete_network_bulk(self, tenant_id, network_id_list, sync=False):",
"def test_delete_collection_cluster_network(self):\n pass",
"def test_delete_network(self):\n pass",
"def destroyIpSetList(set_list_name):\n result = subprocess.Popen(\"/usr/sbin/ipset destroy %s 2>&1\" % set_list_name, shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not destroy ipset %s. Error: %s.\" % (set_list_name, result))\n sys.exit(255)",
"def Delete(url):\n\n prefix = ''.join([url, config_encoder.NAMESPACE_SEPARATOR])\n\n # Remove Test Suites\n test_keys = _GetEntityKeysByPrefix(ndb_models.Test, prefix)\n ndb.delete_multi(test_keys)\n\n # Remove Device Actions\n device_action_keys = _GetEntityKeysByPrefix(ndb_models.DeviceAction, prefix)\n ndb.delete_multi(device_action_keys)\n\n # Remove Test Run Actions\n test_run_action_keys = _GetEntityKeysByPrefix(\n ndb_models.TestRunAction, prefix)\n ndb.delete_multi(test_run_action_keys)\n\n # Remove Config Set Info\n config_set_info_key = mtt_messages.ConvertToKey(ndb_models.ConfigSetInfo, url)\n config_set_info_key.delete()",
"def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()",
"def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data",
"def test_delete__network(self):\n arglist = [\n '--network',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'network'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_not_called()\n self.network_mock.delete_quota.assert_called_once_with(\n self.projects[0].id,\n )",
"def delete(self, name, *args):\n\n if isinstance(name, string_types):\n name = dns.name.from_text(name, None)\n if len(args) == 0:\n self.find_rrset(self.authority, name, dns.rdataclass.ANY,\n dns.rdatatype.ANY, dns.rdatatype.NONE,\n dns.rdatatype.ANY, True, True)\n elif isinstance(args[0], dns.rdataset.Rdataset):\n for rds in args:\n for rd in rds:\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)\n else:\n args = list(args)\n if isinstance(args[0], dns.rdata.Rdata):\n for rd in args:\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)\n else:\n rdtype = args.pop(0)\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if len(args) == 0:\n self.find_rrset(self.authority, name,\n self.zone_rdclass, rdtype,\n dns.rdatatype.NONE,\n dns.rdataclass.ANY,\n True, True)\n else:\n for s in args:\n rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,\n self.origin)\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)",
"def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('host', kwargs)",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"def delete_network_profile(arn=None):\n pass",
"def test_delete_cluster_network(self):\n pass",
"def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\""
] | [
"0.6744954",
"0.6403101",
"0.63075125",
"0.6301626",
"0.62825984",
"0.6127344",
"0.6117718",
"0.609198",
"0.6062245",
"0.5978882",
"0.5971199",
"0.596836",
"0.5957365",
"0.58755976",
"0.5868253",
"0.58431166",
"0.5820259",
"0.5812668",
"0.5793323",
"0.5763212",
"0.5739048",
"0.5709944",
"0.5702383",
"0.5697702",
"0.56944746",
"0.568361",
"0.56712884",
"0.56454366",
"0.56431055",
"0.56417614"
] | 0.7952963 | 0 |
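The record above documents a delete-by-name-or-uri call. A minimal usage sketch follows; `client` and the set name are illustrative assumptions, not part of the record:

```python
# Hedged sketch: `client` is assumed to be an instance of the appliance API
# helper that mixes in fusion_api_delete_network_set shown above.
def delete_network_set_by_name(client, set_name):
    # Per the docstring, either name or uri selects the target; name is used here.
    return client.fusion_api_delete_network_set(name=set_name)
```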
Adds a Power Delivery Device. [Arguments] | def fusion_api_add_power_device(self, body, api=None, headers=None):
return self.pd.create(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_device(cls, values):\n return cls.dbdriver.add_device(values)",
"def add_device(self, field, device, uc):\n self._devices[field] = device\n self._uc[field] = uc",
"def addDevice(self, node, fullDeviceName, device):",
"def addDeviceDescriptor(string: str, deviceDescriptor: cern.japc.core.DeviceDescriptor) -> None:\n ...",
"def Add_Cisco_Device(device_type, host, username, password):\n cisco_device = main(device_type, host, username, password)\n cisco_list.append(cisco_device)",
"def add_device(self, device_connection):\n self.devices.append(Device(device_connection, self.current_time))",
"def test_add_device(self):\n\n pass",
"def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self",
"def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self",
"def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self",
"def add_device(self, noInit=True, **kwargs):\n self.epicsLive.add_device(noInit=noInit, **kwargs)\n aliases = self.epicsLive._aliases\n if not self._det.get('epicsLive'):\n self._det['epicsLive'] = {}\n self._det['epicsLive'].update({'attrs': aliases})",
"def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)",
"def add(self, device_id, data):\n with self.lock:\n self.devices[device_id] = data",
"def add_extra_args(self):\n self.parser.add_argument('--device', dest='device', type=str, help='Device ID, e.g. d--0001')",
"def add_device(self, device, run=False, port=161):\n # if device\n # self.devices.append({\n # 'host': host,\n # 'port': port,\n # 'community': community\n # })\n self.devices.append(device)\n\n if run:\n t_s = multiprocessing.Process(\n name=device.ip,\n target=self.run_loop,\n args=(device,)\n )\n t_s.start()\n self.device_running.append(t_s)",
"def AddExtraDevice(self, guid_to_code_map: Dict[str, str]) -> None:\n self._extra_devices.update(guid_to_code_map)",
"def add(self, product):\n pass",
"def attach_device(self, device_data):\n self.attached_device = device_data",
"def add(self, DisableInformationPduTx=None, DisableNonInformationPduTx=None, EnableCriticalEvent=None, EnableDyingGasp=None, EnableLinkFault=None, EnableLoopbackResponse=None, EnableVariableResponse=None, Enabled=None, EthernetTypeUsedForDataTraffic=None, EventInterval=None, InformationPduCountPerSecond=None, LinkEventTxMode=None, LocalLostLinkTimer=None, LoopbackCmd=None, LoopbackTimeout=None, MacAddress=None, MaxOamPduSize=None, OperationMode=None, Oui=None, OverrideLocalEvaluating=None, OverrideLocalSatisfied=None, OverrideLocalStable=None, OverrideRemoteEvaluating=None, OverrideRemoteStable=None, OverrideRevision=None, OverrideSequenceNumber=None, Revision=None, SequenceNumber=None, SupportsInterpretingLinkEvents=None, SupportsRemoteLoopback=None, SupportsUnidirectionalMode=None, SupportsVariableRetrieval=None, VariableResponseTimeout=None, VendorSpecificInformation=None, Version=None):\n return self._create(locals())",
"def add_device_to_household(self, household_id, device_id, device_full_type):\n root_url = \"/upm/households/{household_id}/devices/{device_id}\".format(household_id=household_id,\n device_id=device_id)\n headers = {\"Source-ID\": \"PCT\", \"Source-Type\": \"PCT\"}\n payload = {\"deviceId\": str(device_id),\n \"deviceFullType\": device_full_type}\n\n add_device_to_household_response, http_code = self.request(root_url, headers=headers, json=payload)\n\n return add_device_to_household_response, http_code",
"def _add_device(self, uid, succeeded, data):\n # TODO: Bug: on discover the label in the label in the device option menu \n # doesn't change and if you try to select the first device it tells \n # you that it is already selected\n if succeeded:\n self._uid_dict.setdefault(uid, {})[\"DEVICE_LABEL\"] = data\n self.device_menu[\"menu\"].add_command( label = \"%s (%s)\"%(\n self._uid_dict[uid][\"DEVICE_LABEL\"][\"label\"], uid), \n command = lambda:self.device_selected(uid))\n else:\n self._uid_dict.setdefault(uid, {})[\"DEVICE_LABEL\"] = {\"label\":\"\"}\n self.device_menu[\"menu\"].add_command( label = \"%s\" % uid, \n command = lambda:self.device_selected(uid))\n self._uid_dict[uid][\"index\"] = self.device_menu[\"menu\"].index(tk.END)",
"def add_hotspare(self, controller, enclosure, slot, disk_groups=None):\n if disk_groups:\n dgs = ','.join(str(d) for d in disk_groups)\n else:\n dgs = None\n\n self.run('/c{}/e{}/s{} add hotsparedrive{}'.format(\n controller,\n enclosure,\n slot,\n dgs and ' DGs={}'.format(dgs) or ''\n ))",
"def add_product(self, name, energy_points):\n now = datetime.datetime.now()\n date = \"{}-{}-{}\".format(now.year, now.month, now.day)\n Product(productName=name, energyPoints=energy_points, date=date)",
"def add_switch(self, dpid):\n\t\tself.switches.append(dpid)",
"async def async_step_add_device(self, user_input=None):\n # Use cache if available or fallback to manual discovery\n self.editing_device = False\n self.selected_device = None\n errors = {}\n if user_input is not None:\n if user_input[SELECTED_DEVICE] != CUSTOM_DEVICE:\n self.selected_device = user_input[SELECTED_DEVICE]\n\n return await self.async_step_configure_device()\n\n self.discovered_devices = {}\n data = self.hass.data.get(DOMAIN)\n\n if data and DATA_DISCOVERY in data:\n self.discovered_devices = data[DATA_DISCOVERY].devices\n else:\n try:\n self.discovered_devices = await discover()\n except OSError as ex:\n if ex.errno == errno.EADDRINUSE:\n errors[\"base\"] = \"address_in_use\"\n else:\n errors[\"base\"] = \"discovery_failed\"\n except Exception: # pylint: disable= broad-except\n _LOGGER.exception(\"discovery failed\")\n errors[\"base\"] = \"discovery_failed\"\n\n devices = {\n dev_id: dev[\"ip\"]\n for dev_id, dev in self.discovered_devices.items()\n if dev[\"gwId\"] not in self.config_entry.data[CONF_DEVICES]\n }\n\n return self.async_show_form(\n step_id=\"add_device\",\n data_schema=devices_schema(\n devices, self.hass.data[DOMAIN][DATA_CLOUD].device_list\n ),\n errors=errors,\n )",
"def add_powerup(powerup: str):\r\n global POWERUPS\r\n POWERUPS.append(powerup)",
"def fusion_api_discover_power_device(self, body, api=None, headers=None):\n return self.pd.create(body=body, api=api, headers=headers, param='/discover')",
"def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")",
"async def post_device_command(\r\n self, device_id, component_id, capability, command, args\r\n ) -> object:\r\n data = {\r\n \"commands\": [\r\n {\r\n \"component\": component_id,\r\n \"capability\": capability,\r\n \"command\": command,\r\n }\r\n ]\r\n }\r\n if args:\r\n data[\"commands\"][0][\"arguments\"] = args\r\n\r\n return await self.post(API_DEVICE_COMMAND.format(device_id=device_id), data)",
"def _new_device(device):\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )"
] | [
"0.6090695",
"0.59247094",
"0.5800191",
"0.5582035",
"0.55658615",
"0.55432975",
"0.547435",
"0.5461188",
"0.5461188",
"0.5461188",
"0.5454472",
"0.5427549",
"0.5402868",
"0.5326001",
"0.5296909",
"0.52893317",
"0.5280059",
"0.5199275",
"0.5180949",
"0.5117921",
"0.5114201",
"0.50858504",
"0.5078008",
"0.5064993",
"0.50578195",
"0.5049213",
"0.50158864",
"0.4992379",
"0.49863857",
"0.49541336"
] | 0.6119962 | 0 |
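A usage sketch for the add call documented above; the payload fields are placeholders, since the record does not specify the Power Delivery Device schema:

```python
# Hedged sketch: field names in `body` are assumptions, not the appliance schema.
def add_power_device(client, name, device_type):
    body = {"name": name, "deviceType": device_type}  # assumed fields
    return client.fusion_api_add_power_device(body=body)
```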
Updates a Power Delivery Device. [Arguments] | def fusion_api_edit_power_device(self, body, uri, api=None, headers=None):
return self.pd.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_device(cls, device_uuid, values):\n return cls.dbdriver.update_device(device_uuid, values)",
"def update_package(self, *args):\r\n\r\n temp = (self.newProj.device[0],\\\r\n self.newProj.device[1],\\\r\n self.devPackage.get(),\\\r\n self.newProj.device[3],\\\r\n self.newProj.device[4])\r\n\r\n del self.newProj.device\r\n\r\n self.newProj.device = temp\r\n\r\n kT.debug_log(self.newProj.device)\r\n\r\n del temp\r\n\r\n return",
"def edit_device(\n self,\n address: Any = None,\n duty1: Any = None,\n duty2: Any = None,\n freq1: Any = None,\n freq2: Any = None\n ) -> requests.Response:\n params = {\n 'address': address,\n 'duty1': duty1,\n 'duty2': duty2,\n 'freq1': freq1,\n 'freq2': freq2\n }\n return self._call('PATCH', '/devices', params=params)",
"def update(self):\n self.device = self._api.device_query(self._hardware_address, {})",
"def update(self):\n self.device.update()",
"def update(self):\n self.device.update()",
"def update(device_id, **params):\n params = _clean_salt_variables(params)\n\n api_response = requests.put(\n \"https://api.serverdensity.io/inventory/devices/\" + device_id,\n params={\"token\": get_sd_auth(\"api_token\")},\n data=params,\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\n \"Could not parse Server Density API Response content: %s\",\n api_response.content,\n )\n raise CommandExecutionError(\n \"Failed to create, API Response: {}\".format(api_response)\n )\n else:\n return None",
"def test_update_device(self):\n pass",
"def test_update_device(self):\n pass",
"def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)",
"def update_fpd(*args):\n return _ida_frame.update_fpd(*args)",
"def update_device(self, dev_dict):\n # Note(jprabh1x): added bus,slot,function into fields dict as \n # seperate fields.\n no_changes = ('status', 'instance_uuid', 'id', 'extra_info', 'workload')\n map(lambda x: dev_dict.pop(x, None),\n [key for key in no_changes])\n\n # Note(jprabh1x): populating values for bus,slot,function from address in dev_dict.\n if dev_dict.has_key(\"address\"):\n \t\taddress = pci_utils.parse_address(dev_dict[\"address\"])\n \t\tdev_dict.update({'bus':str(address[1]), 'slot':str(address[2]), 'function':str(address[3])})\n for k, v in dev_dict.items():\n if k in self.fields.keys():\n self[k] = v\n else:\n extra_info = self.extra_info\n extra_info.update({k: str(v)})\n self.extra_info = extra_info",
"def update(self):\n self._device.update()",
"def updateDevice(self, serial: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['devices', 'configure'],\n 'operation': 'updateDevice'\n }\n resource = f'/devices/{serial}'\n\n body_params = ['name', 'tags', 'lat', 'lng', 'address', 'notes', 'moveMapMarker', 'switchProfileId', 'floorPlanId', ]\n payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}\n action = {\n \"resource\": resource,\n \"operation\": \"update\",\n \"body\": payload\n }\n return action",
"def update_device(device):\n payload = request.get_json()\n if ('name' in payload) and (payload['name'] != device):\n raise BadRequest(\n 'Device name does not match between URL and JSON payload')\n try:\n properties = devices.show(device)\n for k in payload:\n properties[k] = payload[k]\n except KeyDoesNotExist:\n properties = payload\n return _register_device(properties)",
"def command_update_hw(self, cmd):\n # TODO\n pass",
"def updateDevice(self, *args):\r\n\r\n # Update the list of vision choices and the default vision choice\r\n self._appChoice[\"vision\"] = [choice[0] for choice in self._system[self._appString[\"device\"].get()]]\r\n self._appString[\"vision\"].set(self._appChoice[\"vision\"][0])\r\n\r\n # Delete the old choices fromt the option menu\r\n menu = self._appOption[\"vision\"][\"menu\"]\r\n menu.delete(0, \"end\")\r\n\r\n # Add the new list of choices to the option menu\r\n for string in self._appChoice[\"vision\"]:\r\n menu.add_command(label=string, command=lambda value=string: self._appString[\"vision\"].set(value))",
"def async_update_device(self) -> None:",
"def update_delivery(self, delivery: dict, new_data: dict):\n self.database.Deliveries.update(delivery, {'$set': new_data})\n return True",
"def update_firmware(self) -> str:",
"def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")",
"def _api_device_update(arguments, testbed, step, command, device=None, common_api=None):\n\n # if api and arguments both contains device, need to make sure\n # that the device in the arguments is as same as the one in api\n if (\n arguments.get('device')\n and device\n and arguments.get('device') not in [device.name, device.alias]\n ):\n\n step.errored('Device provided in the arguments {} '\n 'is not as same as the one provided in api {}'\n .format(arguments['device'], device))\n if device:\n\n return device\n\n # if not device in api and arguments contains device need to make sure\n # that device is a valid one and exist in the testbed\n if arguments.get('device'):\n\n try:\n arg_device = testbed.devices[arguments['device']]\n except KeyError as e:\n step.errored('Cannot find device {} in testbed'.format(\n arguments['device']))\n\n arguments['device'] = arg_device\n return arg_device\n\n # if common api\n if common_api:\n return Device(name='a', os='', custom={'abstraction':{'order': ['os']}})\n\n step.errored(\"No device is provided and the api '{}'\"\n \"is not a common api\".format(command))",
"async def async_device_update(self, warning=True):\n LOGGER.info(\"Update switch {name}\".format(name=self.name))\n await self.heater.async_update()",
"def update_device(self, device: Device) -> None:\n self._devices[device.name] = device",
"def update(self):\n\n if self._old:\n # delete wave from last update cycle\n self._pi.wave_delete(self._old)\n self._old = None\n\n # show power state\n self._pi.write(pins.Q4, self._state.powered)\n\n # update wave\n if self._state.ready:\n self._old = self._wave\n self._wave = self.create_wave(self._state)\n self._pi.wave_send_repeat(self._wave)\n\n # power down\n elif self._wave:\n self._write_all_low()\n self._old = self._wave\n self._wave = None",
"def upgrade_device(device, runtime):\n command = 'upgrade \"%s\" \"%s\"' % (device.udid, runtime.identifier)\n _run_command(command)",
"def test_update_pci_device(self):\n pass",
"def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)",
"def _update_device_attributes_on_backend(self):\n if self.is_paired:\n LOG.info('Sending updated device attributes to the backend...')\n try:\n api = DeviceApi()\n api.update_version()\n except Exception:\n self._notify_backend_down()",
"def update(self, args):\n pass"
] | [
"0.63903296",
"0.59937346",
"0.58370966",
"0.57780063",
"0.57761675",
"0.57761675",
"0.5715946",
"0.56183934",
"0.56183934",
"0.56084883",
"0.56075984",
"0.5576775",
"0.5575302",
"0.5546185",
"0.5524716",
"0.5520713",
"0.54829216",
"0.5480364",
"0.5461826",
"0.5447018",
"0.5418728",
"0.54034",
"0.5401083",
"0.53712434",
"0.53580093",
"0.5346337",
"0.5325844",
"0.53052026",
"0.5291463",
"0.52501565"
] | 0.60343134 | 1 |
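A usage sketch for the update call documented above; the uri and payload are placeholders:

```python
# Hedged sketch: assumes the appliance accepts a partial body on update.
def rename_power_device(client, device_uri, new_name):
    body = {"name": new_name}  # assumed minimal update payload
    return client.fusion_api_edit_power_device(body=body, uri=device_uri)
```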
Sets the unit identification (UID) light state of the specified power delivery device. The device must be an HPE iPDU component with a locator light (HPE Intelligent Load Segment, AC Module, HPE Intelligent Outlet Bar, or HPE Intelligent Outlet). [Arguments] | def fusion_api_set_power_device_uid_state(self, body, uri, api=None, headers=None):
return self.pd.update(body=body, uri=uri, api=api, headers=headers, param='/uidState') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def script_set_device(self,udid=None):\n self.desired_caps['udid'] = udid;",
"def setLight(self, id, position, diffuse, specular, ambient):\r\n\t\t\r\n\t\tself.lights[id].set(position, diffuse, specular, ambient)",
"def light_set(self, pin='D13', value='0'):\n self.bridge.put(str(pin), str(value))",
"async def Turn_On_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": \"green\",\n }",
"def set_LED(name,light,value): #TODO UID convert to int\n name = _lookup(name)\n assert light in range(1,5), \"Error: light number must be an Integer between 1 and 4 inclusive\"\n assert value in range(4),\"Error: value must be an integer between 0 and 3 inclusive\"\n flag_data = list(name) + [-1,-1,-1,-1]\n flag_data[light] = value\n mc.set('flag_values',flag_data)",
"def on(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.power_on())\n console.print(f\"[{ip}] Light {id} On:\\n{json.dumps(resp, indent=2)}\")",
"def turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Turn on light %s %s\", self._device.ip, kwargs)\n if not self.is_on:\n self._device.power_on = True\n\n if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:\n self._device.brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:\n color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])\n self._device.color_temperature = color_temp",
"async def Turn_On_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id, color)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": color,\n }",
"async def handle_set_light(self, match: Match[str], payload: str) -> None:\n uniqueid = match.group(1)\n\n # Find the light with that uniqueid\n for light_id in self._bridge.lights:\n light = self._bridge.lights[light_id]\n if light.uniqueid == uniqueid:\n try:\n state = LightSetState(**json.loads(payload))\n LOGGER.info(f\"Updating {light.name}\")\n await light.set_state(**state.dict())\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")\n return\n LOGGER.warning(f\"Unknown light uniqueid: {uniqueid}\")",
"def turnLightOn(ID):\n dislin.litmod(ID, 'ON')",
"def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)",
"async def test_light_setup(\n hass: HomeAssistant,\n light: tuple[Light, str],\n):\n\n unique_id = light[0].id\n entity_id = light[1]\n\n entity_registry = er.async_get(hass)\n entity = entity_registry.async_get(entity_id)\n assert entity\n assert entity.unique_id == unique_id\n\n state = hass.states.get(entity_id)\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 64}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def fusion_api_set_power_device_power_state(self, body, uri, api=None, headers=None):\n return self.pd.update(body=body, uri=uri, api=api, headers=headers, param='/powerState')",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 32}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 256}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setLowPassU(new_low_pass_u):\n return RoboCaller().call(\"setLowPassU\", \"void\", new_low_pass_u * 1000)",
"def set_level(self, device_id, new_level):\n\t\treturn self.post(self.value_url % (ART_SERVER_HOST, device_id), {'value':new_level })",
"def set_led(self, *args, **kw):\n return self.execute_command('set_led', *args, **kw)",
"def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)",
"def identify(self):\n if self.cur_uid is None:\n return\n self.ola_thread.rdm_set(self.universe.get(), self.cur_uid, 0, \n \"IDENTIFY_DEVICE\", \n lambda b, s, uid = self.cur_uid:self._rdm_set_complete(uid, b, s), \n [self.id_state.get()])",
"def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))",
"def set_power_unit(self, power_unit: PowerUnit) -> None:\n\n # 0: Current power units are dBm. 1: Current power units are Watts.\n if power_unit == PowerUnit.dBm:\n unit_argument = int(0)\n elif power_unit == PowerUnit.W:\n unit_argument = int(1)\n else:\n raise ValueError(\"power unit {} is not supported on this device\".format(power_unit))\n\n #:SENSe[n][:CHANnel[m]]:POWer:UNIT/?\n self._inst.write(\"SENS:POW:UNIT {}\".format(unit_argument))",
"async def async_turn_on(self, **kwargs: Any) -> None:\n if (color_temp := kwargs.get(ATTR_COLOR_TEMP)) is not None:\n self._device.light_color_temp = color_temperature_mired_to_kelvin(\n color_temp\n )\n await super().async_turn_on(**kwargs)",
"def set_device(self, device):\n self.device = device",
"def change_device(self):\n if self.state.ser:\n UsbHost.close_port(self.state.ser)\n device = self.CBDevices.currentText()\n if device:\n comport = self.devices[int(device)]\n self.state.ser = UsbHost.open_port(comport)\n if not self.state.ser:\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n answer: str = self.UsbHost.send_command(self.state.ser, \"ping\", device)\n if answer in wrong_answers:\n error_message(\"Выбранный девайс не отвечает\")\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n self.state.device_id = int(device)\n self.state.comport = comport\n self.create_message()\n self.set_controls_state(True)\n self.BtnL1.click()\n self.BtnAttenuate.click()\n self.SpinDACValue.setValue(35000)\n self.BtnSetDACValue.click()\n self.set_sw(\"0 1\")",
"def _set_light(self, new_state):\n try:\n self._device.lights = new_state\n except requests.Timeout:\n _LOGGER.error(\"Time out setting %s light to %s\", self.entity_id, new_state)\n return\n\n self._light_on = new_state == ON_STATE\n self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY\n self.async_write_ha_state()",
"def set_state(self, bridge_id, device_id, mode,\n brightness=255, color_temperature=500):\n logging.info('bridge_id = %s, device_id = %d, mode = %s, '\n 'brightness = %s, color temp = %s',\n bridge_id, device_id, mode, brightness,\n color_temperature)\n\n bridge = self._bridges.get(bridge_id, None)\n if not bridge:\n logging.error('Bridge %s not found!', bridge_id)\n return\n\n command = {'on' : mode,\n 'bri' : brightness}\n if color_temperature is not None:\n command['ct'] = color_temperature\n\n bridge.set_light(device_id, command)",
"def __init__(self, device: SensemeDevice) -> None:\n super().__init__(device, f\"{device.name} Light\")\n self._attr_supported_color_modes = {ColorMode.COLOR_TEMP}\n self._attr_color_mode = ColorMode.COLOR_TEMP\n self._attr_min_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_max\n )\n self._attr_max_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_min\n )",
"def _send_device_command(self, requested_state, requested_data):\n if requested_state:\n if requested_data is not None:\n self._brightness = int(requested_data)\n\n self._tellcore_device.dim(self._brightness)\n else:\n self._tellcore_device.turn_off()"
] | [
"0.5628526",
"0.5579819",
"0.55316037",
"0.54965574",
"0.54312074",
"0.53871113",
"0.53655046",
"0.53386843",
"0.5328693",
"0.52510697",
"0.52317244",
"0.5219277",
"0.51877314",
"0.51787025",
"0.51706076",
"0.5162918",
"0.51415217",
"0.5139538",
"0.5137304",
"0.51330054",
"0.512238",
"0.50953984",
"0.5065665",
"0.50525063",
"0.50353086",
"0.5023313",
"0.49913007",
"0.4972079",
"0.4945298",
"0.49073222"
] | 0.610139 | 0 |
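A usage sketch for the UID-light call documented above; the body shape is an assumption, since the record only fixes the signature and the `/uidState` sub-resource:

```python
# Hedged sketch: the "uidState" field name is assumed, not taken from the record.
def set_locator_light(client, device_uri, state="On"):
    return client.fusion_api_set_power_device_uid_state(
        body={"uidState": state},
        uri=device_uri,
    )
```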
Removes Power Delivery Devices. If neither name nor uri is specified, all PDDs are removed. [Arguments] | def fusion_api_remove_power_device(self, name=None, uri=None, api=None, headers=None):
return self.pd.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None):\n return self.driver.delete(name, uri, api, headers)",
"def removeDevice(self, node, fullDeviceName):",
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False",
"def remove_device(self, path):\n pass",
"def firmware_pack_remove(handle, org_name, name, org_parent=\"org-root\"):\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" %org_name)\n else:\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info(\"Firmware host pack <%s> not found.Nothing to remove\" % name)\n else:\n handle.remove_mo(mo)\n handle.commit()",
"def pop_adv_devices(self):\r\n if self.localSDK.devList:\r\n del self.localSDK.devList[:]\r\n try:\r\n self.localSDK.get_devices() # Get list of boards from KSDK manifest file\r\n except IOError:\r\n self.localSDK.devList = ['None']\r\n return",
"def main_remove(args):\n return remove_command(args.directory, args.name)",
"def fusion_api_remove_power_device_synchronously(self, uri, api=None, headers=None):\n return self.pd.delete(uri=uri, api=api, headers=headers, param='/synchronous')",
"def clear_all_devices():\n adapter = get_adapter()\n for key in devices_by_adr.keys():\n device = get_device(key)\n try:\n adapter.RemoveDevice(device) \n except DBusException:\n print(\"could not remove\", device)",
"def remove_powerup(powerup: str):\r\n global POWERUPS\r\n POWERUPS.remove(powerup)",
"def remove(self, packages):\n if packages:\n cmd = ['dnf', 'remove'] + list(packages)\n subprocess.Popen(cmd).wait()",
"def delete_unavailable_devices():\n _run_command('delete unavailable')",
"def rm(args):\n args.delete = True\n return remove(args)",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def do_command(self, args):\n vendorops = dbops.Vendors()\n vendorops.delete(args)",
"def snap_remove(packages, *flags):\n if type(packages) is not list:\n packages = [packages]\n\n flags = list(flags)\n\n message = 'Removing snap(s) \"%s\"' % ', '.join(packages)\n if flags:\n message += ' with options \"%s\"' % ', '.join(flags)\n\n log(message, level='INFO')\n return _snap_exec(['remove'] + flags + packages)",
"def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"async def test_device_remove_devices_nvr(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n hass_ws_client: WebSocketGenerator,\n) -> None:\n assert await async_setup_component(hass, \"config\", {})\n\n ufp.api.get_bootstrap = AsyncMock(return_value=ufp.api.bootstrap)\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n entry_id = ufp.entry.entry_id\n\n device_registry = dr.async_get(hass)\n\n live_device_entry = list(device_registry.devices.values())[0]\n assert (\n await remove_device(await hass_ws_client(hass), live_device_entry.id, entry_id)\n is False\n )",
"def unlink_devices(self):\n if self.driver.wait_for_object(\"unlink_some_devices_btn\", format_specifier=[self.get_text_from_str_id(\"unlink_some_devices_btn\")],timeout=10, raise_e=False):\n self.driver.click(\"unlink_some_devices_btn\", format_specifier=[self.get_text_from_str_id(\"unlink_some_devices_btn\")])\n self.driver.wait_for_object(\"unlink_devices_cb\", timeout=20)\n devices_cbs = self.driver.find_object(\"unlink_devices_cb\", multiple=True)\n for cb in devices_cbs:\n cb.click()\n self.driver.click(\"unlink_devices_unlink_btn\")\n self.driver.wait_for_object(\"unlink_confirmation_popup_confirm_btn\", timeout=10)\n self.driver.click(\"unlink_confirmation_popup_confirm_btn\")\n self.driver.wait_for_object(\"continue_btn\", timeout=30)\n self.driver.click(\"continue_btn\")",
"def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.delete(args)",
"def delete(socket, args, config, library, cmd=False):\n files=args['<nameid>']\n ignore=args['--ignore']\n\n for nameid in files:\n receipt = library.get_receipt( nameid )\n if not receipt:\n if cmd: print \"Could not find receipt for:\",nameid\n if not ignore: return False\n continue\n\n if receipt.get_oid() == None:\n if cmd: print \"You do not have deletion permission for:\",nameid\n if not ignore: return False\n continue\n\n if cmd: print \"Delete\", receipt.get_filename(), \"?\"\n\n response = raw_input(\"Are you sure? [y/N]\")\n if response.lower() not in ['yes','y']:\n print \"File was not deleted.\"\n return False\n\n if delete_file( socket, receipt ):\n #Succeeded, so remove receipt from library\n library.remove_receipt( nameid )\n\n if cmd: print \"Deletion succeeded!\"\n elif cmd: print \"Deletion failed!\"\n\n # Return Success.\n return True",
"def removeFake(v):\n\n if len(v.fakes) > 0:\n menu.menuBanner(v)\n i = 1\n print(\" --------------------------------------------------------\")\n for x in v.fakes:\n print(f\" {i} - {x}\")\n i += 1\n print(f\" {i} - ALL\")\n print(\" --------------------------------------------------------\")\n try:\n sel = int(input(\" Enter selection you want to delete: \")) - 1\n except ValueError:\n print(\" \" + bcolors.WARNING + \"Only input integers\" + bcolors.ENDC)\n time.sleep(1)\n return\n except KeyboardInterrupt:\n return\n\n if not 0 <= sel < i:\n print(\" \" + bcolors.WARNING + str(sel + 1) + \" is not a selection\" + bcolors.ENDC)\n time.sleep(1)\n return\n\n if sel == len(v.fakes):\n v.fakes = []\n return\n\n\n bash = (\"ip addr del \" + v.fakes[sel] + \"/0 dev dummy label dummy:\" + str(sel))\n os.system(bash)\n v.fakes.pop(sel)\n return\n else:\n print(\" \" + bcolors.WARNING + \"No fake NICs\" + bcolors.ENDC)\n time.sleep(1)\n return",
"async def test_device_remove_devices(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n light: Light,\n hass_ws_client: WebSocketGenerator,\n) -> None:\n\n await init_entry(hass, ufp, [light])\n assert await async_setup_component(hass, \"config\", {})\n entity_id = \"light.test_light\"\n entry_id = ufp.entry.entry_id\n\n registry: er.EntityRegistry = er.async_get(hass)\n entity = registry.async_get(entity_id)\n assert entity is not None\n device_registry = dr.async_get(hass)\n\n live_device_entry = device_registry.async_get(entity.device_id)\n assert (\n await remove_device(await hass_ws_client(hass), live_device_entry.id, entry_id)\n is False\n )\n\n dead_device_entry = device_registry.async_get_or_create(\n config_entry_id=entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"e9:88:e7:b8:b4:40\")},\n )\n assert (\n await remove_device(await hass_ws_client(hass), dead_device_entry.id, entry_id)\n is True\n )",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"async def wipe(self, params):\n # Only import if needed\n import rpi_ws281x as ws\n\n loop = params.get('loop', True)\n colors = params.get('colors', [])\n if len(colors) < 1:\n colors.append({\n 'red': 0, 'green': 0, 'blue': 255,\n 'hold': 0, 'wait_ms': 40\n })\n if loop and len(colors) < 2:\n colors.append({\n 'red': 0, 'green': 0, 'blue': 0,\n 'hold': 0, 'wait_ms': 40\n })\n led_colors = [\n (\n ws.Color(\n color.get('red', 0),\n color.get('green', 0),\n color.get('blue', 0)\n ),\n color.get('wait_ms', 40),\n color.get('hold_ms', 0)\n )\n for color in colors\n ]\n try:\n while True:\n for (led_color, wait_ms, hold_duration) in led_colors:\n await self.lights.color_wipe(led_color, wait_ms=wait_ms)\n await asyncio.sleep(hold_duration / 1000.0)\n if not loop:\n break\n except KeyboardInterrupt:\n pass",
"def remove_all_wlan(adapter_name = \"\"):\n\n # Obtain the IDs of the specified adapter\n guid, name = _get_guid(adapter_name)\n\n # Get the list of the profiles created on the adapter\n cmd = \"%s gpl %s\" % (_wlantool_cmd, guid)\n pattern = '[\\t ]+\"([a-zA-Z0-9_-]+)\"'\n profile_list = []\n output = os.popen(cmd)\n buffer = \"\"\n for line in output:\n match_obj = re.match(pattern, line)\n if match_obj:\n profile_list.append(match_obj.group(1))\n buffer += line\n if buffer.find(\"completed successfully\") == -1:\n raise Exception(\"Unable to get list of profiles on the adapter \\\"%s\\\"\" % name)\n\n # And remove them all\n for profile in profile_list:\n cmd = \"%s dp %s %s\" % (_wlantool_cmd, guid, profile)\n output = os.popen(cmd)\n buffer = \"\".join(line for line in output)\n if buffer.find(\"completed successfully\") == -1:\n raise Exception(\"Unable to remove the profile %s from wireless adapter \\\"%s\\\"\" % (profile, name))",
"def test_delete_device(self):\n pass",
"def test_delete_device(self):\n pass"
] | [
"0.5918086",
"0.5835725",
"0.5688285",
"0.5576145",
"0.5496549",
"0.542299",
"0.54133373",
"0.5398436",
"0.5389712",
"0.5334971",
"0.53183067",
"0.5288954",
"0.52694654",
"0.52305156",
"0.52251345",
"0.52117527",
"0.5201765",
"0.5181762",
"0.51782036",
"0.51671505",
"0.5139199",
"0.5114838",
"0.5070422",
"0.5062247",
"0.50308216",
"0.49959862",
"0.49905267",
"0.49857706",
"0.4979079",
"0.4979079"
] | 0.6674705 | 0 |
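A usage sketch for the remove call documented above. Because the docstring says omitting both name and uri removes every PDD, the sketch makes that choice explicit:

```python
# Hedged sketch: guard against an accidental remove-all.
def remove_power_device(client, name=None, uri=None):
    if name is None and uri is None:
        raise ValueError("refusing implicit remove-all; pass name or uri")
    return client.fusion_api_remove_power_device(name=name, uri=uri)
```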
Removes the specified Power Delivery Device synchronously. [Arguments] | def fusion_api_remove_power_device_synchronously(self, uri, api=None, headers=None):
return self.pd.delete(uri=uri, api=api, headers=headers, param='/synchronous') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_remove_power_device(self, name=None, uri=None, api=None, headers=None):\n return self.pd.delete(name=name, uri=uri, api=api, headers=headers)",
"async def async_device_removed(event):\n if event.data[\"action\"] != \"remove\":\n return\n await async_remove_automations(hass, event.data[\"device_id\"])",
"def remove_device(self, path):\n pass",
"def removeDevice(self, node, fullDeviceName):",
"def remove_device(hass: HomeAssistant, mac: str):\n registry = dr.async_get(hass)\n device = registry.async_get_device({(DOMAIN, mac)}, None)\n if device:\n registry.async_remove_device(device.id)",
"async def _handle_device_remove(hass: HomeAssistant):\n\n async def device_registry_updated(event: Event):\n if event.data['action'] != 'update':\n return\n\n registry = hass.data['device_registry']\n hass_device = registry.async_get(event.data['device_id'])\n\n # check empty identifiers\n if not hass_device or not hass_device.identifiers:\n return\n\n identifier = next(iter(hass_device.identifiers))\n\n # handle only our devices\n if identifier[0] != DOMAIN or hass_device.name_by_user != 'delete':\n return\n\n # remove from Mi Home\n for gw in hass.data[DOMAIN].values():\n if not isinstance(gw, Gateway3):\n continue\n gw_device = gw.get_device(identifier[1])\n if not gw_device:\n continue\n gw.debug(f\"Remove device: {gw_device['did']}\")\n gw.miio.send('remove_device', [gw_device['did']])\n break\n\n # remove from Hass\n registry.async_remove_device(hass_device.id)\n\n hass.bus.async_listen('device_registry_updated', device_registry_updated)",
"def remove_powerup(powerup: str):\r\n global POWERUPS\r\n POWERUPS.remove(powerup)",
"async def test_device_remove_devices_nvr(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n hass_ws_client: WebSocketGenerator,\n) -> None:\n assert await async_setup_component(hass, \"config\", {})\n\n ufp.api.get_bootstrap = AsyncMock(return_value=ufp.api.bootstrap)\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n entry_id = ufp.entry.entry_id\n\n device_registry = dr.async_get(hass)\n\n live_device_entry = list(device_registry.devices.values())[0]\n assert (\n await remove_device(await hass_ws_client(hass), live_device_entry.id, entry_id)\n is False\n )",
"def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False",
"def deleteDevice(serial):\n swDB = switchdb.DB()\n swDB.deleteBySerial(serial)\n swDB.close()",
"def remove(self):\n\t\tcall_sdk_function('PrlBootDev_Remove', self.handle)",
"def dmcrypt_unmap(\n _uuid\n ):\n args = [\n 'cryptsetup',\n 'remove',\n _uuid\n ]\n\n try:\n command_check_call(args)\n\n except subprocess.CalledProcessError as e:\n raise Error('unable to unmap device', _uuid, e)",
"def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device",
"async def test_device_remove_devices(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n light: Light,\n hass_ws_client: WebSocketGenerator,\n) -> None:\n\n await init_entry(hass, ufp, [light])\n assert await async_setup_component(hass, \"config\", {})\n entity_id = \"light.test_light\"\n entry_id = ufp.entry.entry_id\n\n registry: er.EntityRegistry = er.async_get(hass)\n entity = registry.async_get(entity_id)\n assert entity is not None\n device_registry = dr.async_get(hass)\n\n live_device_entry = device_registry.async_get(entity.device_id)\n assert (\n await remove_device(await hass_ws_client(hass), live_device_entry.id, entry_id)\n is False\n )\n\n dead_device_entry = device_registry.async_get_or_create(\n config_entry_id=entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"e9:88:e7:b8:b4:40\")},\n )\n assert (\n await remove_device(await hass_ws_client(hass), dead_device_entry.id, entry_id)\n is True\n )",
"def remove_device(request, pk):\n device = get_object_or_404(Laptop, pk=pk)\n context = {}\n if request.method == 'POST':\n form = RemovalForm(request.POST)\n if form.is_valid():\n device.mdm_enrolled = False\n device.serial = None\n device.asset_tag = None\n device.last_ip = None\n device.last_checkin = None\n device.save()\n template = loader.get_template('default.html')\n return HttpResponse(template.render({'title': 'Device Removed',\n 'message': 'This device is no longer associated with the MDM.',\n 'EXIT_BTN': True, 'EXIT_URL': reverse(\"mdm:list\"), 'NO_FOOT': True},\n request))\n else:\n context['form'] = RemovalForm(request.POST)\n else:\n if device.serial == 'DISCONNECTED':\n context['form'] = RemovalForm(uninstalled=True)\n else:\n context['form'] = RemovalForm()\n return render(request, 'form_crispy.html', context)",
"def _DisconnectAP(self):\n disconnect_command = 'iw dev {interface} disconnect'.format(\n interface=self.interface)\n # This call may fail if we are not connected to any network.\n self._device.Call(disconnect_command)",
"def delete_device(self, device: Device) -> None:\n self._devices.pop(device.name, None)",
"def del_record(self, args):\n\n mac = MacAddress(args.mac)\n desc = self.dhcp_client_state[mac.as_redis_key()]\n print(\"Deleted mac %s with DHCP rec %s\" % (str(mac), desc))\n self.dhcp_client_state[mac.as_redis_key()] = None",
"def delete_device(self):\n # PROTECTED REGION ID(CspSubElementSubarray.delete_device) ENABLED START #\n # PROTECTED REGION END # // CspSubElementSubarray.delete_device",
"def do_charge_purchase_delete(cs, args):\n cs.charge_purchases.delete(args.charge_purchase_id)",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")",
"async def async_remove_automations(hass, device_id):\n await device_trigger.async_remove_triggers(hass, device_id)",
"def hfp_firmware_pack_item_remove(handle, org_dn, hfp_name, hw_vendor,\r\n hw_model, type):\r\n\r\n hfp_dn = org_dn + \"/fw-host-pack-\" + hfp_name\r\n dn = hfp_dn + \"/pack-image-\" + hw_vendor + \"|\" + hw_model + \"|\" + type\r\n mo = handle.query_dn(dn)\r\n if mo is None:\r\n raise ValueError(\"FirmwarePackItem '%s' does not exist\" % dn)\r\n\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n return mo",
"def erase_device(device):\n command = 'erase \"%s\"' % (device.udid,)\n _run_command(command)",
"def remove(self):\n\t\tcall_sdk_function('PrlVmDevHdPart_Remove', self.handle)",
"def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()",
"def delete_device(device):\n if device in devices.list():\n devices.delete(device)\n return '', 204\n else:\n raise BadRequest('The given device name does not exist')",
"async def test_device_remove_devices(\n hass: HomeAssistant,\n hass_ws_client: WebSocketGenerator,\n mock_config_entry: MockConfigEntry,\n mock_jellyfin: MagicMock,\n device_registry: dr.DeviceRegistry,\n) -> None:\n assert await async_setup_component(hass, \"config\", {})\n\n mock_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n device_entry = device_registry.async_get_device(\n identifiers={\n (\n DOMAIN,\n \"DEVICE-UUID\",\n )\n },\n )\n assert (\n await remove_device(\n await hass_ws_client(hass), device_entry.id, mock_config_entry.entry_id\n )\n is False\n )\n old_device_entry = device_registry.async_get_or_create(\n config_entry_id=mock_config_entry.entry_id,\n identifiers={(DOMAIN, \"OLD-DEVICE-UUID\")},\n )\n assert (\n await remove_device(\n await hass_ws_client(hass), old_device_entry.id, mock_config_entry.entry_id\n )\n is True\n )",
"def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)",
"async def async_turn_off(self, **kwargs):\n self._wrap_device.device.set_duct_zone(self._zone, False)"
] | [
"0.6188519",
"0.6048444",
"0.60359097",
"0.59953356",
"0.5765042",
"0.5729715",
"0.5692872",
"0.5631123",
"0.5551241",
"0.55478895",
"0.5492011",
"0.54828435",
"0.5462886",
"0.545424",
"0.5438797",
"0.538788",
"0.5366249",
"0.5357067",
"0.5307916",
"0.5284972",
"0.5238631",
"0.52350897",
"0.5228471",
"0.52243483",
"0.521877",
"0.5196072",
"0.5165972",
"0.5152481",
"0.5145828",
"0.51179874"
] | 0.65902627 | 0 |
Retrieves the unit identification (UID) state (on, off, unknown) of the specified power outlet or extension bar resource. The device must be an HPE iPDU component with a locator light (HPE Intelligent Load Segment, AC Module, HPE Intelligent Outlet Bar, or HPE Intelligent Outlet). [Arguments] | def fusion_api_get_power_device_uid_state(self, uri=None, api=None, headers=None):
return self.pd.get(uri=uri, api=api, headers=headers, param='/uidState') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)",
"def uber_syntax(self):\n returned = self.get_a_device_id()\n if returned:\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", body={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", body={\"ids\": DEVICE_ID})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", parameters={\"ids\": DEVICE_ID})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetailsV1\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetailsV2\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", body={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", parameters={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", ids=[DEVICE_ID])):\n returned = False\n\n return returned",
"def test_device_status(self):\n #071031031E3067\n self.ms.add_response({'\\x14071031031E3067\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.status((49, 3))\n self.assertTrue(response)",
"def read_uid(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_READ_UID, (), '', 12, 'I')",
"def read_lumi_counter(device):\n return read(device, \"gt_mp7_frame.rb.tcm_status.luminosity_seg_nr\")",
"def identify(self):\n if self.cur_uid is None:\n return\n self.ola_thread.rdm_set(self.universe.get(), self.cur_uid, 0, \n \"IDENTIFY_DEVICE\", \n lambda b, s, uid = self.cur_uid:self._rdm_set_complete(uid, b, s), \n [self.id_state.get()])",
"def fusion_api_set_power_device_uid_state(self, body, uri, api=None, headers=None):\n return self.pd.update(body=body, uri=uri, api=api, headers=headers, param='/uidState')",
"def get_state(self, *outlets):\n\n outlet_ids = []\n for outlet in outlets:\n outlet_ids.append('.'.join([self.oid, str(outlet)]))\n return self.snmp_client.snmp_get(*outlet_ids)",
"def get_uid_state(self, id_or_uri):\n uri = self._client.build_uri(id_or_uri) + \"/uidState\"\n return self._client.get(uri)",
"def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)",
"def find_device_info(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['runDestination']['targetDeviceRecord']['modelUTI']['_value']\n return result",
"def get_device_output(device_id: Optional[pulumi.Input[str]] = None,\n field_mask: Optional[pulumi.Input[Optional[str]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n registry_id: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDeviceResult]:\n ...",
"def state(self):\n\n if self._device_attribute == ATTR_INSIDE_TEMPERATURE:\n if self._api.roomTemperature == 126 or self._api.roomTemperature == None:\n return 'unavailable'\n else:\n return self._api.roomTemperature\n\n if self._device_attribute == ATTR_OUTSIDE_TEMPERATURE:\n if self._api.outdoorTemperature == 126 or self._api.outdoorTemperature == None:\n return 'unavailable'\n else:\n return self._api.outdoorTemperature\n return None",
"def read_uid(self):\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_READ_UID, (), '', 'I')",
"def getUnitType(self, id):\n self.send(\"#5\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAP800\"\n self.send(\"#7\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAP400\"\n self.send(\"#4\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"PSR1212\"\n self.send(\"#6\" + str(id) + \" SERECHO 1 \\r\")\n if self.readResponse() == \"1\":\n return \"XAPTH2\"\n return \"No Device Found\"",
"def _do_get_status(self):\n logging.info(__name__ + ' : Get status of the device.')\n result = self._execute('X')\n usage = {\n 0: \"Channel not in use\",\n 1: \"Channel used for Nitrogen level\",\n 2: \"Channel used for Helium Level (Normal pulsed operation)\",\n 3: \"Channel used for Helium Level (Continuous measurement)\",\n 9: \"Error on channel (Usually means probe unplugged)\"\n }\n # current_flowing = {\n # 0 : \"Curent not flowing in Helium Probe Wire\",\n # 1 : \"Curent not flowing in Helium Probe Wire\"\n # }\n # auto_fill_status = {\n # 00 : \"End Fill (Level > FULL)\",\n # 01 : \"Not Filling (Level < FULL, Level > FILL)\",\n # 10 : \"Filling (Level < FULL, Level > FILL)\",\n # 11 : \"Start Filling (Level < FILL)\"\n # }\n return usage.get(int(result[1]), \"Unknown\")",
"def device_selected(self, uid):\n if uid == self.cur_uid:\n print \"Already Selected\"\n return\n # This line is going to return \"DEVICE_LABEL\" so you may as well skip it\n pid_key = \"DEVICE_LABEL\"\n self.dev_label.set(\"%s (%s)\"%(self._uid_dict[uid][pid_key][\"label\"], uid))\n self.ola_thread.rdm_get(self.universe.get(), uid, 0, \"IDENTIFY_DEVICE\", \n lambda b, s, uid = uid:self._get_identify_complete(uid, b, s))\n\n if \"SUPPORTED_PARAMETERS\" not in self._uid_dict[uid]:\n self.ola_thread.rdm_get(\n self.universe.get(), uid, 0, \"SUPPORTED_PARAMETERS\",\n lambda b, l, uid = uid:self._get_pids_complete(uid, b, l))\n else:\n self._notebook.Update()\n self.cur_uid = uid",
"def redirected_opid_syntax(self):\n returned = self.get_a_device_id()\n if returned:\n if not self.valid_status_code(falcon.GetDeviceDetails(DEVICE_ID)):\n returned = False\n if not self.valid_status_code(falcon.GetDeviceDetails(ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(falcon.GetDeviceDetails(parameters={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(falcon.GetDeviceDetails(body={\"ids\": [DEVICE_ID]})):\n returned = False\n return returned",
"def fusion_api_get_power_device_power_state(self, uri=None, api=None, headers=None):\n return self.pd.get(uri=uri, api=api, headers=headers, param='/powerState')",
"def _read_device_state():\n \n try:\n _debug_print(\"Connecting to bus...\")\n i2c_bus = smbus.SMBus(_bus_id)\n\n current_state = i2c_bus.read_byte(_device_addr) & 0x0F\n\n return int(current_state)\n\n except:\n print(\"Error: There was a problem reading from the device\")\n # Best to re-raise as we can't recover from this\n raise",
"def info_equipment_reactors_get():\n equipment = _equipment_by_group(438) # 438 == Mobile Reactor\n return equipment, 200",
"def hdu_info(self):\n return self._hdusinfo",
"def device_info(device_id):\n device_info_map = listall.device_raw_info()[\"devices\"]\n for operating_system in device_info_map.keys():\n devices = device_info_map[operating_system]\n for device in devices:\n if device[\"udid\"].lower() == device_id.lower():\n return device\n return None",
"def get_power_state(self, id_or_uri):\n uri = self._client.build_uri(id_or_uri) + \"/powerState\"\n return self._client.get(uri)",
"def getLabel(device):\n cmd = \"/sbin/blkid -s LABEL -o value %s\" % device\n proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n return proc.communicate()[0].strip()",
"def process_device(spc, device):\n try:\n d = device.get();\n print(\"Processing device: \", device.name)\n me_href = d['managed-elements']['managed-element'].get('href')\n me = factory.fetch_resource(spc, me_href)\n\n # Fetch Physical Termination Points\n ptps = me.ptps.get()\n for p in ptps:\n p.get()\n\n # Fetch equipment inventory\n ehs = me.equipment_holders.get()\n for eh in ehs:\n eh.get()\n\n # Fetch software inventory\n me.software_identities.get()\n\n # Fetch relevant configuration\n try:\n device.configurations.expanded.post(xpaths=[\n '/configuration/version',\n '/configuration/routing-instances',\n '/configuration/access/radius-server',\n '/configuration/system/domain-name',\n '/configuration/routing-options/router-id',\n '/configuration/interfaces/interface[name=\"lo0\"]'])\n except:\n pass\n\n return device.name\n except:\n raise Exception(\"Failed to process %s due to %s\" % (device.name, sys.exc_info()[1]))",
"def get_humidity(intent, session):\n\n session_attributes = {}\n reprompt_text = None\n\n humidity = get_thing_state(\"MegaIf1\", \"Humidity%\")\n speech_output = \"The humidity is \" + humidity + \" percent. \" \\\n \". Goodbye.\"\n should_end_session = True\n \n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))",
"def get_device(arn=None):\n pass",
"def temperature():\n snmp.temperature()\n return 0",
"def request_identifier(self):\n question = jbus.jbus_generator_read(self.node, 0x1000, 12)\n answer = self.send_request(question)\n #print(\"Question: [\", question, \"]\")\n print(\"Answer: [\",answer,\"] LEN: \",len(answer))\n result = self.verify_response(question, answer)\n if (result == \"OK\"):\n result = {\n \"UPS_type\" : self.extract_word(answer,0),\n \"Power_KVA\" : self.extract_word(answer,1)/10,\n \"SN\" : chr(answer[10])+\n chr(answer[9])+\n chr(answer[12])+\n chr(answer[11])+\n chr(answer[14])+\n chr(answer[13])+\n chr(answer[16])+\n chr(answer[15])+\n chr(answer[18])+\n chr(answer[17])\n }\n return result\n else:\n self.error=result\n return False"
] | [
"0.53774107",
"0.5238666",
"0.52377576",
"0.51540244",
"0.5129143",
"0.50680953",
"0.50527173",
"0.49321425",
"0.48873627",
"0.47880295",
"0.4765444",
"0.47391614",
"0.4702631",
"0.47014758",
"0.46920732",
"0.46891165",
"0.46790287",
"0.46749437",
"0.46376628",
"0.46135613",
"0.46124074",
"0.46087286",
"0.45870808",
"0.45826903",
"0.45752907",
"0.44919443",
"0.44795138",
"0.44683918",
"0.4463596",
"0.44398898"
] | 0.58998233 | 0 |
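A minimal usage sketch for the record above, assuming `client` is an authenticated instance of the class that defines fusion_api_get_power_device_uid_state (so `self.pd` is its power-device REST helper); the URI below is hypothetical:

    # Hypothetical power-device URI; real values come from a prior
    # GET on /rest/power-devices.
    uri = '/rest/power-devices/example-id'
    # Delegates to self.pd.get(..., param='/uidState'), so the response
    # carries the locator-light UID state (on, off, or unknown).
    uid_state = client.fusion_api_get_power_device_uid_state(uri=uri)
    print(uid_state)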
Adds a proxy server to the appliance [Arguments] | def fusion_api_add_proxy_server(self, body, api=None, headers=None):
return self.proxyserver.add(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n self.__proxy = (proxytype,addr,port,rdns,username,password)",
"def configure_proxy(self, proxy):\n server_name = self.get_external_domain()\n tls_enabled = self.get_tls()\n ircd_enabled = self.charm_config.get(\"enable-ircd\")\n federation_enabled = self.get_federation()\n\n if tls_enabled:\n self.external_port = 443\n else:\n self.external_port = 80\n\n proxy_config = [\n {\n \"mode\": \"http\",\n \"external_port\": self.external_port,\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": 8008,\n \"subdomain\": server_name,\n },\n ]\n\n if federation_enabled:\n proxy_config.append(\n {\n \"mode\": self.get_federation_mode(),\n \"external_port\": 8448,\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": 8448,\n }\n )\n\n if ircd_enabled:\n proxy_config.append(\n {\n \"mode\": self.get_irc_mode(),\n \"external_port\": self.get_irc_port(),\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": self.irc_internal_port,\n }\n )\n\n proxy.configure(proxy_config)",
"def set_launch_args_proxy(self, launch_options: Dict[str, Any]) -> None:\r\n launch_options['args'] = [\r\n a for a in launch_options.get('args', []) if not a.startswith('--proxy-server=')] \\\r\n + [f'--proxy-server=\"{launch_options[\"proxy\"]}\"']",
"def start_proxy(args, address):\n logging.info(\"starting transparent proxy for {0}\".format(address))\n \n # Prevent packet leaks (https://lists.torproject.org/pipermail/tor-talk/2014-March/03507.html)\n insert_rule([\"OUTPUT\", \"-m\", \"conntrack\", \"--ctstate\", \"INVALID\", \"-j\", \"DROP\"])\n insert_rule([\"OUTPUT\", \"-m\", \"state\", \"--state\", \"INVALID\", \"-j\", \"DROP\"])\n\n # Add DNS route\n rule = [\"PREROUTING\", \"-s\", address, \"-p\", \"udp\", \"--dport\", \"53\",\n \"-j\", \"REDIRECT\", \"--to-ports\", str(args.dnsport)]\n \n if args.interface:\n rule += [\"-i\", args.interface]\n\n insert_rule(rule, table=\"nat\")\n\n # Add TCP route -- ignore Cuckoo result server port\n rule = [\"PREROUTING\", \"-s\", address, \"-p\", \"tcp\", \"--syn\", \"!\", \"--dport\", str(args.resultport),\n \"-j\", \"REDIRECT\", \"--to-ports\", str(args.proxyport)]\n\n if args.interface:\n rule += [\"-i\", args.interface]\n\n insert_rule(rule, table=\"nat\")",
"def add_proxy(self, app: Flask, handle_errors: bool = True, auth: list = [\"\"]) -> Flask:\n raise NotImplemented('proxys are not yet supported')\n if hasattr(app, 'HOIST_INTERNALPROXY'):\n raise HoistExistsError('hoist is already set up on app')\n\n\n app.HOIST_INTERNALPROXY = HoistProxy(app, handle_errors)\n\n @app.route('/hoist/proxy/connect', methods=['POST'])\n def hoist_proxy_connect() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALPROXY._connect, 'data')\n\n @app.route('/hoist/proxy/disconnect', methods=['POST'])\n def hoist_proxy_disconnect() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALPROXY._disconnect, 'data')\n\n\n return app",
"def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):\r\n self.__proxy = (proxytype, addr, port, rdns, username, password)",
"def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):\r\n self.__proxy = (proxytype, addr, port, rdns, username, password)",
"def configureProxy():\n # config\n port = config.get(\"proxy\", \"port\")\n allowedDomains = config.get(\"proxy\", \"alloweddomains\")\n listeningIP = config.get(\"hotspot\", \"ip\")\n # wan dns\n proxyNSConfig = \"\"\n for dnsServer in wandns:\n proxyNSConfig = f\"{proxyNSConfig}nserver {dnsServer}\\n\"\n # 3proxy configurations\n proxyConfig = f\"\"\"#!/bin/3proxy\n#daemon\npidfile /var/run/3proxy.pid\nchroot /usr/local/3proxy proxy proxy\nnscache 65536\n{proxyNSConfig}\nlog /logs/3proxy-%y%m%d.log D\nrotate 1\ncounter /count/3proxy.3cf\ninclude /conf/counters\ninclude /conf/bandlimiters\nauth iponly\nallow * * {allowedDomains}\ndeny *\nproxy -e{wanip} -i{listeningIP} -p{port}\n\"\"\"\n confFile = open(\"/etc/3proxy/3proxy.cfg\", \"w\")\n confFile.write(proxyConfig)\n confFile.close()",
"def _add_proxy(self, proxy):\t\n\t\turi = proxy.uri\n\t\tif not proxy.connected:\n\t\t\traise TypeError('Worker {} is not available'.format(uri))\n\n\t\tif not proxy.is_worker:\n\t\t\tPrint('Proxy {} is not a compatible worker. You need to subclass Worker'.format(uri))\n\t\t\treturn\n\n\t\tprint('Worker {} is available'.format(uri))\n\t\tself._uris.add(uri)\n\t\tQueue._put(self, proxy)",
"def set_proxy(self, proxy, user=None):\n proxy_handler = urllib2.ProxyHandler({'http':proxy})\n proxy_auth_handler = urllib2.ProxyBasicAuthHandler()\n if user:\n proxy_auth_handler.add_password('realm', 'host', user[0], user[1])\n \n opener = urllib2.build_opener(proxy_handler, proxy_auth_handler)\n urllib2.install_opener(opener)",
"def proxy_settings(self):\n if config.proxy_host is None or config.proxy_host == \"\":\n return\n\n proxy = urllib2.ProxyHandler({\"http\": config.proxy_host})\n opener = urllib2.build_opener(proxy)\n urllib2.install_opener(opener)",
"def setdefaultproxy(proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n global _defaultproxy\r\n _defaultproxy = (proxytype,addr,port,rdns,username,password)",
"def _swift_proxy_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('proxy')\n self._swift_install('proxy')\n self._set_onhold('proxy')\n self._final_install_touches('proxy')",
"def register_proxy(self, proxy):\n self.__proxy = proxy",
"def proxy(c, path=local.proxy_path):\r\n c = conn(c)\r\n c.run('rm {conf} -rf'.format(conf=local.proxy_conf))\r\n\r\n \"\"\" 这里需要分开安装:先安装 epel-release 之后,才能安装 其他server\r\n \"\"\"\r\n system.install(c, 'source')\r\n system.install(c, 'apt-cacher-ng')\r\n\r\n from common.disk import file_exist\r\n if not file_exist(c, local.proxy_conf):\r\n print(\"conf file {} not exist\".format(local.proxy_conf))\r\n exit(-1)\r\n\r\n c.run('mkdir -p {path}; chmod 777 {path}'.format(path=path))\r\n c.run('''curl https://www.centos.org/download/full-mirrorlist.csv \\\r\n | sed 's/^.*\"http:/http:/' | sed 's/\".*$//' | grep ^http > /etc/apt-cacher-ng/centos_mirrors''')\r\n\r\n \"\"\" 修改配置\r\n \"\"\"\r\n sed.path(local.proxy_conf)\r\n sed.grep(**{'sep': ': '})\r\n sed.append(c, '''VfilePatternEx: ^(/\\\\\\\\?release=[0-9]+&arch=.*|.*/RPM-GPG-KEY-examplevendor)$''', '# WfilePatternEx:')\r\n # sed.append(c, '''VfilePatternEx: ^/\\\\\\\\?release=[0-9]+&arch=''', '# WfilePatternEx:')\r\n sed.append(c, 'Remap-centos: file:centos_mirrors \\/centos', 'Remap-debrep', pos=-1)\r\n sed.append(c, 'PassThroughPattern: (mirrors\\\\\\\\.fedoraproject\\\\\\\\.org|some\\\\\\\\.other\\\\\\\\.repo|yet\\\\\\\\.another\\\\\\\\.repo):443', '# PassThroughPattern: private-ppa', pos=5)\r\n sed.update(c, 'CacheDir', path)\r\n\r\n \"\"\" 启动服务\r\n \"\"\"\r\n if globing.invoke:\r\n c.run('''cat << EOF > /start.sh\r\n#!/bin/bash\r\n\r\necho \"start proxy\"\r\n\r\ntouch /var/log/apt-cacher-ng/a.log\r\n#/etc/init.d/apt-cacher-ng start\r\n\r\n/usr/sbin/apt-cacher-ng -c /etc/apt-cacher-ng pidfile=/var/run/apt-cacher-ng/pid SocketPath=/var/run/apt-cacher-ng/socket foreground=0\r\ntail -f /var/log/apt-cacher-ng/*\r\nEOF''')\r\n else:\r\n c.run('systemctl restart apt-cacher-ng.service')\r\n\r\n system.help(c, '''\r\n http://{host}:3142\r\n http://{host}:3142/acng-report.html\r\n \r\n tail -f /var/log/apt-cacher-ng/*'''.format(host=c.host), 'you can visit')",
"def get_proxy_userdata ( proxy_name, elb_name, elb_protocol, app_name ) :\n return \"\"\"#!/bin/bash\ncp /etc/httpd/conf/httpd.conf /etc/httpd/conf/httpd.conf.orig &&\ncat /etc/httpd/conf/httpd.conf.orig | \\\nsed s/\\$SERVER_NAME/\"\"\" + proxy_name + \"\"\"/g | \\\nsed s/\\$PROXY_PROTOCOL/\"\"\" + elb_protocol + \"\"\"/g | \\\nsed s/\\$PROXY_PASS/\"\"\" + elb_name + \"\"\"/g | \\\nsed s/\\$APP_NAME/\"\"\" + app_name + \"\"\"/g > /etc/httpd/conf/httpd.conf\n\necho \"Updated apache configuration file\"\n\"\"\"",
"def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):\r\n global _defaultproxy\r\n _defaultproxy = (proxytype, addr, port, rdns, username, password)",
"def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):\r\n global _defaultproxy\r\n _defaultproxy = (proxytype, addr, port, rdns, username, password)",
"def test_add_proxy():\n result = False\n\n proxy = {\n \"name\": \"proxy\",\n \"address\": \"proxy.ntnxlab.local\",\n \"port\": \"8080\",\n \"http\": True,\n \"https\": True,\n \"socks\": False,\n \"username\": '',\n \"password\": '',\n }\n\n cluster_obj = prism.Cluster(api_client=_api())\n config_obj = prism.Config(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n for each_uuid in clusters:\n config_obj.set_proxy(address=proxy['address'], port=proxy['port'], name=proxy['name'], http=proxy['http'], https=proxy['https'],\n username=proxy['username'], password=proxy['password'], socks=proxy['socks'], clusteruuid=each_uuid)\n cluster_proxy = config_obj.get_proxy(clusteruuid=each_uuid)\n\n if proxy['address'] == cluster_proxy[0]['address']:\n result = True\n\n assert result",
"async def _create_proxy(self):\n self._proxy = await self._controller.fopen_tcp_proxy(\n Cellular._DRONE_WEB_API_PORT\n )\n\n self._drone_http_url = f\"http://{self._proxy.address}:{self._proxy.port}\"\n\n if self._autoconfigure and self._user_apc_token is None:\n self.logger.info(\"cellular auto pairing and configuration\")\n # generate a new anonymous user APC token and configure the cellular.\n self._fautoconfigure_with_new_token()",
"def start_proxy(self, script=None, config=None):\n wait = 5\n if self.remote is True:\n wait = 20\n ignore_hostname = os.getenv('proxy_hostname_ignore', '')\n\n config = config or {}\n script_path = None\n status_code = ''\n if not script:\n script = 'har_logging'\n\n if script == 'har_logging':\n self.log_output('Starting mitmdump proxy server with har logging')\n script_path = self.har_dump_path\n elif script == 'blacklist':\n self.log_output('Starting mitmdump proxy server with blacklisting script')\n script_path = self.blacklister_path\n status_code = '403'\n elif script == 'empty_response':\n self.log_output('Starting mitmdump proxy server with empty response script')\n script_path = self.empty_response_path\n elif script == 'har_and_blacklist':\n self.log_output('Starting mitmdump proxy server with blacklisting and '\n 'har logging script')\n script_path = self.har_blacklist_path\n status_code = '403'\n elif script == 'json_resp_field_rewriter':\n self.log_output('Starting mitmdump proxy server with json response'\n 'field rewrite script enabled')\n script_path = self.json_resp_rewrite_path\n elif script == 'response_replace':\n self.log_output('Starting mitmdump proxy server with response'\n 'replace script enabled')\n script_path = self.response_replace_path\n elif script == 'request_throttle':\n self.log_output('Starting mitmdump proxy server with request throttle '\n 'enabled ')\n script_path = self.request_throttle_path\n elif script == 'har_logging_no_replace':\n self.log_output('Starting mitmdump proxy server with har logging, no replace')\n script_path = self.har_dump_no_replace_path\n else:\n raise Exception('Unknown proxy script provided.')\n\n fixture_path = self.fixtures_dir + config.get('fixture_file', '')\n fixture_path_two = self.fixtures_dir + config.get('fixture_file_two', '')\n command = (\"python {0}/proxy_launcher.py \"\n \"--ulimit={1} --python3_path={2} --har_dump_path={3} \"\n \"--har_path={4} --proxy_port={5} --script_path={6} \"\n .format(\n self.path_to_scripts, self.ulimit_s, self.python3_path,\n self.har_dump_path, self.har_path, self.proxy_port, script_path))\n if self.remote is True:\n command = \"{0} --mode=transparent\".format(command)\n command = (\"{0} \"\n \"--status_code={1} \"\n \"--field_name={2} --field_value='{3}' \"\n \"--partial_url='{4}' --partial_url_2='{5}' \"\n \"--fixture_path='{6}' --fixture_path_2='{7}' \"\n \"--run_identifier='{8}' \"\n \"--ignore_hostname={9} &\"\n .format(\n command,\n config.get('status_code', status_code),\n config.get('field_name', ''),\n config.get('field_value', ''),\n config.get('partial_url', ''),\n config.get('partial_url_2', ''),\n fixture_path,\n fixture_path_two,\n config.get('run_identifier', ''),\n ignore_hostname))\n self.run_command(command)\n self.log_output(\"Waiting for {0}s after proxy start\".format(wait))\n time.sleep(wait)\n return self",
"def myproxy_server(self):\n return self.__get_option('myproxy_server')",
"def entry_point(proxy_port_number):\n\n setup_sockets(proxy_port_number)\n print(\"*\" * 50)\n print(\"[entry_point] Implement me!\")\n print(\"*\" * 50)\n return None",
"def set_proxy_with_environment_variable():\r\n\r\n logging.debug('set_proxy_with_environment_variable()')\r\n\r\n proxies['http'] = os.getenv('HTTP_PROXY','http://0.0.0.0:80/')\r\n proxies['https'] = os.getenv('HTTPS_PROXY','http://0.0.0.0:80/')",
"def proxy_host(self, proxy_host: ConfigNodePropertyString):\n\n self._proxy_host = proxy_host",
"def register_with_proxy(self, proxy: CommandProxy):\n return proxy.argument(*self.name_or_flags, **self.kwargs)",
"def attach_new_proxy(self, proxy):\n self.detach_current_proxy()\n self.attach_proxy(proxy)",
"def run_proxy(port, address=\"\", start_ioloop=True):\n app = tornado.web.Application([\n (r'.*', ProxyHandler),\n ])\n app.listen(port,address=address,ssl_options={\"certfile\":\"key.pem\",\"keyfile\":\"key.pem\"})\n ioloop = tornado.ioloop.IOLoop.instance()\n if start_ioloop:\n ioloop.start()",
"def push_proxy(self, proxy):\n with self.lock:\n self.proxies.append(proxy)",
"def set_proxy(self, host: str, port: int,\n proxy_type=ProxyTypes.Http,\n secret='', # for Mtproto\n username='',\n password='',\n http_only=False, # For HTTP: Pass true, if the proxy supports only HTTP requests and doesn't support\n # transparent TCP connections via HTTP CONNECT method.\n check_proxy=True) -> None:\n self.remove_proxy()\n\n proxy_type_obj = {\n '@type': proxy_type,\n 'secret': secret,\n 'http_only': http_only,\n 'username': username,\n 'password': password,\n }\n\n self.call_method('addProxy', server=host, port=port, enable=True, type=proxy_type_obj)\n\n if check_proxy:\n self.check_proxy()"
] | [
"0.6480587",
"0.63064706",
"0.6297876",
"0.6274107",
"0.6256712",
"0.6220115",
"0.6220115",
"0.62108266",
"0.6096157",
"0.60532874",
"0.59750015",
"0.58875364",
"0.5885344",
"0.5819008",
"0.5797923",
"0.57936794",
"0.57888424",
"0.57888424",
"0.57849467",
"0.5771609",
"0.5730738",
"0.5727718",
"0.57078385",
"0.5692409",
"0.5687615",
"0.5662105",
"0.5629375",
"0.5585451",
"0.5574117",
"0.5573582"
] | 0.685254 | 0 |
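A similar sketch for fusion_api_add_proxy_server, assuming the same authenticated `client` object; the body fields shown are illustrative placeholders, not a confirmed schema:

    # Illustrative proxy settings; the field names are assumptions, so
    # check the appliance REST schema for the exact body layout.
    proxy_body = {
        'server': 'proxy.example.com',  # hypothetical proxy host
        'port': 8080,
    }
    # Delegates to self.proxyserver.add(body, api, headers).
    response = client.fusion_api_add_proxy_server(body=proxy_body)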
Adds a Rack. [Arguments] | def fusion_api_add_rack(self, body, api=None, headers=None):
return self.rack.create(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, arguments):\n url = arguments['<location>']\n if url:\n name = arguments['<name>']\n else:\n url = arguments['<name>']\n name = None\n version = arguments['--box-version']\n force = arguments['--force']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n utils.add_box(url, name=name, version=version, force=force, requests_kwargs=requests_kwargs)",
"def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def fusion_api_add_rack_manager(self, body, api=None, headers=None):\n return self.rackmanager.post(body, api, headers)",
"def add(self, uri):\n yield from self.command('add \"{}\"'.format(uri))\n return True",
"def add(self, middleware):\n pass # pragma: no cover",
"def add(self, filename, *args):\n return self.cmd('add', filename, *args)",
"def create_rack(self, datacenter, name, vlan_id_min, vlan_id_max, nrsq):\n log.info(\"Adding rack %s...\" % name)\n rack = Rack.builder(self.__context, datacenter) \\\n .name(name) \\\n .vlanIdMin(vlan_id_min) \\\n .vlanIdMax(vlan_id_max) \\\n .nrsq(nrsq) \\\n .build()\n rack.save()\n return rack",
"def add(self, uri, name):\n pass",
"def add_frame(*args):\n return _ida_frame.add_frame(*args)",
"def add(self, name: str, binary: str) -> None:\n\n def func(\n args_as_list: List[str], capture_output: bool = False, dir_to_execute: str = None\n ) -> List[str]:\n \"\"\"Provide the adapter arguments as a list via \"args_as_list\".\"\"\"\n cmd = [binary] + args_as_list\n return self._run(cmd, capture_output, dir_to_execute)\n\n self.adapters[name] = func",
"def register_middleware(self, middleware, opts={}):\n self._middleware.append((middleware, opts))",
"def add(name, **spider_args):",
"def _add_argument(self, args=''):\n\n sys.argv += args.split(' ')",
"def add(*args, **kwargs): # real signature unknown\n pass",
"def _add(args):\n\n fs = disdat.fs.DisdatFS()\n\n if not fs.in_context():\n _logger.warning('Not in a data context')\n return\n\n _ = api.add(fs._curr_context.get_local_name(),\n args.bundle,\n args.path_name,\n tags=common.parse_args_tags(args.tag))\n\n return",
"def l7pool_add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n pool_main = {\n 'name': args.get('name'),\n 'loadBalancingAlgorithm': args.get('method'),\n 'protocol': args.get('protocol')\n }\n\n pool_members = list(args.get('server'))\n\n pool_health = {\n 'interval': args.get('healthinterval'),\n 'timeout': args.get('healthtimeout'),\n 'maxRetries': args.get('healthretry'),\n 'urlPath': args.get('healthpath')\n }\n\n pool_sticky = {\n 'type': args.get('sticky')\n }\n\n try:\n mgr.add_lb_l7_pool(uuid, pool_main, pool_members, pool_health, pool_sticky)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def add_env(self, env):\n pass",
"def add_route(app, *args):\n for route in args:\n app.router.add_route(route[0], route[1], route[2])",
"def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)",
"def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)",
"def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)",
"def add_argument(self, *args: Any, **kwargs: Any) -> None:\n self._arguments.append((args, kwargs))",
"def add(self, url, **params):\n\n if 'tags' in params and isinstance(params['tags'], basestring):\n params['tags'] = params['tags'].split(',')\n\n self.queue('add', url=url, **params)",
"def add_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> None:\n messages: List[JSON] = _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_add_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )\n\n for message in messages:\n if \"message\" in message:\n raise DBWriteException(message=message[\"message\"])",
"def add_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n print filename\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = set(metadata.get(\"tags\", []))\n tags.add(tag_name)\n metadata[\"tags\"] = list(tags)\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"added\", 200",
"def add(self, *args):\n pass",
"def add(self, *args):\n pass",
"def append(self, argument, typehint = None):\n\n if typehint == 'b':\n binary = OSCBlob(argument)\n else:\n binary = OSCArgument(argument)\n\n self.typetags = self.typetags + binary[0]\n self.rawAppend(binary[1])",
"def _do_add_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n block_type = int(args[4])\r\n starting_address = int(args[5])\r\n length = int(args[6])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.add_block(name, block_type, starting_address, length)\r\n return name",
"def add_argument(self, parser):\n parser.add_argument(*self.args, **self.kwargs)"
] | [
"0.5927713",
"0.56901604",
"0.5436218",
"0.5289482",
"0.52472395",
"0.51888865",
"0.51383805",
"0.5118685",
"0.50398815",
"0.49658102",
"0.49425036",
"0.49303275",
"0.49211237",
"0.49207878",
"0.48921698",
"0.48837593",
"0.48635554",
"0.48410836",
"0.48392603",
"0.48392603",
"0.48392603",
"0.48289475",
"0.47855738",
"0.47494388",
"0.47121552",
"0.4709664",
"0.4709664",
"0.46883625",
"0.468752",
"0.46852735"
] | 0.6106261 | 0 |
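A sketch of creating a Rack with fusion_api_add_rack, again assuming an authenticated `client`; the rack attributes are hypothetical:

    rack_body = {
        'name': 'Rack-Example-01',  # hypothetical rack name
        'serialNumber': 'SN-0000',  # hypothetical serial
    }
    # Delegates to self.rack.create(body, api, headers).
    response = client.fusion_api_add_rack(body=rack_body)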
Updates a Rack. [Arguments] | def fusion_api_edit_rack(self, body, uri, api=None, headers=None):
return self.rack.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(*args):",
"def fusion_api_patch_rack_manager(self, body, uri, api=None, headers=None):\n return self.rackmanager.patch(body=body, uri=uri, api=api, headers=headers)",
"def update(self, args):\n pass",
"def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )",
"def update(openstack_resource, args):\n args = reset_dict_empty_keys(args)\n openstack_resource.update(args)",
"def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def PUT(self, env, start_response):\n # checking params available\n AMZ_ACL = set(['HTTP_X_AMZ_GRANT_READ',\n 'HTTP_X_AMZ_GRANT_WRITE',\n 'HTTP_X_AMZ_GRANT_READ_ACP',\n 'HTTP_X_AMZ_GRANT_WRITE_ACP',\n 'HTTP_X_AMZ_GRANT_FULL_CONTROL'])\n qs = env.get('QUERY_STRING', '')\n args = urlparse.parse_qs(qs, 1)\n if not args:\n if not self.validate_bucket_name(self.container_name):\n return self.get_err_response('InvalidBucketName')\n\n if not self.is_unique(self.container_name):\n return self.get_err_response('BucketAlreadyExists')\n\n # to create a new one\n if 'HTTP_X_AMZ_ACL' in env:\n amz_acl = env['HTTP_X_AMZ_ACL']\n translated_acl = self.swift_acl_translate(canned=amz_acl)\n for header, value in translated_acl:\n env[header] = value\n elif AMZ_ACL & set(env.keys()):\n acld = dict()\n if 'HTTP_X_AMZ_GRANT_READ' in env.keys():\n acld['read'] = self.keyvalue2dict(env['HTTP_X_AMZ_GRANT_READ'])\n if 'HTTP_X_AMZ_GRANT_WRITE' in env.keys():\n acld['write'] = self.keyvalue2dict(env['HTTP_X_AMZ_GRANT_WRITE'])\n if 'HTTP_X_AMZ_GRANT_FULL_CONTROL' in env.keys():\n acld['full'] = self.keyvalue2dict(env['HTTP_X_AMZ_GRANT_FULL_CONTROL'])\n translated_acl = self.swift_acl_translate(acl=acld)\n for header, value in translated_acl:\n env[header] = value\n\n # modify env put to swift\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if status != HTTP_CREATED:\n if status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n elif status == HTTP_ACCEPTED:\n return self.get_err_response('BucketAlreadyExists')\n else:\n return self.get_err_response('InvalidURI')\n\n resp = Response()\n resp.headers['Location'] = self.container_name\n resp.status = HTTP_OK\n return resp\n\n if len(args) > 1:\n return self.get_err_response('InvalidURI')\n\n # now args only 1\n action = args.keys().pop()\n if action == 'acl':\n # put acl\n acl = env['wsgi.input'].read()\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_ACL'] = quote(acl)\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'cors':\n # put cors\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_ORIGIN'] = ','.join([i.text for i in bodye.xpath('/CORSConfiguration/CORSRule/AllowedOrigin')])\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_MAX_AGE'] = ','.join([i.text for i in bodye.xpath('/CORSConfiguration/CORSRule/MaxAgeSeconds')])\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_EXPOSE_HEADERS'] = ','.join([i.text for i in bodye.xpath('/CORSConfiguration/CORSRule/ExposeHeader')])\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_METHOD'] = ','.join(i.text for i in bodye.xpath('/CORSConfiguration/CORSRule/AllowedMethod'))\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.headers['Location'] = self.container_name\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n\n elif action == 'lifecycle':\n # put lifecycle\n container_info = get_container_info(env, self.app)\n if 
container_info['versions']:\n return self.get_err_response('AccessDenied')\n\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n\n tat = bodye.xpath('/LifecycleConfiguration/Rule/Transition/Date')\n env['HTTP_X_CONTAINER_META_TRANS_AT'] = tat[0].text if tat else ''\n tafter = bodye.xpath('/LifecycleConfiguration/Rule/Transition/Days')\n env['HTTP_X_CONTAINER_META_TRANS_AFTER'] = tafter[0].text if tafter else ''\n trans = bodye.xpath('/LifecycleConfiguration/Rule/Transition/StorageClass')\n env['HTTP_X_CONTAINER_META_TRANS_CLASS'] = trans[0].text if trans else ''\n\n at = bodye.xpath('/LifecycleConfiguration/Rule/Expiration/Date')\n env['HTTP_X_CONTAINER_META_EXPIRATION_AT'] = at[0].text if at else ''\n after = bodye.xpath('/LifecycleConfiguration/Rule/Expiration/Days')\n env['HTTP_X_CONTAINER_META_EXPIRATION_AFTER'] = after[0].text if after else ''\n prefix = bodye.xpath('/LifecycleConfiguration/Rule/Prefix')\n env['HTTP_X_CONTAINER_META_EXPIRATION_PREFIX'] = prefix[0].text if prefix else ''\n stat = bodye.xpath('/LifecycleConfiguration/Rule/Status')\n env['HTTP_X_CONTAINER_META_EXPIRATION_STATUS'] = stat[0].text if stat else ''\n\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'policy':\n # put policy\n json = env['wsgi.input'].read()\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_POLICY'] = quote(json)\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'logging':\n # put logging\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n target = bodye.xpath('/BucketLoggingStatus/LoggingEnabled/TargetBucket')\n if target:\n env['HTTP_X_CONTAINER_META_LOGGING_TARGET'] = target[0].text\n prefix = bodye.xpath('/BucketLoggingStatus/LoggingEnabled/TargetPrefix')\n if prefix:\n env['HTTP_X_CONTAINER_META_LOGGING_PREFIX'] = prefix[0].text\n else:\n env['HTTP_X_CONTAINER_META_LOGGING_TARGET'] = ''\n env['HTTP_X_CONTAINER_META_LOGGING_PREFIX'] = ''\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'notification':\n # put it\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n topic = bodye.xpath('/NotificationConfiguration/TopicConfiguration/Topic')\n event = bodye.xpath('/NotificationConfiguration/TopicConfiguration/Event')\n if not topic or not event:\n return self.get_err_response('InvalidArgument')\n\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n\n env['HTTP_CONTAINER_META_NOTI_TOPIC'] = topic[0].text\n env['HTTP_CONTAINER_META_NOTI_EVENT'] = event[0].text\n\n env['HTTP_X_CONTAINER_META_NOTI'] = quote(body)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = 
Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'tagging':\n # put tagging\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n for tag in bodye.xpath('/Tagging/TagSet/Tag'):\n key = tag.xpath('Key')[0].text\n value = tag.xpath('Key')[0].text + tag.xpath('Value')[0].text\n env['HTTP_X_CONTAINER_META_TAG_%s' % key.upper()] = value\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n\n elif action == 'requestPayment':\n # put it\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n target = bodye.xpath('/RequestPaymentConfiguration/Payer')\n\n if not target or target[0].text not in ('BucketOwner', 'Requester'):\n return self.get_err_response('InvalidArgument')\n \n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n\n env['HTTP_X_CONTAINER_META_PAYMENT'] = quote(body)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n\n elif action == 'versioning':\n bodye = self.xmlbody2elem(env['wsgi.input'].read())\n status = bodye.xpath('/VersioningConfiguration/Status')\n if status:\n status = status[0].text\n\n env['REQUEST_METHOD'] = 'POST'\n env['HTTP_X_VERSIONS_LOCATION'] = self.version_name(self.container_name) if status == 'Enabled' else ''\n env['QUERY_STRING'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n path = '/v1/AUTH_%s/%s' % (self.account_name, self.version_name(self.container_name))\n env2 = copyenv(env, method='PUT', path=path, query_string='')\n body_iter2 = self._app_call(env2)\n status2 = self._get_status_int()\n if is_success(status) and is_success(status2):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'website':\n # put website\n body = env['wsgi.input'].read()\n\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n\n env['HTTP_X_CONTAINER_META_WEBSITE'] = quote(body)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n else:\n return self.get_err_response('InvalidURI')",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def UPDATE(self, req):\n container_partition, containers = self.app.container_ring.get_nodes(\n self.account_name, self.container_name)\n # Since this isn't client facing, expect callers to supply an index\n policy_index = req.headers['X-Backend-Storage-Policy-Index']\n headers = self._backend_requests(\n req, len(containers), account_partition=None, accounts=[],\n policy_index=policy_index)\n return self.make_requests(\n req, self.app.container_ring, container_partition, 'UPDATE',\n req.swift_entity_path, headers, body=req.body)",
"def update(self, params):",
"def update(self, *args, **kw):\n pass",
"def update(self, data: bytes):\n self.send(data)",
"async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def update(self, **kwargs):\n return self.client.api.update_container(self.id, **kwargs)",
"def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))",
"def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)",
"def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)",
"def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)",
"def update(self, *args, **kwargs):",
"def update(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n\n existing = client.read(path)\n if existing is None:\n existing = {}\n else:\n existing = existing[\"data\"]\n\n existing.update(kwargs)\n\n client.write(path, **existing)",
"def update(self, adt=None, url=None, params=None):\n if not self._id_exists():\n abort(404, f\"Application with ID {self.app_id} does not exist\")\n elif not self.engine.app_list:\n abort(404, \"There are no currently running applications\")\n\n path = self._get_path(adt, url)\n tpl, adaps = self._validate(path, params, validate_only=True)\n try:\n self.engine.update(self.app_id, tpl, adaps)\n except Exception as error:\n abort(500, f\"Error while updating: {error}\")\n\n return {\"message\": f\"Application {self.app_id} successfully updated\"}",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass"
] | [
"0.5774131",
"0.5578522",
"0.55758435",
"0.5526201",
"0.5488171",
"0.54531944",
"0.5292256",
"0.5292256",
"0.5292256",
"0.5292256",
"0.5292256",
"0.5292256",
"0.52577055",
"0.5245182",
"0.5232308",
"0.52314997",
"0.5219298",
"0.52054316",
"0.5188376",
"0.51276726",
"0.51169217",
"0.5114415",
"0.51098007",
"0.5097331",
"0.5091163",
"0.508112",
"0.50703776",
"0.5069807",
"0.5069807",
"0.5069807"
] | 0.6490726 | 0 |
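A sketch chaining create and update, assuming the create call (or a GET) returns a dict-like rack resource whose 'uri' key identifies it; all names are illustrative:

    rack = client.fusion_api_add_rack(body={'name': 'Rack-Example-01'})
    rack['name'] = 'Rack-Example-01-renamed'  # assumes a mutable dict response
    # Delegates to self.rack.update(body, uri, api, headers).
    response = client.fusion_api_edit_rack(body=rack, uri=rack['uri'])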
Removes a Rack. If neither name nor uri is specified, all Racks are removed. [Arguments] | def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):
return self.rack.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def remove(name):",
"def fusion_api_remove_switch(self, name=None, uri=None, api=None, headers=None):\n return self.switch.delete(name, uri, api, headers)",
"def remove(self, uri):\n\n uri = uri.strip('/')\n if self.exists(uri):\n parts = uri.rsplit(\"/\", 1)\n if len(parts) == 1:\n self.nodes.pop(parts[0])\n else:\n node = self.get(parts[0])\n node.pop(parts[1], None)",
"def delete_stack(Name=None):\n pass",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def rm(args):\n args.delete = True\n return remove(args)",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None):\n return self.driver.delete(name, uri, api, headers)",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)",
"def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def remove(url: str):\n authenticated = credentials.authenticate(url)\n REMOVER_REGISTRY.get_handler(authenticated.scheme).remove(authenticated)",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def rm(self, s3uri, **kwargs):\n return self.exec_command('rm %s' % (s3uri), **kwargs)",
"def rm(self, *args, **kwargs):\n return self.unload(*args, **kwargs)",
"def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vsnrange.delete(name, uri, api, headers)",
"def remove(self, name):\n raise NotImplementedError",
"def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)",
"def delete_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> None:\n _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_delete_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )",
"def remove(self, name: str) -> None:\n del self.components[name]",
"def rm(self, name: str) -> None:\n path = self.get_path(name)\n if os.path.exists(path):\n os.remove(path)",
"def remove(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()",
"def rm(self, uri):\n path = osaka.utils.get_uri_path(uri)\n try:\n osaka.utils.LOGGER.debug(\"Removing {0} as a file\".format(uri))\n self.webdav.delete(path)\n except Exception as e:\n osaka.utils.LOGGER.debug(\n \"Removing {0} as a directory, file encountered error {1}\".format(uri, e)\n )\n self.webdav.rmdir(path)",
"def RemoveBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n # Expand bucket name wildcards, if any.\n for uri_str in args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if uri.object_name:\n raise CommandException('\"rb\" command requires a URI with no object '\n 'name')\n print 'Removing %s...' % uri\n uri.delete_bucket(headers)",
"def remove_ruleset(args, rulesengine_db):\n import os\n from src.praxxis.sqlite import sqlite_rulesengine\n from src.praxxis.rulesengine import rules\n\n if hasattr(args, \"name\"):\n name = args.name\n else:\n name = args\n\n name = rules.get_ruleset_by_ordinal(name, rulesengine_db)\n\n path = sqlite_rulesengine.get_ruleset_path(rulesengine_db, name)\n\n if os.path.isfile(path):\n os.remove(path)\n sqlite_rulesengine.remove_ruleset(rulesengine_db, name)\n else:\n from src.praxxis.util import error\n raise error.RulesetNotFoundError(name)\n\n return name",
"def remove(self, *names):\n for name in names:\n self._storage.pop(name, None)",
"def remove(ctx, schain_name):\n skale = ctx.obj['skale']\n skale.manager.delete_schain(schain_name, wait_for=True,\n gas_price=4500000000)\n print(f'sChain {schain_name} removed!')",
"def remove(self, name):\n self.rpc.call(MsfRpcMethod.DbDelWorkspace, [name])"
] | [
"0.66184926",
"0.58236885",
"0.57525504",
"0.5575144",
"0.55182993",
"0.55122954",
"0.5508407",
"0.5438682",
"0.542369",
"0.541744",
"0.54139674",
"0.54103285",
"0.53904045",
"0.5386714",
"0.5372913",
"0.5354262",
"0.53232646",
"0.5298272",
"0.5291884",
"0.5280291",
"0.5249628",
"0.5234167",
"0.52196217",
"0.51995564",
"0.51988083",
"0.51888937",
"0.5180402",
"0.51571095",
"0.5148875",
"0.5138829"
] | 0.74977267 | 0 |
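A sketch of both removal modes of fusion_api_remove_rack, under the same assumptions as the earlier rack examples:

    # Remove a single rack by name (a uri works as an alternative selector).
    client.fusion_api_remove_rack(name='Rack-Example-01')
    # Calling it with neither name nor uri removes all racks, so guard
    # such calls carefully:
    # client.fusion_api_remove_rack()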
Creates a remote syslog configuration. Note that the API documentation was incomplete when this was created. [Arguments] | def fusion_api_configure_remote_syslog(self, body, api=None, headers=None):
return self.remote_syslog.create(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def syslog_source(handle, faults=\"enabled\", audits=\"enabled\",\n events=\"enabled\"):\n\n from ucsmsdk.mometa.comm.CommSyslogSource import CommSyslogSource\n\n mo = CommSyslogSource(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n faults=faults,\n audits=audits,\n events=events)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def syslog_local_console(handle, admin_state, severity=\"emergencies\"):\n\n from ucsmsdk.mometa.comm.CommSyslogConsole import CommSyslogConsole\n\n mo = CommSyslogConsole(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n admin_state=admin_state, severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def process_syslog_message(self, request: Tuple[bytes, socket]):\n # Parse data from socket request\n message = bytes.decode(request[0].strip())\n source_ip_address, source_port = request[1].getsockname()\n message_list = message.split(\"-\")\n\n # Store it in a data structure\n message_dict = dict()\n message_dict[\"src_port\"] = source_port\n message_dict[\"src_ip\"] = source_ip_address\n message_dict[\"time\"] = message_list[0].split(\":\", 1)[1].split(\": \")[0].strip()\n message_dict[\"level\"] = int(message_list[1])\n message_dict[\"syslog\"] = message_list[2]\n\n # Save to mongo\n devices = Device.objects(src_ip=source_ip_address)\n if not devices:\n device = Device(src_ip=source_ip_address, src_port=source_port)\n else:\n device = devices[0]\n\n # Save syslog to database\n syslog = Syslog(**message_dict)\n syslog.save()\n message_dict[\"syslog_id\"] = str(syslog.id)\n\n # Send message\n response = self.post_message(message=message_dict)\n\n # Get the slack thread id and save it to the syslog\n thread_ts = response.data[\"ts\"]\n syslog.thread_ts = thread_ts\n syslog.save()\n\n # Reference is in the device and save the device\n device.syslogs.append(syslog)\n device.syslog_count += 1\n device.save()",
"def syslog_remote_enable(handle, name, hostname,\n severity=\"emergencies\", forwarding_facility=\"local0\"):\n\n from ucsmsdk.mometa.comm.CommSyslogClient import CommSyslogClient\n\n mo = CommSyslogClient(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n forwarding_facility=forwarding_facility,\n hostname=hostname, admin_state=\"enabled\",\n severity=severity, name=name)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def syslog(message, level=LEVEL['notice'], facility=FACILITY['daemon'],\n\thost='localhost', port=514):\n\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tdata = '<%d>%s' % (level + facility*8, message)\n\tsock.sendto(data, (host, port))\n\tsock.close()",
"def send_syslog(attacker_ip, syslog_server=\"127.0.0.1\",\n syslog_port=514):\n logger = logging.getLogger(\"flytrap\")\n logger.setLevel(logging.CRITICAL)\n handler = logging.handlers.SysLogHandler(address=(syslog_server,\n syslog_port))\n logger.addHandler(handler)\n logger.critical(\"flytrap: \" + attacker_ip + \" took the bait!\")",
"def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def syslog_send_notification(manager: SyslogManager, min_severity: int):\n message = demisto.args().get('message', '')\n entry = demisto.args().get('entry')\n ignore_add_url = demisto.args().get('ignoreAddURL', False)\n log_level = demisto.args().get('level', 'INFO')\n severity = demisto.args().get('severity') # From server\n message_type = demisto.args().get('messageType', '') # From server\n\n if severity:\n try:\n severity = int(severity)\n except Exception:\n severity = None\n\n if message_type == INCIDENT_OPENED and (severity is not None and severity < min_severity):\n return\n\n if not message:\n message = ''\n\n message = message.replace('\\n', ' ').replace('\\r', ' ').replace('`', '')\n investigation = demisto.investigation()\n if investigation:\n investigation_id = investigation.get('id')\n if entry:\n message = f'{entry}, {message}'\n message = f'{investigation_id}, {message}'\n\n if ignore_add_url and isinstance(ignore_add_url, str):\n ignore_add_url = bool(strtobool(ignore_add_url))\n if not ignore_add_url:\n investigation = demisto.investigation()\n server_links = demisto.demistoUrls()\n if investigation:\n if investigation.get('type') != PLAYGROUND_INVESTIGATION_TYPE:\n link = server_links.get('warRoom')\n if link:\n if entry:\n link += '/' + entry\n message += f' {link}'\n else:\n link = server_links.get('server', '')\n if link:\n message += f' {link}#/home'\n\n if not message:\n raise ValueError('No message received')\n\n send_log(manager, message, log_level)\n\n demisto.results('Message sent to Syslog successfully.')",
"def logger(msg, tag=None):\n import syslog\n if not tag:\n from sys import argv\n from os.path import basename\n tag = basename(argv[0])\n syslog.openlog(tag)\n syslog.syslog(str(msg))",
"def syslog(ctx, config):\n if ctx.archive is None:\n # disable this whole feature if we're not going to archive the data anyway\n yield\n return\n\n log.info('Starting syslog monitoring...')\n\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'mkdir', '-p', '-m0755', '--',\n '{adir}/syslog'.format(adir=archive_dir),\n ],\n wait=False,\n )\n )\n\n CONF = '/etc/rsyslog.d/80-cephtest.conf'\n conf_fp = StringIO('''\nkern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat\n*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat\n'''.format(adir=archive_dir))\n try:\n for rem in ctx.cluster.remotes.iterkeys():\n misc.sudo_write_file(\n remote=rem,\n path=CONF,\n data=conf_fp,\n )\n conf_fp.seek(0)\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'service',\n # a mere reload (SIGHUP) doesn't seem to make\n # rsyslog open the files\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n\n yield\n finally:\n log.info('Shutting down syslog monitoring...')\n\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'rm',\n '-f',\n '--',\n CONF,\n run.Raw('&&'),\n 'sudo',\n 'service',\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n # race condition: nothing actually says rsyslog had time to\n # flush the file fully. oh well.\n\n log.info('Checking logs for errors...')\n for rem in ctx.cluster.remotes.iterkeys():\n log.debug('Checking %s', rem.name)\n r = rem.run(\n args=[\n 'egrep', '--binary-files=text',\n '\\\\bBUG\\\\b|\\\\bINFO\\\\b|\\\\bDEADLOCK\\\\b',\n run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),\n run.Raw('|'),\n 'grep', '-v', 'task .* blocked for more than .* seconds',\n run.Raw('|'),\n 'grep', '-v', 'lockdep is turned off',\n run.Raw('|'),\n 'grep', '-v', 'trying to register non-static key',\n run.Raw('|'),\n 'grep', '-v', 'DEBUG: fsize', # xfs_fsr\n run.Raw('|'),\n 'grep', '-v', 'CRON', # ignore cron noise\n run.Raw('|'),\n 'grep', '-v', 'BUG: bad unlock balance detected', # #6097\n run.Raw('|'),\n 'grep', '-v', 'inconsistent lock state', # FIXME see #2523\n run.Raw('|'),\n 'grep', '-v', '*** DEADLOCK ***', # part of lockdep output\n run.Raw('|'),\n 'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147\n run.Raw('|'),\n 'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',\n run.Raw('|'),\n 'grep', '-v', 'INFO: recovery required on readonly',\n run.Raw('|'),\n 'head', '-n', '1',\n ],\n stdout=StringIO(),\n )\n stdout = r.stdout.getvalue()\n if stdout != '':\n log.error('Error in syslog on %s: %s', rem.name, stdout)\n set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = \\\n \"'{error}' in syslog\".format(error=stdout)\n\n log.info('Compressing syslogs...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'find',\n '{adir}/syslog'.format(adir=archive_dir),\n '-name',\n '*.log',\n '-print0',\n run.Raw('|'),\n 'sudo',\n 'xargs',\n '-0',\n '--no-run-if-empty',\n '--',\n 'gzip',\n '--',\n ],\n wait=False,\n ),\n )",
"def syslog_local_monitor(handle, admin_state, severity=\"emergencies\"):\n\n from ucsmsdk.mometa.comm.CommSyslogMonitor import CommSyslogMonitor\n\n mo = CommSyslogMonitor(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n admin_state=admin_state,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def fusion_api_update_remote_syslog_configuration(self, body, api=None, headers=None, param=None):\n return self.remote_syslog.update(body, api, headers, param)",
"def configure_syslog_server(client_session, server, port, protocol):\n\n syslog_body_dict = { 'syslogServer': server, 'port': port, 'protocol': protocol }\n\n cfg_result = client_session.update('systemSyslogServer', request_body_dict={'syslogserver': syslog_body_dict})\n\n if cfg_result['status'] == 204:\n return True\n else:\n return False",
"def setup_syslog_handler(facility, fmt):\n # Check provided facility is valid, otherwise fall back to user\n if find_facility(facility) == -1:\n facility = \"user\"\n\n handler = None # So we can check for success\n # handlerPaths = Linux/BSD interface, MAC OSX interface\n handlerPaths = ['/dev/log', '/var/run/syslog']\n for path in handlerPaths:\n try:\n handler = logging.handlers.SysLogHandler(path, facility)\n except IOError as e:\n if e.errno == 2: # No such file, try the next one\n continue\n else: # Unexpected exception, fallback to manual logging\n return setup_log_file_handler(\n config, fallback_logfile, fallback_logfile_fmt)\n else:\n break\n\n if handler is not None: # Don't assume we were successful, validate!\n #handler.setLevel(logging.DEBUG) # Appears to be pointless...\n handler_formatter = logging.Formatter(fmt=fmt)\n handler_formatter.converter = time.gmtime\n handler.setFormatter(handler_formatter)\n return handler, None\n else: # We didn't find the syslog interface, fallback\n return setup_log_file_handler(\n config, fallback_logfile, fallback_logfile_fmt)",
"def test_syslog_shortcut_simple(self):\n with cleanup_handlers():\n expected_message = random_string(50)\n coloredlogs.install(syslog=True)\n logging.info(\"%s\", expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(expected_message in line for line in handle)",
"def forward(self, data):\n host = self._CONFIG.read('syslog', 'host')\n port = self._CONFIG.read('syslog', 'port')\n self._SOC.sendto(bytes(data, encoding='utf-8'), (host, int(port)))",
"def handle_syslog_message(self, port, source_address=None,\n message=None):\n channel_names = self.router.get_channel_names_for_port(port)\n\n formatted_source = '{0[0]}:{0[1]:d}'.format(source_address)\n formatted_message = format_syslog_message(message)\n text = '{} {}'.format(formatted_source, formatted_message)\n\n message_received.send(channel_names=channel_names,\n text=text,\n source_address=source_address)",
"def write(pri, level, message):\n log = json.dumps({'time': time.time(), 'level': level, 'message': message})\n\n syslog.openlog('liaison')\n syslog.syslog(pri, log)\n if not sys.stdout.isatty():\n if pri in [syslog.LOG_DEBUG, syslog.LOG_INFO]:\n print(log, file=sys.stderr)\n else:\n print(log)",
"def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))",
"async def run(self, local_addr: t.Tuple[str, int] = None) -> None:\n connection = await self._loop.create_datagram_endpoint(\n lambda: SyslogProtocol(self),\n local_addr=local_addr or ('0.0.0.0', 514))\n\n self.transport, self.protocol = connection",
"def start_sysdig(self):\n\t\ttarget_pid = self.info[\"target_pid\"]\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".scap\")\n\t\tself.info[\"sysdig_log_path\"] = log_file\n\t\tcmd = [\"/usr/bin/sysdig\",\"-n%d\"%(self.cfg.sysdig_limit),\"-w\"+self.info[\"sysdig_log_path\"] ]\n\t\tself.p_sysdig = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"sysdig starts, logfile:%s\",self.info[\"sysdig_log_path\"] )",
"def test_syslog_shortcut_enhanced(self):\n with cleanup_handlers():\n the_expected_message = random_string(50)\n not_an_expected_message = random_string(50)\n coloredlogs.install(syslog='warning')\n logging.info(\"%s\", not_an_expected_message)\n logging.warning(\"%s\", the_expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(the_expected_message in line for line in handle)\n assert not any(not_an_expected_message in line for line in handle)",
"def set_rsyslog_new_configuration():\n with open(rsyslog_conf_path, \"rt\") as fin:\n with open(\"tmp.txt\", \"wt\") as fout:\n for line in fin:\n if \"imudp\" in line or \"imtcp\" in line:\n # Load configuration line requires 1 replacement\n if \"load\" in line:\n fout.write(line.replace(\"#\", \"\", 1))\n # Port configuration line requires 2 replacements\n elif \"port\" in line:\n fout.write(line.replace(\"#\", \"\", 2))\n else:\n fout.write(line)\n else:\n fout.write(line)\n command_tokens = [\"sudo\", \"mv\", \"tmp.txt\", rsyslog_conf_path]\n write_new_content = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)\n time.sleep(3)\n o, e = write_new_content.communicate()\n if e is not None:\n handle_error(e,\n error_response_str=\"Error: could not change Rsyslog.conf configuration in -\" + rsyslog_conf_path)\n return False\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True",
"def get_rsyslog_group():\n\n @click.group(name=\"rsyslog\")\n def rsyslog_group():\n \"\"\" rsyslog service \"\"\"\n\n rsyslog_group.add_command(pull)\n rsyslog_group.add_command(start)\n return rsyslog_group",
"def setup_logging(use_syslog=False):\n\n LOG.setLevel(logging.INFO)\n if use_syslog:\n ch = SysLogHandler()\n else:\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d] '\n '%(levelname)s: %(message)s'))\n LOG.addHandler(ch)",
"def syslog_config(self, syslog_config):\n\n self._syslog_config = syslog_config",
"def init(*, threshold_lvl=1, quiet_stdout=False, log_file):\n global _logger, _log_lvl\n\n # translate lvl to those used by 'logging' module\n _log_lvl = _set_lvl(threshold_lvl)\n\n # logger Creation\n _logger = logging.getLogger(PKG_NAME)\n _logger.setLevel(_log_lvl)\n\n # create syslog handler and set level to info\n log_h = logging.FileHandler(log_file)\n\n # Base message format\n base_fmt = '%(asctime)s - %(name)s - [%(levelname)s] - %(message)s'\n\n # set formatter\n log_fmt = logging.Formatter(base_fmt)\n log_h.setFormatter(log_fmt)\n # add Handler\n _logger.addHandler(log_h)\n\n # create stout handler\n if not quiet_stdout:\n global _stdout\n _stdout = True",
"def create(exe_path: str, arguments: str=None, remote_host: str=None, user: str=None, user_domain: str=None,\n password: str=None) -> Tuple[CommandLine, Callable[[str], None]]:\n if '-' in remote_host:\n remote_host = '\"' + remote_host + '\"'\n args = [\"/node:\" + remote_host]\n\n args.append(\"/user:\\\"{}\\\\{}\\\"\".format(user_domain, user))\n\n args.append(\"/password:\\\"{}\\\"\".format(password))\n\n args += [\"process\", \"call\", \"create\"]\n\n args.append('\"{} {}\"'.format(exe_path, arguments))\n\n return wmic(args), parsers.wmic.create",
"def fusion_api_delete_remote_syslog(self, logId, param='', api=None, headers=None):\n return self.remote_syslog.delete(logId, param, api, headers)",
"def test_syslog_bsd_sample(self):\n self.assertEqual(jc.parsers.syslog_bsd.parse(self.syslog, quiet=True), self.syslog_json)"
] | [
"0.6110804",
"0.6035433",
"0.5958222",
"0.58643603",
"0.5857156",
"0.58214575",
"0.5620683",
"0.55518126",
"0.5456826",
"0.53790456",
"0.53560174",
"0.52599466",
"0.524096",
"0.51989275",
"0.5180635",
"0.5104847",
"0.49666247",
"0.49389327",
"0.4907948",
"0.4890985",
"0.48505464",
"0.48458344",
"0.48165166",
"0.4811627",
"0.4811109",
"0.48079354",
"0.4806668",
"0.48058024",
"0.4799356",
"0.4791039"
] | 0.7081112 | 0 |
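A minimal usage sketch for the create keyword in the record above; `fusion_client` and the payload field names are illustrative assumptions, since the record does not show the OneView remote-syslog schema:

    # Sketch only: assumes an initialized Fusion API client exposing the
    # fusion_api_configure_remote_syslog method shown in this record.
    body = {
        "destination": "192.0.2.10",  # assumed field name
        "protocol": "TCP",            # assumed field name
        "port": 514,                  # assumed field name
    }
    resp = fusion_client.fusion_api_configure_remote_syslog(body=body)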
Updates the remote syslog configuration, which is applied to the devices managed by OneView. [Arguments] | def fusion_api_update_remote_syslog_configuration(self, body, api=None, headers=None, param=None):
return self.remote_syslog.update(body, api, headers, param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_configure_remote_syslog(self, body, api=None, headers=None):\n return self.remote_syslog.create(body, api, headers)",
"def set_rsyslog_new_configuration():\n with open(rsyslog_conf_path, \"rt\") as fin:\n with open(\"tmp.txt\", \"wt\") as fout:\n for line in fin:\n if \"imudp\" in line or \"imtcp\" in line:\n # Load configuration line requires 1 replacement\n if \"load\" in line:\n fout.write(line.replace(\"#\", \"\", 1))\n # Port configuration line requires 2 replacements\n elif \"port\" in line:\n fout.write(line.replace(\"#\", \"\", 2))\n else:\n fout.write(line)\n else:\n fout.write(line)\n command_tokens = [\"sudo\", \"mv\", \"tmp.txt\", rsyslog_conf_path]\n write_new_content = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)\n time.sleep(3)\n o, e = write_new_content.communicate()\n if e is not None:\n handle_error(e,\n error_response_str=\"Error: could not change Rsyslog.conf configuration in -\" + rsyslog_conf_path)\n return False\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True",
"def syslog_config_from_platform_setting(self, syslog_config_from_platform_setting):\n\n self._syslog_config_from_platform_setting = syslog_config_from_platform_setting",
"def syslog_config(self, syslog_config):\n\n self._syslog_config = syslog_config",
"def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))",
"def UpdateConfig(self, instalog_config, update_info, env):\n if update_info.get('data_truncate', {}).get('enable', False):\n # If enable data_truncate, Instalog truncate once a day.\n instalog_config['buffer']['args']['truncate_interval'] = 86400\n\n threshold = update_info.get('input_http', {}).get(\n 'log_level_threshold', logging.NOTSET)\n instalog_config['input']['http_in']['args']['log_level_threshold'] = (\n threshold)\n\n if update_info.get('forward', {}).get('enable', False):\n args = update_info.get('forward', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_pull_socket_port\n instalog_config['output']['forward'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('forward')\n\n if update_info.get('customized_output', {}).get('enable', False):\n args = update_info.get('customized_output', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_customized_output_port\n instalog_config['output']['customized_output'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append(\n 'customized_output')\n\n if update_info.get('archive', {}).get('enable', False):\n instalog_config['output']['archive'] = {\n 'plugin': 'output_archive',\n 'args': update_info.get('archive', {}).get('args', {}).copy()\n }\n # Set the target_dir.\n target_dir = os.path.join(env.umpire_data_dir, 'instalog_archives')\n instalog_config['output']['archive']['args']['target_dir'] = target_dir\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('archive')",
"def set_rsyslog_old_configuration():\n add_udp = False\n add_tcp = False\n # Do the configuration lines exist\n is_exist_udp_conf = False\n is_exist_tcp_conf = False\n with open(rsyslog_conf_path, \"rt\") as fin:\n for line in fin:\n if \"imudp\" in line or \"UDPServerRun\" in line:\n is_exist_udp_conf = True\n add_udp = True if \"#\" in line else False\n elif \"imtcp\" in line or \"InputTCPServerRun\" in line:\n is_exist_tcp_conf = True\n add_tcp = True if \"#\" in line else False\n fin.close()\n if add_udp or not is_exist_udp_conf:\n append_content_to_file(rsyslog_old_config_udp_content, rsyslog_conf_path)\n if add_tcp or not is_exist_tcp_conf:\n append_content_to_file(rsyslog_old_config_tcp_content, rsyslog_conf_path)\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True",
"def configure_syslog_server(client_session, server, port, protocol):\n\n syslog_body_dict = { 'syslogServer': server, 'port': port, 'protocol': protocol }\n\n cfg_result = client_session.update('systemSyslogServer', request_body_dict={'syslogserver': syslog_body_dict})\n\n if cfg_result['status'] == 204:\n return True\n else:\n return False",
"def fusion_api_get_remote_syslog_configuration(self, api=None, headers=None, param=None):\n return self.remote_syslog.get(api=api, headers=headers, param=param)",
"def _set_advance_syslog(zd, **kwargs):\n xlocs = LOCATOR_CFG_SYSTEM_NETWORKMGMT\n adv_opt = ['zd_facility_name', 'zd_priority_level', 'ap_facility_name', 'ap_priority_level']\n adv_cfg = {'pause': 1}\n adv_cfg.update(kwargs)\n \n if zd.s.is_element_present(xlocs['syslog_advanced_setting_collapse']):\n zd.s.click_and_wait(xlocs['syslog_advanced_setting_click'])\n time.sleep(adv_cfg['pause'])\n \n for key in adv_opt:\n if adv_cfg.get(key) is not None:\n zd.s.select_value(xlocs[key], adv_cfg[key])",
"def omc_conf_set(host_id, omc_fields, omc_config, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n err1 = [0, 0]\n result = \"\"\n param = []\n resultarray = {}\n param.append('omcIpAddress.1')\n param.append('periodicStatsTimer.1')\n form_name = ['OMC IP address', 'Periodic Statistics Timer']\n dictarr = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n odu16_omc_conf_table = sqlalche_obj.session.query(SetOdu16OmcConfTable).filter(\n SetOdu16OmcConfTable.config_profile_id == device_param_list[0][4]).all()\n result += str(odu16_omc_conf_table)\n for i in range(len(omc_fields)):\n omc_oid = oid_name[omc_fields[i]]\n omc_type = oid_type[omc_fields[i]]\n omc_type_val = omc_config[i]\n result += snmp_set(device_param_list[0][0], device_param_list[0][1], device_param_list[0][2], device_param_list[\n 0][3], omc_oid, omc_type, omc_type_val)\n err = error_odu16(result, param, err1)\n try:\n el = EventLog()\n # el.log_event( \"description detail\" , \"user_name\" )\n if 1 in err1:\n el.log_event(\n \"Values Updated in UBR UNMP Form\", \"%s\" % (user_name))\n if int(err1[0]) == 1:\n odu16_omc_conf_table[0].omc_ip_address = omc_config[0]\n if int(err1[1]) == 1:\n odu16_omc_conf_table[0].periodic_stats_timer = omc_config[1]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n for j in range(0, len(omc_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = omc_config[j]\n dict[\"textbox\"] = omc_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err != '':\n raise Set_exception\n except Set_exception, e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16OmcConfTable'\n resultarray['formAction'] = 'omc_config_form.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)",
"def update_log_config(self, monitor_name, log_config):\n pass",
"def setupLogging(loglevel=logging.INFO):\n\n # The following configures two loggers, the root logger and a logger named \"phone_ctlr_log\". Messages sent to the\n # root logger will be sent to the system log using the syslog protocol, and messages to the \"phone_ctlr_log\" logger will\n # be written to the Phone_Agent.log file which will be rotated once the log reaches 1Mb.\n\n configdict = {\n 'version': 1, # Configuration schema in use; must be 1 for now\n #'disable_existing_loggers': True, # Disables all existing logging configurations\n\n 'formatters': {\n 'brief': {\n 'format' : '%(levelname)-8s %(asctime)s (%(created)s) %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'standard': {\n 'format' : '%(levelname)-8s %(asctime)s %(name)-15s %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'console': {\n 'format' : '%(levelname)-8s %(asctime)s -- %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'custom': {\n 'format' : '%(asctime)s - %(message)s',\n 'datefmt': '%Y-%m-%dT%H:%M:%S.%Z' } ### Ex,: 2038-01-01T05:05:02\n },\n\n 'handlers': {'applog': {'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/opt/tools/phone_agent/Phone_Agent.log',\n #'filename': 'Phone_Agent.log',\n 'backupCount': 3,\n 'formatter': 'custom',\n 'level': 'INFO',\n 'maxBytes': 1024*1024},\n 'conlog': {'class': 'logging.StreamHandler',\n 'formatter': 'console',\n #'stream': 'console',\n 'level': 'DEBUG'},\n 'syslog': {'class': 'logging.handlers.SysLogHandler',\n 'formatter': 'standard',\n 'level': 'ERROR'}},\n\n # Specify all the subordinate loggers\n 'loggers': {\n 'phone_ctlr_log': {\n 'handlers': ['applog']\n },\n 'console_log': {\n 'handlers': ['conlog']\n }\n },\n # Specify properties of the root logger\n 'root': {\n 'handlers': ['syslog']\n },\n }\n\n # Set up configuration\n logging.config.dictConfig(configdict)",
"def process_syslog_message(self, request: Tuple[bytes, socket]):\n # Parse data from socket request\n message = bytes.decode(request[0].strip())\n source_ip_address, source_port = request[1].getsockname()\n message_list = message.split(\"-\")\n\n # Store it in a data structure\n message_dict = dict()\n message_dict[\"src_port\"] = source_port\n message_dict[\"src_ip\"] = source_ip_address\n message_dict[\"time\"] = message_list[0].split(\":\", 1)[1].split(\": \")[0].strip()\n message_dict[\"level\"] = int(message_list[1])\n message_dict[\"syslog\"] = message_list[2]\n\n # Save to mongo\n devices = Device.objects(src_ip=source_ip_address)\n if not devices:\n device = Device(src_ip=source_ip_address, src_port=source_port)\n else:\n device = devices[0]\n\n # Save syslog to database\n syslog = Syslog(**message_dict)\n syslog.save()\n message_dict[\"syslog_id\"] = str(syslog.id)\n\n # Send message\n response = self.post_message(message=message_dict)\n\n # Get the slack thread id and save it to the syslog\n thread_ts = response.data[\"ts\"]\n syslog.thread_ts = thread_ts\n syslog.save()\n\n # Reference is in the device and save the device\n device.syslogs.append(syslog)\n device.syslog_count += 1\n device.save()",
"def configure_logging():\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n\n # Enable logging to syslog as well:\n # Normally this would not be necessary but logging assumes syslog listens on\n # localhost syslog/udp, which is disabled on 10.5 (rdar://5871746)\n syslog = logging.handlers.SysLogHandler('/var/run/syslog')\n syslog.setFormatter(logging.Formatter('%(name)s: %(message)s'))\n syslog.setLevel(logging.INFO)\n logging.getLogger().addHandler(syslog)",
"def _on_config_changed(self, _):\n self._configure_pod()",
"def syslog_remote_enable(handle, name, hostname,\n severity=\"emergencies\", forwarding_facility=\"local0\"):\n\n from ucsmsdk.mometa.comm.CommSyslogClient import CommSyslogClient\n\n mo = CommSyslogClient(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n forwarding_facility=forwarding_facility,\n hostname=hostname, admin_state=\"enabled\",\n severity=severity, name=name)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def parse_main(self):\n try:\n self.common_config[\"debug\"] = self.config.get('main', 'debug')\n except ConfigParser.NoOptionError:\n self.common_config[\"debug\"] = \"FALSE\"\n \n try:\n conf_local_ip = self.config.get('main', 'local_ip')\n if is_valid_ipv4_address(conf_local_ip):\n self.common_config[\"local_ip\"] = conf_local_ip\n \n elif conf_local_ip == \"default\": #if loca_if == \"default\" try to reach google.com\n try:\n self.common_config[\"local_ip\"] = get_ip_address()\n except Exception, e:\n self.logger.configError(\"cannot discover local ip address: %s\" % e)\n sys.exit(1)\n\n else: #network interface name\n try:\n self.common_config[\"local_ip\"] = get_ip_address_ifname(conf_local_ip)\n except Exception, e:\n self.logger.configError(\"cannot determine ip address of %s interface: %s\" % (conf_local_ip, e))\n sys.exit(1)\n\n except ConfigParser.NoOptionError: \n self.logger.configError(\"Missing mandatory parameters in config file, bailing out!\")\n sys.exit(1)\n\n try:\n log_file = self.common_config[\"log_file\"] = self.config.get('main', 'log_file') \n if log_file.startswith(\"syslog\"):\n try:\n syslog_host = log_file.split(\":\")[1]\n except IndexError:\n syslog_host = 'localhost'\n try:\n syslog_port = int(log_file.split(\":\")[2])\n except IndexError:\n syslog_port = 514\n try:\n syslog_facility = log_file.split(\":\")[3]\n except IndexError:\n syslog_facility = logging.handlers.SysLogHandler.LOG_USER\n self.logger.debugMessage(\"Logging to syslog (host: %s, port: %s, facility: %s)\" % ((syslog_host, syslog_port, syslog_facility)))\n self.common_config[\"conf_log_handler\"] = logging.handlers.SysLogHandler((syslog_host, syslog_port), syslog_facility)\n else:\n self.logger.debugMessage(\"Logging to file: %s\" % log_file)\n try:\n self.common_config[\"conf_log_handler\"] = logging.FileHandler(log_file)\n except IOError, e:\n self.logger.configError(\"cannot access to the log file: %s\" % e)\n sys.exit(1)\n \n except ConfigParser.NoOptionError: \n # no log defined in config file\n self.common_config[\"conf_log_handler\"] = None\n \n try:\n self.common_config[\"daemon\"] = self.config.get('main', 'daemon')\n except ConfigParser.NoOptionError:\n self.common_config[\"daemon\"] = None\n try:\n self.common_config[\"pid_file\"] = self.config.get('main', 'pid_file')\n except ConfigParser.NoOptionError:\n self.common_config[\"pid_file\"] = None\n\n \n return self.common_config",
"def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0",
"def handle_config_change(self, msg):\n self.xmpp.event('groupchat_config_status', msg)\n self.xmpp.event('muc::%s::config_status' % msg['from'].bare , msg)",
"def omc_registration_configuration_set(host_id, omc_registration_fields, omc_registration_param, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n omc_registration_configuration = []\n result = ''\n param = []\n dictarr = []\n err1 = [0, 0, 0, 0, 0]\n resultarray = {}\n form_name = ['Address', 'Contact Person', 'Mobile',\n 'AlternateContact', 'Email']\n param.append('sysOmcRegisterContactAddr.1')\n param.append('sysOmcRegisterContactPerson.1')\n param.append('sysOmcRegisterContactMobile.1')\n param.append('sysOmcRegisterAlternateContact.1')\n param.append('sysOmcRegisterContactEmail.1')\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n omc_registration_configuration = sqlalche_obj.session.query(SetOdu16SysOmcRegistrationTable).filter(\n SetOdu16SysOmcRegistrationTable.config_profile_id == device_param_list[0][4]).first()\n for i in range(len(omc_registration_fields)):\n oidname = oid_name[omc_registration_fields[i]]\n oidtype = oid_type[omc_registration_fields[i]]\n oidvalue = omc_registration_param[i]\n result += snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], oidname, oidtype, oidvalue)\n err = error_odu16(result, param, err1)\n try:\n el = EventLog()\n if 1 in err1:\n el.log_event(\n \"Values Updated in UBR OMC Registration Form\", \"%s\" % (user_name))\n for j in range(0, len(omc_registration_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = omc_registration_param[j]\n dict[\"textbox\"] = omc_registration_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err1[0] == 1:\n omc_registration_configuration.sys_omc_register_contact_addr = omc_registration_param[\n 0]\n if err1[1] == 1:\n omc_registration_configuration.sys_omc_register_contact_person = omc_registration_param[1]\n if err1[2] == 1:\n omc_registration_configuration.sys_omc_register_contact_mobile = omc_registration_param[2]\n if err1[3] == 1:\n omc_registration_configuration.sys_omc_register_alternate_contact = omc_registration_param[3]\n if err1[4] == 1:\n omc_registration_configuration.sys_omc_register_contact_email = omc_registration_param[\n 4]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if err != '':\n raise Set_exception\n except Set_exception as e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16SysOmcRegistrationTable'\n resultarray['formAction'] = 'sys_registration_form.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)",
"def syslog_source(handle, faults=\"enabled\", audits=\"enabled\",\n events=\"enabled\"):\n\n from ucsmsdk.mometa.comm.CommSyslogSource import CommSyslogSource\n\n mo = CommSyslogSource(parent_mo_or_dn=\"sys/svc-ext/syslog\",\n faults=faults,\n audits=audits,\n events=events)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def updateDevice(self, *args):\r\n\r\n # Update the list of vision choices and the default vision choice\r\n self._appChoice[\"vision\"] = [choice[0] for choice in self._system[self._appString[\"device\"].get()]]\r\n self._appString[\"vision\"].set(self._appChoice[\"vision\"][0])\r\n\r\n # Delete the old choices fromt the option menu\r\n menu = self._appOption[\"vision\"][\"menu\"]\r\n menu.delete(0, \"end\")\r\n\r\n # Add the new list of choices to the option menu\r\n for string in self._appChoice[\"vision\"]:\r\n menu.add_command(label=string, command=lambda value=string: self._appString[\"vision\"].set(value))",
"def syslogservers(self, site_id, element_id, syslogserver_id, data, tenant_id=None, api_version=\"v2.2\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/elements/{}/syslogservers/{}\".format(api_version,\n tenant_id,\n site_id,\n element_id,\n syslogserver_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def config_probe(self, widget, data=None):\n\t\tConfigure.ExcludeServer = (int(self.builder.get_object(\"MasterRadio\").get_active()))\n\t\tConfigure.MaxNodes = \t (int(self.builder.get_object(\"NodeScale\").get_value()))\n\t\tConfigure.LocalhostOnly = (int(self.builder.get_object(\"LocalHostRadio\").get_active()))\n\t\tConfigure.TimeStep = \t (int(self.builder.get_object(\"TimeStepScale\").get_value()))\n\t\tConfigure.Interval = \t (int(self.builder.get_object(\"IntervalScale\").get_value()))\n\n\t\tnomeFile = (str(self.builder.get_object(\"NameText\").get_text()))\n\n\t\tif ('/' not in nomeFile) : Configure.SaveConfig(NewFile=\"./extra/UserOutput/\"+nomeFile)\n\t\telse : Configure.SaveConfig(NewFile = nomeFile)\n\t\t\n\n\t\tprint \"### Sending setup signal to Monitor...\"\n\t\tself.setup_monitor()",
"def main():\n\n ip_filename = arguments.ip_file.strip()\n\n # Set project directory to 'logs' unless an optional directory was given\n if arguments.project_dir:\n project = arguments.project_dir\n else:\n project = 'logs'\n\n if arguments.device_class:\n device_cls = arguments.device_class.strip()\n else:\n # Default device class for Netmiko\n device_cls = 'cisco_ios'\n\n ips = []\n ips = load_txt_file(ip_filename)\n\n total_devices = len(ips)\n # Track devices which fail login or pings\n missing_devices = []\n # Track devices which were successfully accessed\n devices_verified = 0\n\n # Create Directory for show output based on the Project Name\n path = os.path.join(\"./\", project.strip())\n # print path\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"Created directory: {path}\")\n\n # Create logfile for the discovery run in same directory as the resulting show commands\n # logfilename = project + \"-logfile.log\"\n # logfilename = os.path.join(path, logfilename)\n\n if total_devices > 1:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} devices! #####\"\n else:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} device! #####\"\n\n print(\"#\" * len(heading))\n print(heading)\n print(\"#\" * len(heading))\n\n print(f\"Device IP(s) in project {project}:\")\n for i in ips:\n print(f\"\\t{i}\")\n print(\"--------------------------\")\n print(f\"Total devices: {str(len(ips))}\")\n print(\"#\" * len(heading))\n print(\"\\n\")\n\n ## Default Credentials\n # Default list of credentials in format username, user password, enable password\n credentials = ['cisco, cisco, cisco']\n\n ## Load Credentials if -c or --creds option was used\n if arguments.creds:\n # Override default credentials as a new credential file with one or more sets of credentials was provided\n cred_filename = arguments.creds\n credentials = load_txt_file(cred_filename)\n\n ##### SHOW COMMANDS\n commands = []\n\n ## Load custom show commands if -c or --show option was used\n if arguments.show:\n # Override default list of show commands as a new file with one or more show commands was provided\n show_filename = arguments.show\n custom_showcmds = load_txt_file(show_filename)\n\n # first command to send is an end to get back to the main prompt\n commands = custom_showcmds\n\n else:\n # DEFAULT SHOW COMMANDS\n commands = [\"show version\",\n ]\n\n # if not arguments.pingonly:\n # print(\"Sending \" + str(len(commands)) + \" show commands:\")\n # for x in range(0, len(commands)):\n # print(\"\\t\" + commands[x])\n\n # For each IP in the ip address file, attempt to ping, attempt to log in, attempt to enter enable mode and\n # execute and save show command output\n for mgmt_ip in ips:\n\n login_success = False\n enable_success = False\n output = ''\n hostname = \"dev_\" + mgmt_ip\n\n # If Ping is successful attempt to log in and if that is successful attempt to enter enable mode and\n # execute list of show commands\n device_pings = ping_device(mgmt_ip)\n\n if device_pings:\n print(f\"Device {mgmt_ip} Responds to Pings!\\n\")\n\n # If the -i or --icmppingonly option was provided when the script was called, then only execute the ping code.\n if arguments.icmppingonly:\n # Keep a count of the devices that are pingable\n devices_verified += 1\n # Skip everything else as the icmp ping only option was given\n continue\n\n if len(credentials) > 1:\n print(\"**** Attempting multiple credentials to access device....\")\n\n 
try_telnet = False\n # Credential Loop\n for line in credentials:\n\n lineitem = line.split(',')\n uname = lineitem[0].strip()\n upwd = lineitem[1].strip()\n epwd = lineitem[2].strip()\n\n if not try_telnet:\n\n print(f\"\\t**** Attempting user credentials for {uname} with SSH.\")\n\n try:\n dev_conn = ConnectHandler(device_type=device_cls, ip=mgmt_ip, username=uname, password=upwd,\n secret=epwd)\n login_success = True\n\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n # continue\n\n except (EOFError, SSHException, NetMikoTimeoutException):\n print('\\tSSH is not enabled for this device.')\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed SSH')\n login_success = False\n try_telnet = True\n # continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n # continue\n\n if login_success:\n print(\"\\t**** SSH Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** SSH Login Failed!\")\n # continue\n\n # Try Telnet\n if try_telnet:\n print(\"\\t**** Attempting user credentials for \" + uname + \" with Telnet.\")\n\n try:\n dev_conn = ConnectHandler(device_type='cisco_ios_telnet', ip=mgmt_ip, username=uname,\n password=upwd,\n secret=epwd)\n login_success = True\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n continue\n\n if login_success:\n print(\"\\t**** Telnet Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** Telnet Login Failed!\")\n continue\n\n if login_success:\n # Check to see if login has resulted in enable mode (i.e. 
priv level 15)\n is_enabled = dev_conn.check_enable_mode()\n\n if not is_enabled:\n try:\n dev_conn.enable()\n enable_success = True\n except Exception as e:\n print(str(e))\n print(\"\\tCannot enter enter enable mode on device!\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'failed enable')\n enable_success = False\n continue\n else:\n print(\"\\tDevice already in enabled mode!\")\n enable_success = True\n\n if enable_success:\n\n for cmd in commands:\n output += dev_conn.send_command(cmd, strip_prompt=False, strip_command=False)\n dev_conn.exit_config_mode()\n dev_conn.disconnect()\n\n # output contains a stream of text vs individual lines\n # split into individual lies for further parsing\n # output_lines = re.split(r'[\\n\\r]+', output)\n\n # show_info = get_show_info(output_lines)\n #\n # if show_info['hostname']:\n # hostname = show_info.pop('hostname')\n\n # print(\"Information for device: \" + hostname)\n # for k, v in show_info.items():\n # print(\"\\t\" + k +\"\\t\\t-\\t\" + v)\n\n # Save output to file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n log_filename = hostname + \"-\" + timestr + \".txt\"\n log_filename = os.path.join(path, log_filename)\n\n log_file = open(log_filename, 'w')\n log_file.write(\"!#Output file for device \" + hostname + \"\\n\")\n log_file.write(\"!#Commands executed on \" + timestr + \"\\n\\r\")\n log_file.write(\"!\\n\")\n log_file.write(output)\n log_file.close()\n devices_verified += 1\n print(\"\\nOutput results saved in: \" + log_filename + \"\\n\\n\")\n\n\n else:\n # Device does not PING\n print(\"Device is unreachable\")\n missing_devices.append(mgmt_ip)\n\n # Totals Verification\n if arguments.icmppingonly:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of devices which responded to pings:\\t\" + str(devices_verified) + \"\\n\")\n else:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of show command output files:\\t\" + str(devices_verified) + \"\\n\")\n\n\n # Print Note on totals\n for note in info:\n print(note)",
"def file_and_malware_syslog_config(self, file_and_malware_syslog_config):\n\n self._file_and_malware_syslog_config = file_and_malware_syslog_config",
"def conf_update(self):\n pass",
"def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config",
"def config():\n sudo(\n r\"sed -i '/#password=/c\\password=abcdefghijklmnopq' /etc/minv/minv.conf\"\n )\n sudo(\n r\"sed -i '/log_level = INFO/c\\log_level = DEBUG' /etc/minv/minv.conf\"\n )"
] | [
"0.6466546",
"0.60999686",
"0.59437025",
"0.59193707",
"0.58530056",
"0.5824026",
"0.57210904",
"0.56041205",
"0.5596374",
"0.5579762",
"0.5573274",
"0.5550738",
"0.54969245",
"0.54969096",
"0.5486056",
"0.54306436",
"0.53711075",
"0.5369695",
"0.52771443",
"0.52151686",
"0.52037346",
"0.51851237",
"0.5153138",
"0.5121115",
"0.51148576",
"0.50965244",
"0.5083955",
"0.5081741",
"0.5061231",
"0.5057264"
] | 0.666714 | 0 |
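The matching update call, sketched under the same assumptions as the create example above; the `param` value is a hypothetical query string passed through to `remote_syslog.update`:

    # Sketch only: reuses the assumed fusion_client from the create sketch.
    updated_body = {"destination": "192.0.2.20"}  # assumed field name
    resp = fusion_client.fusion_api_update_remote_syslog_configuration(
        body=updated_body,
        param="?force=true",  # hypothetical query parameter
    )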
Deletes a remote syslog configuration. API documentation was incomplete when this was created! [Arguments] | def fusion_api_delete_remote_syslog(self, logId, param='', api=None, headers=None):
return self.remote_syslog.delete(logId, param, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_syslog_server(client_session):\n\n cfg_result = client_session.delete('systemSyslogServer')\n\n if cfg_result['status'] == 204:\n return True\n else:\n return False",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def remove_host(sid):\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n try:\n hosts.delete(db, sid)\n db.commit()\n ret = {'remove': {'success': True}}\n return jsonify(ret)\n except:\n abort(400)",
"def cli_truncate_pcc_logs(host_ip:str, linux_user:str, linux_password:str)->dict:\n try:\n \n cmd_remove_logs = \"sudo docker exec pccserver sh -c 'rm logs/*.log*';sudo docker exec platina-executor sh -c 'rm logs/*.log*'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_logs)\n\n cmd_remove_archive = \"sudo docker exec pccserver sh -c 'rm -rf logs/archive';sudo docker exec platina-executor sh -c 'rm -rf logs/archive'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_archive)\n\n cmd_remove_ansible_backup = \"sudo docker exec pccserver sh -c 'rm -rf logs/ansible-backup-logs';sudo docker exec platina-executor sh -c 'rm -rf logs/ansible-backup-logs'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_ansible_backup)\n\n cmd_remove_k8s_logs=\"sudo docker exec platina-executor sh -c 'rm -r /home/jobs/kubernetes/cluster/*'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_k8s_logs)\n \n cmd_remove_ceph_logs=\"sudo docker exec pccserver sh -c 'rm -r /home/jobs/ceph/cluster/*'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_ceph_logs)\n\n cmd_truncate_logs = \"sudo docker exec pccserver sh -c 'truncate -s 0 logs/*.log';sudo docker exec platina-executor sh -c 'truncate -s 0 logs/*.log'\"\n return cli_run(host_ip, linux_user, linux_password, cmd_truncate_logs) \n \n except Exception as e:\n return {\"Error\": str(e)}",
"def remove_user_log_in_frr(dut,log_file_name):\n st.config(dut,\"docker exec -it bgp rm /var/log/frr/%s\"%log_file_name)",
"def deleteIpTcp(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('DELETE', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception', 404: 'Specified tcp does not exist'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\treturn deserialize_string_json(payload)",
"def send_syslog(attacker_ip, syslog_server=\"127.0.0.1\",\n syslog_port=514):\n logger = logging.getLogger(\"flytrap\")\n logger.setLevel(logging.CRITICAL)\n handler = logging.handlers.SysLogHandler(address=(syslog_server,\n syslog_port))\n logger.addHandler(handler)\n logger.critical(\"flytrap: \" + attacker_ip + \" took the bait!\")",
"def remote_kill():",
"def delete_log(self):\n os.system('rm -rf *.log')\n os.system('rm -rf *.log~')\n os.system('rm -rf *.last')\n os.system('rm -rf *.last~')",
"def delete(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack server: %s' % truncate(res))\n return res[0]",
"def syslog(ctx, config):\n if ctx.archive is None:\n # disable this whole feature if we're not going to archive the data anyway\n yield\n return\n\n log.info('Starting syslog monitoring...')\n\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'mkdir', '-p', '-m0755', '--',\n '{adir}/syslog'.format(adir=archive_dir),\n ],\n wait=False,\n )\n )\n\n CONF = '/etc/rsyslog.d/80-cephtest.conf'\n conf_fp = StringIO('''\nkern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat\n*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat\n'''.format(adir=archive_dir))\n try:\n for rem in ctx.cluster.remotes.iterkeys():\n misc.sudo_write_file(\n remote=rem,\n path=CONF,\n data=conf_fp,\n )\n conf_fp.seek(0)\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'service',\n # a mere reload (SIGHUP) doesn't seem to make\n # rsyslog open the files\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n\n yield\n finally:\n log.info('Shutting down syslog monitoring...')\n\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo',\n 'rm',\n '-f',\n '--',\n CONF,\n run.Raw('&&'),\n 'sudo',\n 'service',\n 'rsyslog',\n 'restart',\n ],\n wait=False,\n ),\n )\n # race condition: nothing actually says rsyslog had time to\n # flush the file fully. oh well.\n\n log.info('Checking logs for errors...')\n for rem in ctx.cluster.remotes.iterkeys():\n log.debug('Checking %s', rem.name)\n r = rem.run(\n args=[\n 'egrep', '--binary-files=text',\n '\\\\bBUG\\\\b|\\\\bINFO\\\\b|\\\\bDEADLOCK\\\\b',\n run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),\n run.Raw('|'),\n 'grep', '-v', 'task .* blocked for more than .* seconds',\n run.Raw('|'),\n 'grep', '-v', 'lockdep is turned off',\n run.Raw('|'),\n 'grep', '-v', 'trying to register non-static key',\n run.Raw('|'),\n 'grep', '-v', 'DEBUG: fsize', # xfs_fsr\n run.Raw('|'),\n 'grep', '-v', 'CRON', # ignore cron noise\n run.Raw('|'),\n 'grep', '-v', 'BUG: bad unlock balance detected', # #6097\n run.Raw('|'),\n 'grep', '-v', 'inconsistent lock state', # FIXME see #2523\n run.Raw('|'),\n 'grep', '-v', '*** DEADLOCK ***', # part of lockdep output\n run.Raw('|'),\n 'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147\n run.Raw('|'),\n 'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',\n run.Raw('|'),\n 'grep', '-v', 'INFO: recovery required on readonly',\n run.Raw('|'),\n 'head', '-n', '1',\n ],\n stdout=StringIO(),\n )\n stdout = r.stdout.getvalue()\n if stdout != '':\n log.error('Error in syslog on %s: %s', rem.name, stdout)\n set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = \\\n \"'{error}' in syslog\".format(error=stdout)\n\n log.info('Compressing syslogs...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'find',\n '{adir}/syslog'.format(adir=archive_dir),\n '-name',\n '*.log',\n '-print0',\n run.Raw('|'),\n 'sudo',\n 'xargs',\n '-0',\n '--no-run-if-empty',\n '--',\n 'gzip',\n '--',\n ],\n wait=False,\n ),\n )",
"def syncrepl_delete(self, uuids):\n pass",
"def delete(self, host, file):",
"def process_syslog_message(self, request: Tuple[bytes, socket]):\n # Parse data from socket request\n message = bytes.decode(request[0].strip())\n source_ip_address, source_port = request[1].getsockname()\n message_list = message.split(\"-\")\n\n # Store it in a data structure\n message_dict = dict()\n message_dict[\"src_port\"] = source_port\n message_dict[\"src_ip\"] = source_ip_address\n message_dict[\"time\"] = message_list[0].split(\":\", 1)[1].split(\": \")[0].strip()\n message_dict[\"level\"] = int(message_list[1])\n message_dict[\"syslog\"] = message_list[2]\n\n # Save to mongo\n devices = Device.objects(src_ip=source_ip_address)\n if not devices:\n device = Device(src_ip=source_ip_address, src_port=source_port)\n else:\n device = devices[0]\n\n # Save syslog to database\n syslog = Syslog(**message_dict)\n syslog.save()\n message_dict[\"syslog_id\"] = str(syslog.id)\n\n # Send message\n response = self.post_message(message=message_dict)\n\n # Get the slack thread id and save it to the syslog\n thread_ts = response.data[\"ts\"]\n syslog.thread_ts = thread_ts\n syslog.save()\n\n # Reference is in the device and save the device\n device.syslogs.append(syslog)\n device.syslog_count += 1\n device.save()",
"def revoke(config, hostname, username):\n\n response = make_api_request('DELETE', config, '/machines/' + hostname +\n '/users/' + username)\n print 'Permission revoked successfully.'",
"def del_record(self, args):\n\n mac = MacAddress(args.mac)\n desc = self.dhcp_client_state[mac.as_redis_key()]\n print(\"Deleted mac %s with DHCP rec %s\" % (str(mac), desc))\n self.dhcp_client_state[mac.as_redis_key()] = None",
"def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()",
"def syslog_remote_disable(handle, name):\n\n mo = handle.query_dn(\"sys/svc-ext/syslog/client-\" + name)\n if mo:\n mo.admin_state = \"disabled\"\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n else:\n raise ValueError(\"Syslog Mo is not available.\")",
"def remove_socks():\n subprocess.run('sudo rm /tmp/*.sock -f', shell=True)",
"def remove_socks():\n subprocess.run('sudo rm /tmp/*.sock -f', shell=True)",
"def CleanupLogs(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('cleanupLogs', payload=payload, response_object=None)",
"def kill_log(log): \n handlers = log.handlers[:]\n for h in handlers:\n log.removeHandler(h)\n h.flush()\n h.close()",
"def delete(self, unique_id):\n return request(\n API_LIST.DNS_DELETE.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'id': unique_id\n }\n )",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)",
"def purgeLogs( self ):\n cmd = \"DELETE FROM `ProxyDB_Log` WHERE TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > 15552000\"\n return self._update( cmd )",
"def delete(self, *args, **kwargs):\n # Delete listener\n if self.db.listener:\n self.db.listener.delete()\n \n # Delete puppets\n puppetlist = [puppet for puppet in\n search.search_tag(self.key+\"-puppet\")]\n for puppet in puppetlist:\n puppet.delete()\n\n # Delete bot\n self.db.ev_location.msg_contents(\"Bot commencing shut-down process.\")\n super(ServerBot, self).delete(*args, **kwargs)",
"def remove(ip):\n return __apf_cmd(\"-u {}\".format(ip))",
"def delete(self, ip): # pylint: disable=invalid-name\n return self.request(\"DELETE\", ip)",
"def remote_cleanup(connections: ThreadingGroup, commands_file: str) -> None:\n connections.run(\n 'rm {file}'.format(file=commands_file)\n )\n connections.run(\n 'rm /tmp/evaneos_ssh__fabric_host'.format(file=commands_file)\n )",
"def fusion_api_configure_remote_syslog(self, body, api=None, headers=None):\n return self.remote_syslog.create(body, api, headers)"
] | [
"0.6933873",
"0.55352473",
"0.5523793",
"0.5316745",
"0.53076696",
"0.5305567",
"0.5293623",
"0.529209",
"0.52332973",
"0.5202621",
"0.520213",
"0.51900214",
"0.516378",
"0.5133967",
"0.5122535",
"0.5119516",
"0.510821",
"0.50966215",
"0.5096111",
"0.5096111",
"0.5094397",
"0.50831336",
"0.5072681",
"0.5071465",
"0.506223",
"0.5060553",
"0.504057",
"0.50266385",
"0.5023556",
"0.5001536"
] | 0.78789526 | 0 |
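And the delete call; the `logId` value below is a made-up identifier for illustration:

    # Sketch only: logId selects which remote syslog destination to remove.
    resp = fusion_client.fusion_api_delete_remote_syslog(logId="destination1")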
Gets the current remote syslog configuration. [Example] ${resp} = Fusion Api Get Remote Syslog Configuration | | | | def fusion_api_get_remote_syslog_configuration(self, api=None, headers=None, param=None):
return self.remote_syslog.get(api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config(req):\n #try:\n # user_id = req.user\n #except KeyError as e:\n # msg = req.get_error_msg(e)\n # return send_error_response(msg)\n try:\n config = tools_config_get_config(req)\n except Exception:\n raise http_exc.HTTPClientError()\n else:\n return Response(json_body=json.dumps(config), content_type='application/json')",
"def logging_config(self) -> 'outputs.LoggingConfigResponse':\n return pulumi.get(self, \"logging_config\")",
"def get_snmp_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/snmp-setting\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get_config(site='self'):\n path='/sites/%s/configuration' % (site)\n return _api_request('GET', path)",
"def get_syslog_server(client_session, user_id):\n\n cfg_result = client_session.read('systemSyslogServer')\n\n if cfg_result['status'] == 200:\n return True\n else:\n return False",
"def processGetConfig(self, msg):\r\n resp = MsgHelper.createResponse(Messages.RSP_GET_CONFIG, msg)\r\n resp[RunInto] = self.runInto\r\n resp[ExecDelay] = self.execDelay\r\n resp[ByStep] = self.stepByStep\r\n return resp",
"def log_config(self) -> 'outputs.ConnectorsLogConfigResponse':\n return pulumi.get(self, \"log_config\")",
"def get_system_config_property(connection, config_key):\n\n response = connection.get_json(f'system/config/{config_key}')\n config_value = response.get('result')\n if config_value is None:\n raise SAPCliError(\"gCTS response does not contain 'result'\")\n\n return config_value",
"def config_get():\n server_config = db.get().server_config_get()\n\n if not server_config:\n return flask.jsonify({\n \"message\": \"Netmet server has not been setup yet\"}), 404\n\n return flask.jsonify(server_config), 200",
"def get_dns_server_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/dns-setting/servers\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def notification_config(self) -> 'outputs.NotificationConfigResponse':\n return pulumi.get(self, \"notification_config\")",
"def get_instance_log_conf(instance_id):\n # Retrieve current log config file\n log_conf_file = None\n\n filename = 'logentries_%s.conf'%instance_id\n rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename\n local_conf_name = '/tmp/%s'%filename\n \n # Clean file present\n try:\n local('rm %s'%local_conf_name)\n except:\n print 'Could not remove %s. It may not exist'%(local_conf_name)\n logger.warning('Could not remove %s. It may not exist'%(local_conf_name))\n # Get remote conf file or return None if it cannot be retrieved\n try:\n get(rsyslog_conf_name,local_conf_name)\n except:\n print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id)\n logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id)\n return None\n # Open conf file or return None if it cannot be opened\n try:\n log_conf_file = open(local_conf_name,'r')\n except:\n print 'Cannot open %s from instance %s'%(local_conf_name,instance_id)\n logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id)\n return None\n return log_conf_file",
"def get_frr_config(conn_obj, device=\"dut\"):\n command = \" sudo cat /etc/sonic/frr/frr.conf\"\n if device==\"dut\":\n return utils_obj.remove_last_line_from_string(st.show(conn_obj, command, skip_tmpl=True))",
"def config(self) -> pulumi.Output['outputs.ConfigResponse']:\n return pulumi.get(self, \"config\")",
"def get_global_config(baseurl, cookie_header):\n url = baseurl + 'stacking/vsf/global_config'\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code",
"def monitoring_config(self) -> 'outputs.MonitoringConfigResponse':\n return pulumi.get(self, \"monitoring_config\")",
"async def get_log_settings(\n self, headers: dict[str, t.Any] = ..., as_json: t.Literal[False] = ...\n ) -> service_pb2.LogSettingsResponse:",
"def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def sql_server_audit_config(self) -> 'outputs.SqlServerAuditConfigResponse':\n return pulumi.get(self, \"sql_server_audit_config\")",
"def _get_lsp_config_notify_ospf(self):\n return self.__lsp_config_notify_ospf",
"def get_ntp_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def fusion_api_update_remote_syslog_configuration(self, body, api=None, headers=None, param=None):\n return self.remote_syslog.update(body, api, headers, param)",
"def config(self) -> 'outputs.DeviceConfigResponse':\n return pulumi.get(self, \"config\")",
"def get(self, request, format=None):\n return Response({k: getattr(config, k) for k in list(dir(config))})",
"def get_current_config():\n global SOLR_ADDRES, SOLR_PORT, SOLR_CORE\n return {'host': SOLR_ADDRESS, 'port': SOLR_PORT, 'core': SOLR_CORE}",
"def getConfig(self, cfg_path, var_path=''):\n return self.ce_proxy.getConfig(self.userName, cfg_path, var_path)",
"def get_srv_config(self):\n\t\treturn Job(SDK.PrlSrv_GetSrvConfig(self.handle)[0])",
"def service_config():\n global _service_config\n if not _service_config:\n r = requests.get('https://tech.lds.org/mobile/ldstools/config.json')\n r.raise_for_status()\n _service_config = r.json()\n return _service_config",
"def get_server_pull_config(config:dict):\n try:\n server = config[\"DataTransfer\"][\"server\"]\n intersection = config[\"DataTransfer\"][\"intersection\"]\n startHour = config[\"DataTransfer\"][\"StartTime_PullFromIntersections\"][\"hour\"]\n startMinute = config[\"DataTransfer\"][\"StartTime_PullFromIntersections\"][\"minute\"]\n return server, intersection, startHour, startMinute\n except Exception as e:\n print(e)\n return -1, -1, -1, -1",
"def get_config(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVm_GetConfig', self.handle))"
] | [
"0.62976795",
"0.6167217",
"0.6033852",
"0.59730315",
"0.5912881",
"0.5785239",
"0.5746285",
"0.57154125",
"0.56996685",
"0.5613018",
"0.5584446",
"0.55679035",
"0.5522165",
"0.5490448",
"0.5458008",
"0.54465896",
"0.54264456",
"0.54160213",
"0.5399005",
"0.53983384",
"0.539763",
"0.5390428",
"0.53890854",
"0.5387823",
"0.53363097",
"0.52999884",
"0.52829474",
"0.5265419",
"0.5244062",
"0.5242763"
] | 0.7612827 | 0 |
Gets the status of a restore operation in progress. [Arguments] | def fusion_api_get_restore_status(self, param='', uri=None, api=None, headers=None):
return self.restore.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()",
"def get_import_status(self):\n return AsyncResult(self.import_task_id).state",
"def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)",
"def get_progress(self):\n ret = self.state + \"\\n\"\n self.reset_progress()\n return ret",
"def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)",
"def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()",
"def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)",
"def restore(self):\n\t\treturn Job(SDK.PrlVm_Restore(self.handle)[0])",
"def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status",
"def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status",
"def status(self, *args):\n return self.cmd('status', *args)",
"def GetProgress(self):\n return self.new_progress",
"def _get_status(self):\n return self.__status",
"def getProgress(self):",
"def progress(self, job_id: str) -> Tuple[int, str]:\n session = self._session()\n response = session.get(self._status_url(job_id))\n if response.ok:\n return int(response.json()['progress']), response.json()['status']\n else:\n response.raise_for_status()",
"def getstatus(self):\n return self.__status",
"async def get_status():",
"def get_status(self):\n return self._refreshed",
"def status(self):\n self._refresh_state()\n return self._data.get('status')",
"def get_state(self):\n\t\treturn Job(SDK.PrlVm_GetState(self.handle)[0])",
"def _get_status(self):\n if self._state in [\"processed\", \"error\"]:\n return self._state\n \n get_resp = requests.get(self.location, cookies={\"session\": self.session})\n\n self._state = get_resp.json()[\"status\"]\n self.slice_time = get_resp.json()[\"slice_time\"]\n \n return self._state",
"def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)",
"def query_job_progress():\n pass",
"def status(self):\n assert(self.__complete)\n return self.__status",
"def status(self, *arguments, **kwargs):\n return self.get_output('status', *arguments, **kwargs)",
"def get_progress(self):\n return self.cloudserver.progress",
"def status(self):\n return self.job_proto.status",
"def readback_status(self):\n status = ctypes.c_int()\n\n result = self._lib.NRFJPROG_readback_status(ctypes.byref(status))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return ReadbackProtection(status.value).name",
"def GetStatus(self):\r\n return self.status",
"def status(self):\n if self.num_steps >= self.timeout:\n return Status.TIMEOUT\n\n return Status.IN_PROGRESS"
] | [
"0.5846356",
"0.58421576",
"0.5776214",
"0.57326394",
"0.57296395",
"0.5717693",
"0.56996727",
"0.5607198",
"0.5600936",
"0.5580905",
"0.5555841",
"0.5540521",
"0.55308956",
"0.5523478",
"0.550687",
"0.5484425",
"0.5476827",
"0.5472633",
"0.54699814",
"0.5460195",
"0.545215",
"0.5435394",
"0.5431216",
"0.54215324",
"0.5395982",
"0.53909683",
"0.5382007",
"0.5366278",
"0.5339906",
"0.53358227"
] | 0.706412 | 0 |
Adds (POST) a role to a group. [Arguments] | def fusion_api_add_role_to_group(self, body, api=None, headers=None):
return self.roles.add_role_to_group(body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_role(userid, role, group, request=None):",
"def collection_post(request):\n\n # Our account parameter\n account = request.matchdict['id_account']\n\n # Our JSON parameter, this could be validated\n json = request.json_body\n role = json['role']\n\n # Our admin object\n admin = _get_admin(request)\n\n # Check if the account exists\n if account not in admin.list_accounts():\n request.response.status_int = 404\n return\n\n # Check if the role exists\n if role in admin.list_roles(account):\n request.response.status_int = 409\n return\n\n # Create the role\n admin.add_role(account, role)\n\n # Return appropriately\n request.response.status_int = 201",
"def manage_addRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n if not role_id:\n message = 'Please+provide+a+Role+ID'\n else:\n self.addRole(role_id, title, description)\n message = 'Role+added'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?manage_tabs_message=%s' %\n (self.absolute_url(), message))",
"def test_add_role_simple_post(self):\n pass",
"def set_role(self, group, role):\n self.permissions[group] = roles[role]",
"def post(self):\n data = request.json\n\n name = data.get('name')\n description = data.get('description')\n role = Role(name=name,\n description=description)\n db.session.add(role)\n db.session.commit()\n\n return None, 201",
"def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}",
"async def add_role_member(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().member.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n pack_id=request.json.get(\"pack_id\"),\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n batch_status = await utils.send(\n request.app.config.VAL_CONN,\n batch_list,\n request.app.config.TIMEOUT,\n request.json.get(\"tracker\") and True,\n )\n if request.json.get(\"tracker\"):\n return utils.create_tracker_response(\"batch_status\", batch_status)\n return json({\"proposal_id\": proposal_id})",
"def create_role(self, **kwargs):\n\n role = self.role_model(**kwargs)\n return self.put(role)",
"async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')",
"async def add_role_admin(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().admin.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n await utils.send(\n request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT\n )\n return json({\"proposal_id\": proposal_id})",
"async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )",
"def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201",
"def create_role(self, **kwargs):\n role = self.role_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(role)",
"def addRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role')",
"def test_add_role(self):\n pass",
"def add_user_group(self, groupname, ls_user):\n data = {\"groupname\": groupname, \"add_users\": ls_user}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(u\"groups/{}\".format(groupname))\n res = requests.put(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code in [200, 201, 206]:\n return Response(0, res)\n else:\n return Response(res.status_code, res)",
"def add_role():\n check_admin()\n add_role = True\n\n form = RoleForm()\n if form.validate_on_submit():\n role = Role(title=form.title.data)\n\n try:\n db.session.add(role)\n db.session.commit()\n flash('New role successfully created')\n except:\n flash('Error: Role title already exist')\n\n return redirect(url_for('admin.get_roles'))\n\n return render_template('admin/roles/role.html', form=form, add_role=add_role, title='Add Role')",
"def can_set_role(userid, role, group):",
"def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})",
"async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)",
"def addRole(self, name, description=\"\"):\n params = {\n \"f\" : \"json\",\n \"rolename\" : name,\n \"description\" : description\n }\n aURL = self._url + \"/roles/add\"\n return self._con.post(path=aURL, postdata=params)",
"def add_role():\r\n check_admin()\r\n\r\n add_role = True\r\n\r\n form = RoleForm()\r\n if form.validate_on_submit():\r\n role = Role(name=form.name.data,\r\n description=form.description.data)\r\n\r\n try:\r\n # add role to the database\r\n db.session.add(role)\r\n db.session.commit()\r\n flash('You have successfully added a new role.')\r\n except:\r\n # in case role name already exists\r\n flash('Error: role name already exists.',category='error')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_roles'))\r\n\r\n # load role template\r\n return render_template('admin/roles/role.html', add_role=add_role,\r\n form=form, title='Add Role')",
"def add_user_to_role(request, username_or_email, role, group_title, event_name):\r\n username_or_email = strip_if_string(username_or_email)\r\n try:\r\n user = _user_from_name_or_email(username_or_email)\r\n except User.DoesNotExist:\r\n return u'<font color=\"red\">Error: unknown username or email \"{0}\"</font>'.format(username_or_email)\r\n\r\n role.add_users(user)\r\n\r\n # Deal with historical event names\r\n if event_name in ('staff', 'beta-tester'):\r\n track.views.server_track(\r\n request,\r\n \"add-or-remove-user-group\",\r\n {\r\n \"event_name\": event_name,\r\n \"user\": unicode(user),\r\n \"event\": \"add\"\r\n },\r\n page=\"idashboard\"\r\n )\r\n else:\r\n track.views.server_track(request, \"add-instructor\", {\"instructor\": unicode(user)}, page=\"idashboard\")\r\n\r\n return '<font color=\"green\">Added {0} to {1}</font>'.format(user, group_title)",
"def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def grant_role(self, role, principal_ids):",
"def create_role(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_role`\")\n\n resource_path = '/oapi/v1/roles'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Role',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser",
"def add_role():\n\tcheck_admin()\n\tadd_role = True\n\n\tform = RoleForm()\n\tif form.validate_on_submit():\n\t\trole= Role(name= form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add role to the database \n\t\t\tdb.session.add(role)\n\t\t\tdb.session.commit()\n\t\t\tflash('You have successfully added a new role ')\n\t\texcept:\n\t\t\t#incase the role already exists\n\t\t flash(\"Error:the role already exists\")\n\n\t\t#redirect to the roles page\n\t\treturn redirect(url_for('admin.list_roles'))\n\n\t\t#load the role template\n\treturn render_template('admin/roles/role.html', add_role=add_role, form = form,title='Add Role')",
"def create_role(name, arn):\n\tsession = get_session()\n\tresponse = session.post(\"{url}/api/roles\".format(url=get_registry_url()), json={\"name\": name, \"arn\": arn})\n\treturn response.json()"
] | [
"0.7215559",
"0.6994631",
"0.67107433",
"0.67042166",
"0.6628243",
"0.6578587",
"0.6577107",
"0.6516352",
"0.64909977",
"0.6490786",
"0.6464874",
"0.645932",
"0.64541274",
"0.6426402",
"0.64008707",
"0.63850546",
"0.637715",
"0.6374156",
"0.6301998",
"0.62843347",
"0.6276652",
"0.62721336",
"0.6263202",
"0.625392",
"0.6246899",
"0.6227542",
"0.6212006",
"0.62011665",
"0.6195593",
"0.6190465"
] | 0.72560054 | 0 |
Gets a default or paginated collection of SAS Interconnect Types. [Arguments] | def fusion_api_get_sas_interconnect_types(self, uri=None, param='', api=None, headers=None):
return self.sasictypes.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_interconnect_types(self, param='', api=None, headers=None):\n return self.ictypes.get(api=api, headers=headers, param=param)",
"def fusion_api_get_sas_interconnects(self, uri=None, param='', api=None, headers=None):\n return self.sasics.get(uri=uri, api=api, headers=headers, param=param)",
"def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)",
"async def incidentTypes(self, includeHidden: bool = False) -> Iterable[str]:",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def getPrimaryTypes() -> List[int]:\n ...",
"def ntypes(self): # -> list[str]:\n ...",
"def ntypes(self): # -> list[None]:\n ...",
"def get_integrations_types(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'sort_by', 'expand', 'next_page', 'previous_page']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_types\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/types'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IntegrationTypeEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_types(self):\n return self.types",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def types():\n sql = \"\"\"SELECT DISTINCT sample_type\n FROM barcodes.sample\n ORDER BY sample_type\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql)\n return pm.sql.TRN.execute_fetchflatten()",
"def XPLMGetDataRefTypes(inDataRef):\n return int",
"def getTypes():\n\n\t\tquery = \"\\\n\t\t\tSELECT\\\n\t\t\t\tid_item_container_type,\\\n\t\t\t\tlabel\\\n\t\t\tFROM\\\n\t\t\t\titem_container_type\\\n\t\t\"\n\n\t\treturn {t['id_item_container_type']: t['label'] for t in Model.fetchAllRows(query)}",
"def get_catalog_search_record_types(self):\n return TypeList([])",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def document_types(db: Session = Depends(get_db)):\n return get_document_types(db)",
"def ntypes(self): # -> None:\n ...",
"def datasource_types(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"datasource_types\")",
"def get_catalog_record_types(self):\n return TypeList([])",
"def types():\n types = session.query(Type).all()\n return jsonify(types=[t.name for t in types])",
"def types_clients_view(request):\n query = request.dbsession.query(ClientType).all()\n return Utils.serialize_many(query)",
"def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}",
"def get_informatieobjecttypen(\n clients: List[Client] = None,\n) -> List[InformatieObjectType]:\n if clients is None:\n clients = _get_ztc_clients()\n\n catalogi = {cat.url: cat for cat in get_catalogi(clients=clients)}\n iots = _fetch_list(\"informatieobjecttype\", clients, InformatieObjectType)\n\n # resolve relations\n for iot in iots:\n iot.catalogus = catalogi[iot.catalogus]\n\n return iots",
"def get_analysis_iocs():\n sample_id = demisto.getArg('id')\n ioc = demisto.getArg('ioc')\n url = SUB_API + 'samples/' + sample_id + '/analysis/iocs'\n if ioc:\n url += '/' + ioc\n params = {'api_key': API_KEY}\n if demisto.getArg('limit'):\n params['limit'] = demisto.getArg('limit')\n\n r = req('GET', url, params=params)\n iocs = [] # type: ignore\n dbots = [] # type: ignore\n items = demisto.get(r.json(), 'data.items') # type: ignore\n if not items:\n append_to_analysis_iocs_arrays(iocs, dbots, demisto.get(r.json(), 'data'))\n else:\n for k in items:\n append_to_analysis_iocs_arrays(iocs, dbots, k)\n md = tableToMarkdown('ThreatGrid Behavioral Indicators for sample: ' + demisto.getArg('id'), iocs,\n ['Title', 'Confidence', 'Severity', 'IOC', 'Tags', 'IOCCategory', 'Data'])\n md += tableToMarkdown('DBot', dbots, ['Indicator', 'Score', 'Type', 'Vendor'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.IOCs': iocs, 'DBotScore': dbots},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })",
"def FacetInspireDocType(self, default=None):\n return self.data.get('metadata', {}).get('facet_inspire_doc_type', [default])",
"def datasource_types(self) -> Sequence[str]:\n return pulumi.get(self, \"datasource_types\")",
"def by_type(self, types=None):\n return self.describe(only_type=types)",
"def Institutions(self, default=[{}]):\n tmp = self.data.get('institutions', default)\n return [HEP.InstitutionObject(i) for i in tmp]",
"def get_all(isamAppliance, count=None, start=None, filter=None, sortBy=None, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieve a list of policy information point types\",\n \"{0}/{1}\".format(uri,\n tools.create_query_string(count=count, start=start, filter=filter,\n sortBy=sortBy)),\n requires_modules=requires_modules, requires_version=requires_version)"
] | [
"0.685289",
"0.60499066",
"0.5813711",
"0.57985103",
"0.569108",
"0.5689285",
"0.5627344",
"0.5624865",
"0.55435634",
"0.54477674",
"0.5333183",
"0.52820915",
"0.5249448",
"0.524886",
"0.52481085",
"0.52208096",
"0.5203648",
"0.5190318",
"0.5188795",
"0.5187444",
"0.5154192",
"0.51531196",
"0.5139839",
"0.5138234",
"0.5109055",
"0.5007174",
"0.49624553",
"0.49531117",
"0.4949901",
"0.49414882"
] | 0.7134632 | 0 |
Gets a default or paginated collection of SAS Interconnects [Arguments] | def fusion_api_get_sas_interconnects(self, uri=None, param='', api=None, headers=None):
return self.sasics.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_sas_interconnect_types(self, uri=None, param='', api=None, headers=None):\n return self.sasictypes.get(uri=uri, api=api, headers=headers, param=param)",
"def describe_interconnects(interconnectId=None):\n pass",
"def get_analysis_iocs():\n sample_id = demisto.getArg('id')\n ioc = demisto.getArg('ioc')\n url = SUB_API + 'samples/' + sample_id + '/analysis/iocs'\n if ioc:\n url += '/' + ioc\n params = {'api_key': API_KEY}\n if demisto.getArg('limit'):\n params['limit'] = demisto.getArg('limit')\n\n r = req('GET', url, params=params)\n iocs = [] # type: ignore\n dbots = [] # type: ignore\n items = demisto.get(r.json(), 'data.items') # type: ignore\n if not items:\n append_to_analysis_iocs_arrays(iocs, dbots, demisto.get(r.json(), 'data'))\n else:\n for k in items:\n append_to_analysis_iocs_arrays(iocs, dbots, k)\n md = tableToMarkdown('ThreatGrid Behavioral Indicators for sample: ' + demisto.getArg('id'), iocs,\n ['Title', 'Confidence', 'Severity', 'IOC', 'Tags', 'IOCCategory', 'Data'])\n md += tableToMarkdown('DBot', dbots, ['Indicator', 'Score', 'Type', 'Vendor'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.IOCs': iocs, 'DBotScore': dbots},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })",
"def paginated_call(self) -> global___Snippet.ClientCall:",
"def RetrieveACISA():\n\tdb = DBConnector()\n\tcur = db.cursor()\n\n\tSQLcmd = \"SELECT * FROM snaps.SNAPsLocation\"\n\tcur.execute(SQLcmd)\n\treturnList = []\n\tcount = 0\n\tfor item in cur.fetchall():\n\t\tcount += 1\n\t\ttmplist = [item[1], item[2], count, str(item[0])]\n\t\treturnList.append(tmplist)\n\treturn returnList",
"def ListOIDCClients(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]",
"def list_interfering_aps(self, conn, group=None, label=None, site=None, swarm_id=None,\n start=None, end=None, from_timestamp=None, to_timestamp=None,\n limit=100, offset=0):\n path = urls.ROGUES[\"GET_INTERFERING_AP\"]\n params = {\n \"limit\": limit,\n \"offset\": offset\n }\n if group:\n params[\"group\"] = group\n if label:\n params[\"label\"] = label\n if site:\n params[\"site\"] = site\n if swarm_id:\n params[\"swarm_id\"] = swarm_id\n if start:\n params[\"start\"] = start\n if end:\n params[\"end\"] = end\n if from_timestamp:\n params[\"from_timestamp\"] = from_timestamp\n if to_timestamp:\n params[\"to_timestamp\"] = to_timestamp\n resp = conn.command(apiMethod=\"GET\", apiPath=path, apiParams=params)\n return resp",
"def ini_get_all():\n raise NotImplementedError()",
"async def connections_list(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n\n tag_filter = {}\n for param_name in (\n \"invitation_id\",\n \"my_did\",\n \"their_did\",\n \"request_id\",\n \"invitation_key\",\n \"their_public_did\",\n \"invitation_msg_id\",\n ):\n if param_name in request.query and request.query[param_name] != \"\":\n tag_filter[param_name] = request.query[param_name]\n\n post_filter = {}\n if request.query.get(\"alias\"):\n post_filter[\"alias\"] = request.query[\"alias\"]\n if request.query.get(\"state\"):\n post_filter[\"state\"] = list(ConnRecord.State.get(request.query[\"state\"]).value)\n if request.query.get(\"their_role\"):\n post_filter[\"their_role\"] = list(\n ConnRecord.Role.get(request.query[\"their_role\"]).value\n )\n if request.query.get(\"connection_protocol\"):\n post_filter[\"connection_protocol\"] = request.query[\"connection_protocol\"]\n\n profile = context.profile\n try:\n async with profile.session() as session:\n records = await ConnRecord.query(\n session, tag_filter, post_filter_positive=post_filter, alt=True\n )\n results = [record.serialize() for record in records]\n results.sort(key=connection_sort_key)\n except (StorageError, BaseModelError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n return web.json_response({\"results\": results})",
"def sinter(self, *args):\n self.connect()\n self._write('SINTER %s\\r\\n' % ' '.join(args))\n return set(self._get_multi_response())",
"def sns(self,**kwargs):\n\n\t\tcursor = kwargs.get('cursor',self.cursor)\n\t\treturn self.toc[cursor].keys()",
"def list_apiscout(self):\n return self.__make_api_call('list/apiscout')",
"def Institutions(self, default=[{}]):\n tmp = self.data.get('institutions', default)\n return [HEP.InstitutionObject(i) for i in tmp]",
"def getCatalogs():",
"def fusion_api_get_interconnect(self, uri=None, param='', api=None, headers=None):\n return self.ic.get(uri=uri, api=api, headers=headers, param=param)",
"def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', default)",
"def oc(self, stimulusID):\r\n global stimulusAPI\r\n try:\r\n pageList = stimulusAPI.getStimulusScope(stimulusID)\r\n agentSet = set([])\r\n for page in pageList:\r\n localAgentList = stimulusAPI.getAllAgentsWithViewOfSpecifiedPage(page)\r\n localAgentSet = set(localAgentList)\r\n agentSet.update(localAgentSet)\r\n agentList = list(agentSet)\r\n return agentList\r\n except Exceptions.InvalidStimulusProcessingType as e:\r\n raise e\r\n except Exceptions.ScriptError as e:\r\n raise e\r\n #self.execute(stimulusID)\r\n except Exception as e:\r\n raise Exceptions.ScriptError(e)",
"def ls():\n # TODO: listing all availabe containers form sequence\n return",
"def fusion_api_get_interconnect_types(self, param='', api=None, headers=None):\n return self.ictypes.get(api=api, headers=headers, param=param)",
"def list_incidents_command():\n cursor = COLLECTION.find({}, {'_id': False})\n incidents = []\n results: list = []\n for incident in cursor:\n for name in incident:\n incidents.append(name)\n for i in incidents:\n if i not in results:\n results.append(i)\n human_readable = tableToMarkdown(f'List of incidents in collecion {COLLECTION_NAME}', results,\n headers=['Incidents'])\n return human_readable, {}, {}",
"def fusion_api_get_interconnect_nameservers(self, uri=None, api=None, param='', headers=None):\n param = '/nameServers%s' % param\n return self.ic.get(uri=uri, api=api, headers=headers, param=param)",
"def get_isoforms(xint,conn):\n\n isoforms = ('SELECT DISTINCT f.name '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, ' \n 'feature f, cvterm cvt, cvterm cvt2, feature_relationship fr, feature f2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fi.role_id = cvt.cvterm_id '\n 'AND fip.type_id = cvt2.cvterm_id AND '\n 'cvt2.name = \\'interacting isoform\\' AND f.feature_id = fr.subject_id '\n 'AND f2.feature_id = fr.object_id AND f.is_obsolete = \\'f\\' AND '\n 'f2.uniquename = %s AND i.uniquename = %s')\n isos = connect(isoforms,xint,conn)\n return(isos)",
"def get(self):\n return GenericGet().get_catalogs()",
"def Imprints(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('imprints', default)\n return [HEP.ImprintObject(i) for i in tmp]",
"def fusion_api_get_connections(self, uri=None, param='', api=None, headers=None):\n return self.conn.get(uri=uri, api=api, headers=headers, param=param)",
"def stack ( self ) :\n if not self._ictxs :\n raise RuntimeError , \" Invalid pointer to IIncidentSvc \"\n # \n algs = self._ictxs.algorithms()\n #\n result = []\n for a in algs : result.append ( iAlgorithm ( a.name() , a ) ) \n # \n return result",
"def stack ( self ) :\n if not self._ictxs :\n raise RuntimeError , \" Invalid pointer to IIncidentSvc \"\n # \n algs = self._ictxs.algorithms()\n #\n result = []\n for a in algs : result.append ( iAlgorithm ( a.name() , a ) ) \n # \n return result",
"def getAllAPI():\n list_strain = StrainAPI().get_all()\n schema = StrainSchema()\n results = schema.load(list_strain, many=True)\n return results",
"def get_paginator(operation_name=None):\n pass"
] | [
"0.5350622",
"0.5300352",
"0.5278391",
"0.519298",
"0.50592476",
"0.5055075",
"0.49493715",
"0.4927237",
"0.48888415",
"0.48766434",
"0.48765424",
"0.48755267",
"0.4841486",
"0.48072767",
"0.47609085",
"0.47599703",
"0.47382537",
"0.47320825",
"0.47285804",
"0.47160056",
"0.46968836",
"0.4677136",
"0.4669768",
"0.46655828",
"0.46572328",
"0.4653291",
"0.46431443",
"0.46431443",
"0.4632449",
"0.4625788"
] | 0.6775317 | 0 |
Deletes a SAS Interconnect from the appliance based on name OR uri [Arguments] | def fusion_api_delete_sas_interconnect(self, name=None, uri=None, api=None, headers=None):
return self.sasics.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_sas_lig(self, name=None, uri=None, api=None, headers=None):\n return self.saslig.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_sas_li(self, name=None, uri=None, api=None, headers=None):\n return self.sasli.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)",
"def fusion_api_delete_sas_logical_jbods(self, uri, api=None, headers=None):\n return self.sas_logical_jbods.delete(uri=uri, api=api, headers=headers)",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def delete_interconnect(interconnectId=None):\n pass",
"def _delete_bridge(self, method, api, header, data):\n self._execute_api(method, api, header)",
"def fusion_api_delete_logical_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.logical_enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)",
"def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def RevokeAccessURI(self) -> None:\n logger.info('Revoking SAS URI for snapshot {0:s}'.format(self.name))\n request = self.compute_client.snapshots.begin_revoke_access(\n self.resource_group_name, self.name)\n request.wait()\n logger.info('SAS URI revoked for snapshot {0:s}'.format(self.name))",
"def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0",
"def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')",
"def catalog_alias_delete(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n alias.delete_ermrest_alias(really=True)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e",
"def fusion_api_delete_lig(self, name=None, uri=None, api=None, headers=None, etag=None):\n return self.lig.delete(name=name, uri=uri, api=api, headers=headers, etag=etag)",
"def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vsnrange.delete(name, uri, api, headers)",
"def _delete(self, uri, headers=None):\n if self.openam_url[-1:] == '/':\n openam_path = self.openam_url + uri\n else:\n openam_path = self.openam_url + \"/\" + uri\n\n try:\n data = requests.delete(openam_path, headers=headers, timeout=self.timeout, verify=self.verify)\n except requests.exceptions.RequestException as e:\n data = {'error': e}\n return data",
"def delete_endpoint(EndpointName=None):\n pass",
"def bdev_aio_delete(client, name):\n params = {'name': name}\n return client.call('bdev_aio_delete', params)",
"def deleteStudy(self, study_id, full_delete):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.study_delete', [study_id, full_delete])",
"def delete():",
"def delete_remote_access_session(arn=None):\n pass",
"def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)",
"def delete(self, host, file):",
"def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)",
"def bdev_ocf_delete(client, name):\n params = {'name': name}\n\n return client.call('bdev_ocf_delete', params)",
"def delete(self, path):\n client = self.connect(VAULT_TOKEN)\n client.delete(path)"
] | [
"0.71534884",
"0.70916635",
"0.6285682",
"0.6112178",
"0.60924864",
"0.6046912",
"0.6028701",
"0.60073143",
"0.5985514",
"0.597276",
"0.59508693",
"0.59472984",
"0.5895157",
"0.5878184",
"0.58633906",
"0.5862751",
"0.5804604",
"0.5801492",
"0.576765",
"0.5736654",
"0.56880414",
"0.5684058",
"0.5676417",
"0.56636757",
"0.56595016",
"0.5647614",
"0.5646447",
"0.5626421",
"0.5587296",
"0.5580453"
] | 0.8076723 | 0 |
Refreshes a SAS Interconnect using the PATCH http verb. [Arguments] | def fusion_api_refresh_sas_interconnect(self, body=None, uri=None, param='', api=None, headers=None):
param = "/refreshState%s" % param
return self.sasics.patch(body=body, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_patch_sas_interconnect(self, body=None, uri=None, api=None, headers=None):\n return self.sasics.patch(body=body, uri=uri, api=api, headers=headers)",
"def fusion_api_patch_interconnect(self, body, uri, param='', api=None, headers=None):\n return self.ic.patch(body=body, uri=uri, api=api, headers=headers, param=param)",
"def patch(self, *args, **kwargs):\n self.request(\"patch\", *args, **kwargs)",
"def httpPatch(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('PATCH', url, data, params, headers)",
"def PatchConcepts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def patch(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError",
"def fusion_api_patch_sas_li(self, body=None, uri=None, api=None, headers=None):\n return self.sasli.patch(body, uri, api, headers)",
"def patch(self, url_or_path, *args, **kwargs):\n return self.request.patch(url_or_path, *args, **kwargs).json()",
"def patch(self, endpoint, content=None, params=None):\n\t\treturn self._call(\"PATCH\", endpoint, content, params)",
"def patch(url, data=None, **kwargs):\n\n return request('patch', url, data=data, **kwargs)",
"def simulate_patch(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PATCH', path, **kwargs)",
"def handle_patch(self, api, command):\n return self._make_request_from_command('PATCH', command)",
"def patch(self, endpoint, params=None, data=None):\n params = params or dict()\n data = data or dict()\n return self.request(verb=requests.patch, address=self.project_address + endpoint,\n params=params, data=data)",
"def patch(self , request , pk = None ):\r\n return Response({'method':'patch'})",
"def patch(self):\n\n if session.get(\"login\",False) is not True:\n return {\n \"errno\": 699,\n \"describe\": \"需要登录\"\n }\n\n id = request.form.get(\"id\")\n content = request.form.get(\"content\")\n hashtag = request.form.get(\"hashtag\")\n\n hashtag = [] if hashtag == None or hashtag == \"\" else hashtag.split( \",\" )\n if isinstance(hashtag, str):\n hashtag = json.loads(hashtag)\n\n edit_doc(id, content, hashtag)\n\n return {\"errno\":0}",
"def patch(self, uri, data=None, **kwargs):\n return self.session.patch(uri, data=data, **kwargs)",
"def _patch(self, url, json=None, **kwargs):\n kwargs = Connection._prepare_json_payload(json, **(kwargs or {}))\n return self._http.patch(self.cluster + url, timeout=self.timeout, **(kwargs or {}))",
"def patch(self, url, body=None, headers=None):\n return self._request('PATCH', url, body, headers)",
"def patch(self, request , pk=None):\n return Response({'message':'PATCH'})",
"def fusion_api_patch_enclosure(self, body, uri, api=None, headers=None, etag=None):\n return self.enclosure.patch(body, uri, api, headers, etag)",
"def patch(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'patch', api_path, *args, **kwargs)",
"def patch(self, request, pk=None):\n\n return Response({'method': 'patch'})",
"def patch(self,request,pk = None):\n return Response({'method': 'PATCH'})",
"def patch(url, to_error=_default_to_error, data=None, **kwargs):\n\n return request('patch', url, to_error=to_error, data=data, **kwargs)",
"def fusion_api_edit_rack(self, body, uri, api=None, headers=None):\n return self.rack.update(body, uri, api, headers)",
"def patch(self, *args, **kwargs):\n return self.handle_patch_request()",
"def client_patch(self, path, data=None, content_type=client.MULTIPART_CONTENT, follow=False, **extra):\r\n\r\n data = data or {}\r\n response = super(client.Client, self).patch(path, data=data, content_type=content_type, **extra)\r\n if follow:\r\n response = self._handle_redirects(response, **extra)\r\n return response",
"async def patch(url, session=None, **kwargs):\n\n method = 'PATCH'\n resp = await _request(method, url, session=session, **kwargs)\n return resp",
"def patch(self, request, pk=None):\n return Response({'method': 'PATCH'})",
"def patch(self, request, pk=None):\n return Response({'method': 'PATCH'})"
] | [
"0.6675101",
"0.627341",
"0.6237193",
"0.6141386",
"0.6027831",
"0.5904484",
"0.58985615",
"0.5815906",
"0.5799898",
"0.57866615",
"0.5774356",
"0.5731663",
"0.5718279",
"0.57136154",
"0.5711923",
"0.5688395",
"0.55969775",
"0.5579017",
"0.55683225",
"0.55654866",
"0.5550302",
"0.5523355",
"0.5500096",
"0.54984146",
"0.548846",
"0.5478898",
"0.5440243",
"0.5439787",
"0.54388374",
"0.54388374"
] | 0.65348923 | 1 |
Creates a completed SAS LIG request body [Arguments] | def fusion_api_create_sas_lig_payload(self, body, api=None):
return self.saslig.make_body(body=body, api=api) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def LATCH_create(bytes=None, rotationInvariance=None, half_ssd_size=None, sigma=None): # real signature unknown; restored from __doc__\n pass",
"def fusion_api_create_sas_lig(self, body, api=None, headers=None):\n return self.saslig.create(body, api, headers)",
"def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"payload\", type=str, location=\"form\")\n parser.add_argument(\"interpreter\", type=str, location=\"form\")\n parser.add_argument(\"input_file_path\", type=str, location=\"form\")\n args = parser.parse_args()\n try:\n payload = base64.b64decode(args[\"payload\"]).decode()\n except KeyError:\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"payload (base64) parameter is required\"\n )\n except UnicodeError:\n return errors.all_errors(\n \"UNICODE_ERROR\", \"payload (str) does not seems to be a valid base64\"\n )\n except Exception as err:\n return errors.all_errors(type(err).__name__, err)\n\n try:\n request_user = request.headers.get(\"X-SOCA-USER\")\n if request_user is None:\n return errors.all_errors(\"X-SOCA-USER_MISSING\")\n\n # Basic Input verification\n check_job_name = re.search(r\"#PBS -N (.+)\", payload)\n check_job_project = re.search(r\"#PBS -P (.+)\", payload)\n\n if check_job_name:\n sanitized_job_name = re.sub(\n r\"\\W+\", \"\", check_job_name.group(1)\n ) # remove invalid char,space etc...\n payload = payload.replace(\n \"#PBS -N \" + check_job_name.group(1),\n \"#PBS -N \" + sanitized_job_name,\n )\n else:\n sanitized_job_name = \"\"\n\n if check_job_project:\n sanitized_job_project = re.sub(\n r\"\\W+\", \"\", check_job_project.group(1)\n ) # remove invalid char,space etc...\n payload = payload.replace(\n \"#PBS -P \" + check_job_project.group(1),\n \"#PBS -P \" + sanitized_job_project,\n )\n\n if args[\"interpreter\"] is None:\n interpreter = config.Config.PBS_QSUB\n else:\n interpreter = args[\"interpreter\"]\n try:\n random_id = \"\".join(\n random.choice(string.ascii_letters + string.digits)\n for i in range(10)\n )\n job_submit_file = \"job_submit_\" + str(random_id) + \".sh\"\n\n group_ownership = f\"{request_user}{config.Config.GROUP_NAME_SUFFIX}\"\n if args[\"input_file_path\"]:\n job_output_path = args[\"input_file_path\"]\n else:\n # create new job directory if needed\n job_output_folder = (\n config.Config.USER_HOME\n + \"/\"\n + request_user\n + \"/soca_job_output/\"\n )\n job_output_path = (\n job_output_folder + sanitized_job_name + \"_\" + str(random_id)\n )\n os.makedirs(job_output_path)\n os.chmod(job_output_folder, 0o700)\n shutil.chown(\n job_output_folder, user=request_user, group=group_ownership\n )\n shutil.chown(\n job_output_path, user=request_user, group=group_ownership\n )\n os.chmod(job_output_path, 0o700)\n\n os.chdir(job_output_path)\n with open(job_submit_file, \"w\") as text_file:\n text_file.write(payload)\n shutil.chown(\n job_output_path + \"/\" + job_submit_file,\n user=request_user,\n group=group_ownership,\n )\n os.chmod(job_output_path + \"/\" + job_submit_file, 0o700)\n submit_job_command = interpreter + \" \" + job_submit_file\n\n launch_job = subprocess.check_output(\n [\"su\", request_user, \"-c\", submit_job_command],\n stderr=subprocess.PIPE,\n )\n if interpreter == config.Config.PBS_QSUB:\n job_id = ((launch_job.decode(\"utf-8\")).rstrip().lstrip()).split(\n \".\"\n )[0]\n return {\"success\": True, \"message\": str(job_id)}, 200\n else:\n return {\n \"success\": True,\n \"message\": \"Your Linux command has been executed successfully. Output (if any) can be accessed on <a href='/my_files?path=\"\n + job_output_path\n + \"'>\"\n + job_output_path\n + \"</a>\",\n }, 200\n\n except subprocess.CalledProcessError as e:\n return {\n \"success\": False,\n \"message\": {\n \"error\": \"Unable to submit the job. 
Please verify your script file (eg: malformed inputs, syntax error, extra space in the PBS variables ...) or refer to the 'stderr' message.\",\n \"stderr\": \"{}\".format(\n e.stderr.decode(sys.getfilesystemencoding())\n ),\n \"stdout\": \"{}\".format(\n e.output.decode(sys.getfilesystemencoding())\n ),\n \"job_script\": str(payload),\n },\n }, 500\n\n except Exception as err:\n return {\n \"success\": False,\n \"message\": {\n \"error\": \"Unable to run Qsub command.\",\n \"trace\": str(err),\n \"job_script\": str(payload),\n },\n }, 500\n\n except Exception as err:\n return errors.all_errors(type(err).__name__, err)",
"def __init__(__self__, *,\n chain_of_custody_sas_key: str,\n contact_details: 'outputs.ContactDetailsResponse',\n copy_log_details: Sequence[Any],\n copy_progress: Sequence['outputs.CopyProgressResponse'],\n delivery_package: 'outputs.PackageShippingDetailsResponse',\n destination_account_details: Sequence[Any],\n error_details: Sequence['outputs.JobErrorDetailsResponse'],\n job_details_type: str,\n job_stages: Sequence['outputs.JobStagesResponse'],\n return_package: 'outputs.PackageShippingDetailsResponse',\n reverse_shipment_label_sas_key: str,\n shipping_address: 'outputs.ShippingAddressResponse',\n device_password: Optional[str] = None,\n expected_data_size_in_tera_bytes: Optional[int] = None,\n preferences: Optional['outputs.PreferencesResponse'] = None):\n pulumi.set(__self__, \"chain_of_custody_sas_key\", chain_of_custody_sas_key)\n pulumi.set(__self__, \"contact_details\", contact_details)\n pulumi.set(__self__, \"copy_log_details\", copy_log_details)\n pulumi.set(__self__, \"copy_progress\", copy_progress)\n pulumi.set(__self__, \"delivery_package\", delivery_package)\n pulumi.set(__self__, \"destination_account_details\", destination_account_details)\n pulumi.set(__self__, \"error_details\", error_details)\n pulumi.set(__self__, \"job_details_type\", 'DataBoxHeavy')\n pulumi.set(__self__, \"job_stages\", job_stages)\n pulumi.set(__self__, \"return_package\", return_package)\n pulumi.set(__self__, \"reverse_shipment_label_sas_key\", reverse_shipment_label_sas_key)\n pulumi.set(__self__, \"shipping_address\", shipping_address)\n if device_password is not None:\n pulumi.set(__self__, \"device_password\", device_password)\n if expected_data_size_in_tera_bytes is not None:\n pulumi.set(__self__, \"expected_data_size_in_tera_bytes\", expected_data_size_in_tera_bytes)\n if preferences is not None:\n pulumi.set(__self__, \"preferences\", preferences)",
"def fusion_api_create_lig_payload(self, **kwargs):\n\n return self.lig.make_body(**kwargs)",
"def fusion_api_create_lsg(self, body, api=None, headers=None):\n return self.lsg.create(body, api, headers)",
"def __init__(__self__, *,\n chain_of_custody_sas_key: str,\n contact_details: 'outputs.ContactDetailsResponse',\n copy_log_details: Sequence[Any],\n copy_progress: Sequence['outputs.DataBoxDiskCopyProgressResponse'],\n delivery_package: 'outputs.PackageShippingDetailsResponse',\n destination_account_details: Sequence[Any],\n disks_and_size_details: Mapping[str, int],\n error_details: Sequence['outputs.JobErrorDetailsResponse'],\n job_details_type: str,\n job_stages: Sequence['outputs.JobStagesResponse'],\n return_package: 'outputs.PackageShippingDetailsResponse',\n reverse_shipment_label_sas_key: str,\n shipping_address: 'outputs.ShippingAddressResponse',\n expected_data_size_in_tera_bytes: Optional[int] = None,\n passkey: Optional[str] = None,\n preferences: Optional['outputs.PreferencesResponse'] = None,\n preferred_disks: Optional[Mapping[str, int]] = None):\n pulumi.set(__self__, \"chain_of_custody_sas_key\", chain_of_custody_sas_key)\n pulumi.set(__self__, \"contact_details\", contact_details)\n pulumi.set(__self__, \"copy_log_details\", copy_log_details)\n pulumi.set(__self__, \"copy_progress\", copy_progress)\n pulumi.set(__self__, \"delivery_package\", delivery_package)\n pulumi.set(__self__, \"destination_account_details\", destination_account_details)\n pulumi.set(__self__, \"disks_and_size_details\", disks_and_size_details)\n pulumi.set(__self__, \"error_details\", error_details)\n pulumi.set(__self__, \"job_details_type\", 'DataBoxDisk')\n pulumi.set(__self__, \"job_stages\", job_stages)\n pulumi.set(__self__, \"return_package\", return_package)\n pulumi.set(__self__, \"reverse_shipment_label_sas_key\", reverse_shipment_label_sas_key)\n pulumi.set(__self__, \"shipping_address\", shipping_address)\n if expected_data_size_in_tera_bytes is not None:\n pulumi.set(__self__, \"expected_data_size_in_tera_bytes\", expected_data_size_in_tera_bytes)\n if passkey is not None:\n pulumi.set(__self__, \"passkey\", passkey)\n if preferences is not None:\n pulumi.set(__self__, \"preferences\", preferences)\n if preferred_disks is not None:\n pulumi.set(__self__, \"preferred_disks\", preferred_disks)",
"def call_asr():\n\ttic = time.time()\n\treq = flask.request.data.decode(\"utf-8\")\n\taudio_arr = flask.json.loads(req)[\"data\"]\n\twav = np.array(audio_arr, np.float32)\n\t# normalize ([-1:1] normalization)\n\twav = normalize_audio(wav, method=\"-1_1\")\n\t# reduce noise (comment it to make ASR a bit faster)\n\twav = reduce_noise(wav, method=\"wiener\")\n\t# write the recorded audio (for debugging reasons)\n\t# wavfile.write(filename=\"recorded.wav\", rate=16000, data=wav)\n\t# transcribe the provided data\n\tout = asr_model.transcribe(wav)\n\ttoc = time.time()\n\tapp.logger.info(\"ASR Model Transcription: \"+out)\n\tapp.logger.info(\"ASR Duration: {} seconds\".format(toc-tic))\n\t# form response\n\tflask_response= app.response_class(response=flask.json.dumps({\"text\": out}),\n\t\t\t\t\t\t\t\t\t\tstatus=200,\n\t\t\t\t\t\t\t\t\t\tmimetype='application/json' )\n\treturn flask_response",
"def request_ims_data():\n input_check_results = input_validation.check_inputs(request)\n if input_check_results is not None:\n return error_response(input_check_results) \n\n time_range = request.json['timeRange']\n station = request.json['station']\n\n # Max msg_id size = 20 characters, so a UUID is too long; instead generate a random 20 digit number for\n # the random message id to represent this request and its associated response\n msg_id = str(random.randint(1,99999999999999999999))\n [request_filename, request_file_path, response_filename, response_file_path] = file_utils.create_filenames(msg_id, base_dir)\n \n # Write request to a file \n request_text = file_utils.create_waveform_request(msg_id, time_range, station) \n request_file = open(request_file_path, 'w')\n request_file.write(request_text)\n request_file.close()\n\n # Calling via subprocess is a little icky, but if we call this way:\n # nms_client.batch.batchclient.bootstrap_run()\n # then we have to manually mess with sys.argv to put the filename in the command line arguments. This is unlikely\n # to hold up well with multiple requests running simultaneously\n subprocess_output = subprocess.check_output(['/ims2/nms_client/bin/nms_client.sh', '-d', base_dir, '-f', response_filename, request_file_path])\n os.remove(request_file_path)\n\n # Read response file if it was written. \n if os.path.isfile(response_file_path):\n response_json = file_utils.response_file_to_json(response_file_path) \n os.remove(response_file_path) \n return response_json\n # No response file means there was some sort of error; return the output from calling the client. \n else:\n return error_response(subprocess_output)",
"def alloc_request():\n return SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)",
"def sign(self, body, external_aad, private_key):",
"def dorequest( request, body=None, chunk=None, trailers=None ):",
"def do_POST(self):\n self.responder = Responder()\n call_request_reader = ipc.FramedReader(self.rfile)\n call_request = call_request_reader.read_framed_message()\n resp_body = self.responder.respond(call_request)\n self.send_response(200)\n self.send_header('Content-Type', 'avro/binary')\n self.end_headers()\n resp_writer = ipc.FramedWriter(self.wfile)\n resp_writer.write_framed_message(resp_body)",
"def do_POST(self):\n self.responder = Responder()\n call_request_reader = ipc.FramedReader(self.rfile)\n call_request = call_request_reader.read_framed_message()\n resp_body = self.responder.respond(call_request)\n self.send_response(200)\n self.send_header('Content-Type', 'avro/binary')\n self.end_headers()\n resp_writer = ipc.FramedWriter(self.wfile)\n resp_writer.write_framed_message(resp_body)",
"def do_POST(self):\n self.responder = Responder()\n call_request_reader = ipc.FramedReader(self.rfile)\n call_request = call_request_reader.read_framed_message()\n resp_body = self.responder.respond(call_request)\n self.send_response(200)\n self.send_header('Content-Type', 'avro/binary')\n self.end_headers()\n resp_writer = ipc.FramedWriter(self.wfile)\n resp_writer.write_framed_message(resp_body)",
"def handle(req):\n # redirecting sstdout \n original = sys.stdout\n sys.stdout = open('file', 'w')\n\n json_data = json.loads(req)\n region_data = pd.DataFrame(json_data['region_definition'])\n train_data = pd.DataFrame(json_data['train_data'])\n\n x_frame = np.array(region_data[['lng', 'lat']])\n x_id = np.array(region_data['id'])\n x_coords = np.array(train_data[['lng', 'lat']])\n n_trials = np.array(train_data['n_trials'])\n n_positive = np.array(train_data['n_positive'])\n threshold = json_data['request_parameters']['threshold']\n\n response = adaptive_prototype_0(x_frame=x_frame, x_id=x_id,\n\t\t\t\t x_coords=x_coords,\n\t\t\t\t n_positive=n_positive,\n\t\t\t\t n_trials=n_trials,\n\t\t\t\t threshold=threshold,\n\t\t\t\t covariate_layers=None)\n sys.stdout = original\n print(json.dumps(response), end='')",
"def make_request(dbname='default'):\n num_beams = get_num_to_request()\n if not num_beams:\n # Request size is 0\n return\n dlm_cout.outs(\"Requesting data\\nIssuing a request of size %d\" % num_beams)\n\n # Ask to restore num_beams\n db = database.Database(dbname)\n QUERY = \"SELECT f.obs_id FROM full_processing as f LEFT JOIN processing AS p ON f.obs_id = p.obs_id WHERE f.status='available' AND p.details is NULL LIMIT %d\"%num_beams\n db.cursor.execute(QUERY)\n obs_ids = [row[0] for row in db.cursor.fetchall()]\n\n # Ask for an uuid\n QUERY = \"SELECT UUID();\"\n db.cursor.execute(QUERY)\n guid = db.cursor.fetchone()[0]\n\n if not obs_ids:\n print \"There are no files to be restored.\"\n return\n\n # Mark the beams for restorations\n for obs_id in obs_ids:\n QUERY = \"UPDATE full_processing SET status='requested', guid='%s', updated_at=NOW() WHERE obs_id=%s\"%(guid, obs_id)\n db.cursor.execute(QUERY)\n db.conn.close()\n\n #if guid == \"fail\":\n # raise pipeline_utils.PipelineError(\"Request for restore returned 'fail'.\")\n\n requests = jobtracker.query(\"SELECT * FROM requests WHERE guid='%s'\" % guid)\n\n if requests:\n # Entries in the requests table exist with this GUID!?\n raise pipeline_utils.PipelineError(\"There are %d requests in the \" \\\n \"job-tracker DB with this GUID %s\" % \\\n (len(requests), guid))\n\n jobtracker.query(\"INSERT INTO requests ( \" \\\n \"numbits, \" \\\n \"numrequested, \" \\\n \"file_type, \" \\\n \"guid, \" \\\n \"created_at, \" \\\n \"updated_at, \" \\\n \"status, \" \\\n \"details) \" \\\n \"VALUES (%d, %d, '%s', '%s', '%s', '%s', '%s', '%s')\" % \\\n (config.download.request_numbits, num_beams, \\\n config.download.request_datatype, guid, \\\n jobtracker.nowstr(), jobtracker.nowstr(), 'waiting', \\\n 'Newly created request'))",
"def create(self):\n\t\t\n\t\tflagbyte = 0\n\t\tif self.synf: flagbyte += 1\n\t\tif self.ackf: flagbyte += 2\n\t\t\n\t\tself.header = struct.pack(\">IBIII\", self.connid, flagbyte, self.seqn, self.ackn, self.recv)\n\t\t\n\t\tself.data = self.header+self.payload",
"def req_handler(args):\n key = _get_key(args)\n subject = get_subject_arguments()\n req = create_certificate_request(key, subject=subject, file_name=args.req_out)\n if not args.req_out:\n print(print_certificate_request(req))\n return req",
"def create_body_dict(name, schema):\n body_dict = {}\n body_dict['in'] = 'body'\n body_dict['name'] = name\n body_dict['schema'] = schema\n body_dict['description'] = 'ID of ' + name\n body_dict['required'] = True\n return body_dict",
"def request_initialization(self) -> global___Snippet.StreamingRequestInitialization:",
"def request_initialization(self) -> global___Snippet.StreamingRequestInitialization:",
"async def package_request(self, request):\n json_dict = request.get_json_data_dict(JSONFlag.NET)\n\n # Make signature.\n vasp = self.vasp\n my_key = vasp.info_context.get_my_compliance_signature_key(\n self.get_my_address().as_str()\n )\n json_string = await my_key.sign_message(json.dumps(json_dict))\n\n net_message = NetMessage(\n self.myself,\n self.other,\n CommandRequestObject,\n json_string,\n request\n )\n\n return net_message",
"def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}",
"async def app(scope, receive, send):\n html = b\"\"\"\n <!doctype html>\n <html>\n <head>\n <title>Hello ASGI!</title>\n </head>\n <body>\n <main>\n <h1>Hello ASGI!</h1>\n </main>\n </body>\n </html>\n \"\"\"\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 200,\n \"headers\": [[b\"content-type\", b\"text/html\"], [b\"content-length\", b\"269\"],],\n }\n )\n await send(\n {\"type\": \"http.response.body\", \"body\": html, \"more_body\": False,}\n )",
"def post(self):\n dataSimulator = DataProducer()\n dataSimulator.produceData()\n return Response(\"loaded\", 202, {'Content-Type': 'text/plaintext'})",
"def create():\n config = request.data\n return add_scheduling_block(config)",
"def build_request(self):\n self.build_header_2_40()\n self.build_fullprops()\n data_compressed = mcafee_crypto.mcafee_compress(self.agent_pubkey_epo_format + self.fullprops_xml)\n data_len = struct.pack('<I', len(data_compressed))\n final_header_len = struct.pack('<I', len(self.build_header_1()) + len(self.build_header_2_40()))\n self.build_header_1(final_header_len, data_len)\n final_header_1 = mcafee_crypto.xor_c(self.header_1)\n request_signature = mcafee_crypto.dsa_sign(self.regkey, self.header_1 + self.header_2 + data_compressed)\n data_encrypted = mcafee_crypto.mcafee_3des_encrypt(self.header_2 + data_compressed + request_signature)\n post_data = mcafee_crypto.xor_c(final_header_1) + data_encrypted\n return post_data",
"def bundler(event, context):\n fhir_access_token = get_fhir_access_token()\n\n message = base64.b64decode(event['data']).decode('utf-8')\n bundle_run = event['attributes']['bundle_run']\n bundle_group = event['attributes']['bundle_group']\n patient_id = event['attributes']['patient_id']\n gcp_project = event['attributes']['gcp_project']\n gcp_location = event['attributes']['gcp_location']\n gcp_bucket = event['attributes']['gcp_bucket']\n gcp_dataset = event['attributes']['gcp_dataset']\n gcp_fhirstore = event['attributes']['gcp_fhirstore']\n\n starttime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n bundle, resp_fhir = send_bundle_to_healthcare_api(\n message, fhir_access_token, gcp_project, gcp_location, gcp_dataset,\n gcp_fhirstore\n )\n endtime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(resp_fhir)\n\n # Error will show up when the Healthcare API is unresponsive or crashes\n if 'error' in resp_fhir:\n print(bundle['id'])\n print(bundle)\n store_bad_bundle_in_cloud_storage(\n resp_fhir, gcp_bucket, bundle, bundle_run, error_key='error'\n )\n log_error_to_bigquery(\n gcp_project,\n patient_id,\n bundle_group,\n bundle['id'],\n bundle_run,\n resp_fhir['error'],\n err_flg=True\n )\n # OperationOutcome will be returned when a validation issue has been found\n elif resp_fhir['resourceType'] == 'OperationOutcome':\n print(bundle['id'])\n print(bundle)\n store_bad_bundle_in_cloud_storage(\n resp_fhir, gcp_bucket, bundle, bundle_run\n )\n log_error_to_bigquery(\n gcp_project, patient_id, bundle_group, bundle['id'], bundle_run,\n resp_fhir['issue'][0]\n )\n else:\n log_pass_to_bigquery(\n gcp_project, patient_id, bundle_group, bundle['id'], bundle_run,\n starttime, endtime\n )",
"def create_request(v1):\n #get entered data\n data = request.get_json()\n\n #picking the request attributes\n req_title = data.get(\"request_title\")\n req_desc = data.get(\"request_description\")\n requester_name = \"Gideon\"\n req_id = len(all_requests) +1 # + random.randint(1, 3000)\n\n #validation\n if not req_title:\n return jsonify({\"message\": \"Request has no title\"}), 400\n if not req_desc:\n return jsonify({\"message\": \"Request has no description\"}), 400\n if not requester_name:\n return jsonify({\"message\": \"Request must be issued by a user\"}), 400\n if not req_id:\n return jsonify({\"message\": \"Request has no id\"}), 400\n\n #storing entered request\n new_request = MaintenanceRequest(req_title, req_desc, requester_name, req_id)\n all_requests.append(new_request)\n # new_number_of_requests = len(all_requests)\n\n return jsonify({\n \"message\":\"sucessfully created request\",\n 'request_title':new_request.title,\n \"request_description\":new_request.description,\n \"requester_name\" : new_request.requester_name,\n \"request_id\" : new_request.request_id\n })"
] | [
"0.6090915",
"0.60387844",
"0.57954663",
"0.5706829",
"0.5560434",
"0.5415134",
"0.53773665",
"0.50657904",
"0.50618273",
"0.50520015",
"0.5018075",
"0.48289573",
"0.48109937",
"0.48109937",
"0.48109937",
"0.4776612",
"0.4772994",
"0.47353542",
"0.47049215",
"0.46958983",
"0.46853018",
"0.46853018",
"0.46791095",
"0.46561354",
"0.4649938",
"0.46419817",
"0.4631842",
"0.46256185",
"0.46192735",
"0.46007842"
] | 0.6312348 | 0 |
Deletes a SAS LIG from the appliance based on name OR uri [Arguments] | def fusion_api_delete_sas_lig(self, name=None, uri=None, api=None, headers=None):
return self.saslig.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_sas_li(self, name=None, uri=None, api=None, headers=None):\n return self.sasli.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_sas_interconnect(self, name=None, uri=None, api=None, headers=None):\n return self.sasics.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)",
"def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def fusion_api_delete_sas_logical_jbods(self, uri, api=None, headers=None):\n return self.sas_logical_jbods.delete(uri=uri, api=api, headers=headers)",
"def fusion_api_delete_lig(self, name=None, uri=None, api=None, headers=None, etag=None):\n return self.lig.delete(name=name, uri=uri, api=api, headers=headers, etag=etag)",
"def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()",
"def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):\n return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def RevokeAccessURI(self) -> None:\n logger.info('Revoking SAS URI for snapshot {0:s}'.format(self.name))\n request = self.compute_client.snapshots.begin_revoke_access(\n self.resource_group_name, self.name)\n request.wait()\n logger.info('SAS URI revoked for snapshot {0:s}'.format(self.name))",
"def cluster_application_record_delete(self, record_name):\n return self.request( \"cluster-application-record-delete\", {\n 'record_name': [ record_name, 'record-name', [ basestring, 'None' ], False ],\n }, {\n } )",
"def bdev_passthru_delete(client, name):\n params = {'name': name}\n return client.call('bdev_passthru_delete', params)",
"def bdev_uring_delete(client, name):\n params = {'name': name}\n return client.call('bdev_uring_delete', params)",
"def fusion_api_delete_ls(self, name=None, uri=None, api=None, headers=None):\n return self.ls.delete(name=name, uri=uri, api=api, headers=headers)",
"def snap_delete(mnode, snapname):\n\n cmd = \"gluster snapshot delete %s --mode=script\" % snapname\n return g.run(mnode, cmd)",
"def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)",
"def delete_suggester(DomainName=None, SuggesterName=None):\n pass",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete_upload(arn=None):\n pass",
"def bdev_aio_delete(client, name):\n params = {'name': name}\n return client.call('bdev_aio_delete', params)",
"def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))",
"def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):\n return self.system.delete(uri=uri, api=api, headers=headers)",
"def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def bdev_ocf_delete(client, name):\n params = {'name': name}\n\n return client.call('bdev_ocf_delete', params)",
"def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))",
"def delete_analysis_scheme(DomainName=None, AnalysisSchemeName=None):\n pass",
"def delete(self, name):\n self.backend.delete(name)",
"def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)"
] | [
"0.71557397",
"0.6853078",
"0.66669387",
"0.6061135",
"0.58285147",
"0.58012325",
"0.5788677",
"0.57693595",
"0.5742112",
"0.5723283",
"0.5683105",
"0.559175",
"0.5587851",
"0.5586312",
"0.55806655",
"0.5496503",
"0.547739",
"0.54621804",
"0.5460287",
"0.5457482",
"0.5445741",
"0.5444654",
"0.5411127",
"0.54097867",
"0.5399851",
"0.5398496",
"0.5371194",
"0.53711396",
"0.53558207",
"0.5352108"
] | 0.7343126 | 0 |
Gets a default or paginated collection of SAS LIGs. [Arguments] | def fusion_api_get_sas_lig(self, uri=None, param='', api=None, headers=None):
return self.saslig.get(uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_sas_li(self, uri=None, param='', api=None, headers=None):\n return self.sasli.get(uri=uri, param=param, api=api, headers=headers)",
"def get_germplasm(\n germplasm_p_u_i: Optional[str] = Query(None, alias='germplasmPUI'),\n germplasm_db_id: Optional[str] = Query(None, alias='germplasmDbId'),\n germplasm_name: Optional[str] = Query(None, alias='germplasmName'),\n common_crop_name: Optional[str] = Query(None, alias='commonCropName'),\n accession_number: Optional[str] = Query(None, alias='accessionNumber'),\n collection: Optional[str] = None,\n genus: Optional[str] = None,\n species: Optional[str] = None,\n study_db_id: Optional[str] = Query(None, alias='studyDbId'),\n synonym: Optional[str] = None,\n parent_db_id: Optional[str] = Query(None, alias='parentDbId'),\n progeny_db_id: Optional[str] = Query(None, alias='progenyDbId'),\n external_reference_i_d: Optional[str] = Query(None, alias='externalReferenceID'),\n external_reference_source: Optional[str] = Query(\n None, alias='externalReferenceSource'\n ),\n page: Optional[int] = None,\n page_size: Optional[int] = Query(None, alias='pageSize'),\n authorization: Optional[constr(regex=r'^Bearer .*$')] = Query(\n None, alias='Authorization'\n ),\n) -> GermplasmListResponse:\n pass",
"def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit",
"def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]",
"def fusion_api_get_ls(self, uri=None, api=None, headers=None, param=''):\n return self.ls.get(uri=uri, api=api, headers=headers, param=param)",
"def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', default)",
"def fusion_api_get_lsg(self, uri=None, param='', api=None, headers=None):\n return self.lsg.get(uri=uri, param=param, api=api, headers=headers)",
"def ls():\n # TODO: listing all availabe containers form sequence\n return",
"def get_all_asgs(cluster_tag):\n return get_asgs(cluster_tag, [])",
"def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]",
"def getLSData(*args):\n return args[0].Data.LSData.ls_data",
"def sr_list(call=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n srs = session.xenapi.SR.get_all()\n for sr in srs:\n sr_record = session.xenapi.SR.get_record(sr)\n ret[sr_record[\"name_label\"]] = sr_record\n return ret",
"def getSubsampleList(vcfname, ss_count):\n\n vcf_o = pysam.VariantFile(vcfname)\n rec = next(vcf_o)\n vcf_o.close()\n lst = []\n for samp in rec.samples:\n lst.append(samp)\n return lst[:int(ss_count)]",
"def getAllGlids(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('glidList')\n\t\treturn deserialize_list_Glid_json(payload)",
"def get_datasets(sim_args):\n if len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'all':\n data_tags = [\n 'Webscope_C14_Set1',\n 'Webscope_C14_Set2',\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'CIKM2017':\n data_tags = [\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'letor64':\n data_tags = [\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n ]\n # random.shuffle(data_tags)\n else:\n data_tags = sim_args.data_folders\n for data_tag in data_tags:\n assert data_tag in DATASET_COLLECTION, 'Command line input is currently not supported.'\n yield DATASET_COLLECTION[data_tag]",
"def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])",
"def lego_sets():\n # you must replace this line and return your own list\n return []",
"def fusion_api_get_sas_interconnects(self, uri=None, param='', api=None, headers=None):\n return self.sasics.get(uri=uri, api=api, headers=headers, param=param)",
"def get_bucketlist():\n pass",
"def lego_sets():\n \n \n data_test=data_specific\n\n\n\n \n \n\n \n print(data_test)\n print(\"The size of the data is: \",len(data_test))\n \n \n \n # you must replace this line and return your own list\n return data_test",
"def lego_sets():\n # you must replace this line and return your own list\n return lego_sets_list",
"def get_collections(self): # real signature unknown; restored from __doc__\n return []",
"def get_all(self, name):\n\t\tpass",
"def fusion_api_get_sas_li_logical_drive_enclosures(self, uri=None, param='', api=None, headers=None):\n param = \"/logical-drive-enclosures%s\" % param\n return self.sasli.get(uri=uri, param=param, api=api, headers=headers)",
"def get(self):\n return GenericGet().get_catalogs()",
"def getCatalogs():",
"def queryList():\n #f = open(\"/var/log/scidbpy_log.txt\",\"w+\")\n #f.write(\"starting queryList\")\n\n header, rows = querySciDB(\"list('arrays')\")\n names = [row[1].translate(None, \"\\\"\") for row in rows]\n\n return names",
"def list(self, request, vocab, format=None):\n # What we really want is the vocab, which contains a list of\n # collections\n return redirect(\"/vocabs/\" + vocab)",
"def getLSASpace():\n sids,documents = getSongTextInfo()\n texts = [[word for word in document.lower().split()] for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n tfidf = models.TfidfModel(corpus)\n corpus_tfidf = tfidf[corpus]\n lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=30)\n corpus_lsi = lsi[corpus_tfidf]\n songMap = {}\n index = 0\n for doc in corpus_lsi:\n sid = sids[index]\n rMap = {}\n for item in doc:\n wid = item[0]\n count = item[1]\n rMap[wid] = count\n songMap[sid] = rMap\n index += 1\n return songMap",
"def spark_list():\n api.list()"
] | [
"0.54681677",
"0.5346564",
"0.50200975",
"0.49903435",
"0.49566144",
"0.4935703",
"0.4877357",
"0.48610568",
"0.48533508",
"0.4820069",
"0.47606888",
"0.47584435",
"0.47086406",
"0.46721065",
"0.4647679",
"0.4641318",
"0.4630326",
"0.46137437",
"0.4609929",
"0.4598526",
"0.45940414",
"0.45909166",
"0.45832232",
"0.4578807",
"0.4571251",
"0.45682737",
"0.45662296",
"0.45592794",
"0.45574617",
"0.45497206"
] | 0.5688966 | 0 |
Deletes a SAS LI [Arguments] | def fusion_api_delete_sas_li(self, name=None, uri=None, api=None, headers=None):
return self.sasli.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def delete(log, args):\n log('dataset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete dataset command coming soon.')",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def fusion_api_delete_sas_lig(self, name=None, uri=None, api=None, headers=None):\n return self.saslig.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete():",
"def rm(args):\n args.delete = True\n return remove(args)",
"def do_command(self, args):\n subjectops = dbops.TestSubjects()\n subjectops.delete(args)",
"def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0",
"def delete(self, **kwargs):\n\n rst = self.del_sngl_pair(kwargs)\n return rst",
"def do_command(self, args):\n compops = dbops.Completions()\n compops.delete(args)",
"def delete_run(arn=None):\n pass",
"def do_del(self, arg):\n try:\n del_list = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_list_str = \" \".join(del_list)\n print(del_list_str)\n elif choice == \"id\":\n del_list_str = int(\" \".join(del_list))\n print (del_list_str)\n app.ToDoApp.to_delete_todo(del_list_str)\n print (\"List deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')",
"def trelloDeleteCard(self, args): \n\n args = args.split(\" \")\n if len(args) < 2: return \">> Missing arguments\" \n\n listID = args[0] \n if not doesListExist(listID): return \">> This list does not exist\"\n\n cardName = args[1:] \n\n for l in self.lists: \n if l.name == listID: \n for card in l: \n if card.name == cardName:\n card.close() \n return \">> Deleted item!\" \n \n return \">> Item doesn't exist\"",
"def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}",
"def del_stkpnt(*args):\n return _ida_frame.del_stkpnt(*args)",
"def do_delete(self, arg):\n \treturn False",
"def script_delete(ctx: click.Context, name):\n subcommand_script.cmd_delete(ctx.obj, name)",
"def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete(self, structure, sentence) -> List[AcabNode]:\n raise NotImplementedError()",
"def delete(self, structure, sentence) -> List[AcabNode]:\n raise NotImplementedError()",
"def delete_param(command):\n namespace = app.main(command)\n assert namespace.command == 'dp' or namespace.command == \"deleteparam\"\n assert namespace.name == \"test\"",
"def delete(constraint,check=True):\n output = db.query(['jobid','fwid','storage_directory'],constraint,order='jobid')\n for jid,fwid,path in output: \n lpad.archive_wf(fwid) # archive firework\n db.updateDB('deleted','jobid',jid,1,tableName='completed') # note deletion in deleted column\n if not check or ask('Do you want to delete %s?'%path): # delete storage directory \n if 'scratch' in path: shutil.rmtree(path)\n elif 'nfs' in path: \n d = subprocess.Popen(['ssh','[email protected]', 'rm -r %s'%path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n dout, err = d.communicate()\n else: raise NotImplementedError\n print 'deleted!'",
"def removeItem(*args):",
"def removeItem(*args):",
"def remove(self, spo, context=None):\n\n uri = self.rest_services[\"statements\"]\n s,p,o = spo\n payload = dict()\n if s:\n payload[\"subj\"] = s.n3()\n if p:\n payload[\"pred\"] = p.n3()\n if o:\n payload[\"obj\"] = o.n3()\n if context:\n payload[\"context\"] = [context.n3()]\n\n #data = \" \".join(i.n3() for i in spo) +\" .\"\n #print(data)\n r = requests.delete(uri, params=payload)",
"def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])",
"def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")",
"def func(self):\n\n self.caller.execute_cmd('@del ' + self.caller.db.fbat + '-' + self.caller.db.lbat)\n\n #self.caller.msg(\"Command called!\")",
"def _del(self, *args):\n return _ida_frame.xreflist_t__del(self, *args)"
] | [
"0.68888885",
"0.6647529",
"0.6490593",
"0.64002407",
"0.6385851",
"0.62930906",
"0.62801945",
"0.6230487",
"0.6218175",
"0.60973996",
"0.6096202",
"0.606404",
"0.60172856",
"0.59568644",
"0.59202605",
"0.5916887",
"0.59123135",
"0.587879",
"0.5875267",
"0.58624417",
"0.58624417",
"0.58603823",
"0.5859002",
"0.5843469",
"0.5843469",
"0.5839152",
"0.5834223",
"0.58300334",
"0.58256865",
"0.58240587"
] | 0.688791 | 1 |
Gets a default or paginated collection of SAS LIs. [Arguments] | def fusion_api_get_sas_li(self, uri=None, param='', api=None, headers=None):
return self.sasli.get(uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_sas_lig(self, uri=None, param='', api=None, headers=None):\n return self.saslig.get(uri=uri, param=param, api=api, headers=headers)",
"def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit",
"def fusion_api_get_ls(self, uri=None, api=None, headers=None, param=''):\n return self.ls.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_sas_interconnects(self, uri=None, param='', api=None, headers=None):\n return self.sasics.get(uri=uri, api=api, headers=headers, param=param)",
"def list_silos(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.SILOS_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )",
"def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]",
"def sr_list(call=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n srs = session.xenapi.SR.get_all()\n for sr in srs:\n sr_record = session.xenapi.SR.get_record(sr)\n ret[sr_record[\"name_label\"]] = sr_record\n return ret",
"def fusion_api_get_internal_link_sets(self, uri=None, param='', api=None, headers=None):\n return self.ils.get(uri=uri, api=api, headers=headers, param=param)",
"def RetrieveACISA():\n\tdb = DBConnector()\n\tcur = db.cursor()\n\n\tSQLcmd = \"SELECT * FROM snaps.SNAPsLocation\"\n\tcur.execute(SQLcmd)\n\treturnList = []\n\tcount = 0\n\tfor item in cur.fetchall():\n\t\tcount += 1\n\t\ttmplist = [item[1], item[2], count, str(item[0])]\n\t\treturnList.append(tmplist)\n\treturn returnList",
"def getLSData(*args):\n return args[0].Data.LSData.ls_data",
"def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]",
"def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', default)",
"def ls():\n # TODO: listing all availabe containers form sequence\n return",
"def list(\n self,\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"_models.StaticSiteCollection\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.StaticSiteCollection\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-01-01\"\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('StaticSiteCollection', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )",
"def lsits(self) -> List[LsitsParam]:\n return self._lsits",
"def get_all(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieve a list of STS chains\", uri,\n requires_modules=requires_modules,\n requires_version=requires_version)",
"def ISBNs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('isbns', default)\n return [HEP.ISBNObject(i) for i in tmp]",
"def get_all_labs():\n return Lab.query.all()",
"def datasets(self):\n return [Dataset.ENSEMBL]",
"def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]",
"def get(self):\n return GenericGet().get_catalogs()",
"def getSequences(self, show):\n log('Getting list of sequences remotely')\n job = flix.remote.remoteHttpCall.FlixJob('user', 'getSequences')\n proxyHttpCall = flix.remote.remoteHttpCall.ProxyHttpCall()\n procs = flix.remote.ProcConfig()\n maxAttempts = 3\n request = job.newRequest(procs.FILE, 'FlixCore.getSequences', show)\n request.timeout = 60\n try:\n result = proxyHttpCall.makeRequest(request, job, False, maxAttempts)\n except utils.FlixException, e:\n raise utils.FlixExceptionReport(e)\n return result",
"def get(self, *args):\n return _libsbml.ListOf_get(self, *args)",
"def getCatalogs():",
"def spark_list():\n api.list()",
"def _list(self, variables):\n variables_dict = self._get_vars(variables)\n\n container_name = variables_dict.pop('container', None)\n\n filters = {\n 'marker': variables_dict.pop('marker', None),\n 'limit': variables_dict.pop('limit', None),\n 'prefix': variables_dict.pop('prefix', None),\n 'end_marker': variables_dict.pop('end_marker', None)\n }\n\n if container_name:\n list_data = self.swift.get_container(container_name, **filters)[1]\n else:\n list_data = self.swift.get_account(**filters)[1]\n\n return self._facts(facts=list_data)",
"def getLCLimits(*args):\n return args[0].Limit.LCLimit.lc_limit",
"def get_germplasm(\n germplasm_p_u_i: Optional[str] = Query(None, alias='germplasmPUI'),\n germplasm_db_id: Optional[str] = Query(None, alias='germplasmDbId'),\n germplasm_name: Optional[str] = Query(None, alias='germplasmName'),\n common_crop_name: Optional[str] = Query(None, alias='commonCropName'),\n accession_number: Optional[str] = Query(None, alias='accessionNumber'),\n collection: Optional[str] = None,\n genus: Optional[str] = None,\n species: Optional[str] = None,\n study_db_id: Optional[str] = Query(None, alias='studyDbId'),\n synonym: Optional[str] = None,\n parent_db_id: Optional[str] = Query(None, alias='parentDbId'),\n progeny_db_id: Optional[str] = Query(None, alias='progenyDbId'),\n external_reference_i_d: Optional[str] = Query(None, alias='externalReferenceID'),\n external_reference_source: Optional[str] = Query(\n None, alias='externalReferenceSource'\n ),\n page: Optional[int] = None,\n page_size: Optional[int] = Query(None, alias='pageSize'),\n authorization: Optional[constr(regex=r'^Bearer .*$')] = Query(\n None, alias='Authorization'\n ),\n) -> GermplasmListResponse:\n pass",
"def get_srr(srs_accession: str, cursor) -> List[str]:\n\n return [\n accession\n for (accession,) in cursor.execute(\n \"select DISTINCT run_accession from sra where sample_accession is ?\", (srs_accession,)\n )\n ]",
"def queryList():\n #f = open(\"/var/log/scidbpy_log.txt\",\"w+\")\n #f.write(\"starting queryList\")\n\n header, rows = querySciDB(\"list('arrays')\")\n names = [row[1].translate(None, \"\\\"\") for row in rows]\n\n return names"
] | [
"0.54077476",
"0.5360556",
"0.5235915",
"0.51525134",
"0.5136321",
"0.5049862",
"0.50423014",
"0.5024832",
"0.49565122",
"0.4950418",
"0.48724526",
"0.48694488",
"0.48643297",
"0.48579445",
"0.4847231",
"0.48399895",
"0.48339093",
"0.48093602",
"0.47833169",
"0.4780453",
"0.47476757",
"0.4744698",
"0.47408253",
"0.47201163",
"0.47160965",
"0.47103703",
"0.4680273",
"0.4676726",
"0.46750247",
"0.46675697"
] | 0.57464606 | 0 |
Gets a default or paginated collection of SAS LI firmware. [Arguments] | def fusion_api_get_sas_li_firmware(self, uri=None, param='', api=None, headers=None):
param = "/firmware%s" % param
return self.sasli.get(uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_firmware_driver(self, uri=None, api=None, headers=None, param=''):\n return self.driver.get(uri, api, headers, param)",
"def fusion_api_get_sas_li(self, uri=None, param='', api=None, headers=None):\n return self.sasli.get(uri=uri, param=param, api=api, headers=headers)",
"def firmwares(self):\n return FirmwareCollection(client=self)",
"def fusion_api_get_server_hardware_firmware(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/firmware')",
"def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')",
"def get_devices_lsscsi(self):\n\n try:\n message = \"Find SCSI Devices\"\n if self._include_enclosures:\n command = \"lsscsi --generic --transport | egrep 'disk|0x14|enclo'\"\n else:\n command = \"lsscsi --generic --transport | fgrep 'disk|0x14'\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n #\n # Format:\n # $ lsscsi --generic --transport\n # [0] [1] [2] [3] [4]\n # [0:0:0:0] disk sas:0x5000cca25103b471 /dev/sda /dev/sg0 \n # [0:0:1:0] disk sas:0x5000cca251029301 /dev/sdb /dev/sg1 \n # ...\n # [0:0:14:0] enclosu sas:0x5001636001caa0bd - /dev/sg14\n # [7:0:0:0] cd/dvd usb: 1-1.3:1.2 /dev/sr0 /dev/sg15\n #\n # Special Case:\n # Handle lines without a transport (spaces only). (screen scrapping danger)\n # [0:0:10:0] enclosu sas:0x50030480091d71fd - /dev/sg10\n # [1:0:0:0] disk <spaces> /dev/sdk /dev/sg11 <- INTEL disk!\n #\n # Another SNAFU! (and why I hate screen scrapping!!!)\n # [15:0:53597:0]disk sas:0x5000cca23b359649 /dev/sdg /dev/sg6 \n # [15:0:53598:0]disk sas:0x5000cca23b0c0a99 /dev/sdh /dev/sg7 \n # [15:0:53599:0]disk sas:0x5000cca23b0b7531 /dev/sdi /dev/sg8 \n # ...\n # [15:0:53686:0]enclosu sas:0x5000ccab040001bc - /dev/sg165\n # [15:0:53766:0]enclosu sas:0x5000ccab040001fc - /dev/sg144\n #\n # Evidently, the author of lsscsi did not think of consistent output! ;(\n #\n for line in pdata['stdout'].splitlines():\n dinfo = line.split()\n device = dict()\n if len(dinfo) < 5:\n m = re.search('(?P<device>disk|\\(0x14\\)|enclosu)', dinfo[0])\n if m:\n device['Device Type'] = m.group('device')\n sas_index = 1\n dev_index = 2\n sg_index = 3\n else:\n continue\n else:\n device['Device Type'] = dinfo[1]\n sas_index = 2\n dev_index = 3\n sg_index = 4\n\n # lsscsi does not understand 'Host Managed' device type.\n if '0x14' in device['Device Type']:\n device['Device Type'] = 'disk'\n\n # Parse remaining information.\n if 'sas:' in dinfo[sas_index]:\n device['SAS Address'] = dinfo[sas_index][4:]\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: Enclosure has no driver, so reports '-' for name.\n if '/dev/' in dinfo[dev_index]:\n if self._drives and not dinfo[dev_index] in self._drives:\n continue\n if self._exclude and dinfo[dev_index] in self._exclude:\n continue\n device['Linux Device Name'] = dinfo[dev_index]\n else:\n device['Linux Device Name'] = \"\"\n if '/dev/sg' in dinfo[sg_index]:\n device['SCSI Device Name'] = dinfo[sg_index]\n else:\n device['SCSI Device Name'] = \"\"\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc",
"def flask_get_devices():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n # retrieve pagination\n page_number, per_page = get_pagination(request)\n\n params = {\n 'page_number': page_number,\n 'per_page': per_page,\n 'sortBy': request.args.get('sortBy', None),\n 'attr': request.args.getlist('attr'),\n 'attr_type': request.args.getlist('attr_type'),\n 'label': request.args.get('label', None),\n 'template': request.args.get('template', None),\n 'idsOnly': request.args.get('idsOnly', 'false'),\n }\n\n result = DeviceHandler.get_devices(token, params)\n LOGGER.info(f' Getting latest added device(s).')\n\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def ls(**params):\n params = _clean_salt_variables(params)\n\n endpoint = \"devices\"\n\n # Change endpoint if there are params to filter by:\n if params:\n endpoint = \"resources\"\n\n # Convert all ints to strings:\n for key, val in params.items():\n params[key] = str(val)\n\n api_response = requests.get(\n \"https://api.serverdensity.io/inventory/{}\".format(endpoint),\n params={\n \"token\": get_sd_auth(\"api_token\"),\n \"filter\": salt.utils.json.dumps(params),\n },\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\n \"Could not parse Server Density API Response content: %s\",\n api_response.content,\n )\n raise CommandExecutionError(\n \"Failed to create, Server Density API Response: {}\".format(api_response)\n )\n else:\n return None",
"def fusion_api_get_ls(self, uri=None, api=None, headers=None, param=''):\n return self.ls.get(uri=uri, api=api, headers=headers, param=param)",
"def flask_internal_get_devices():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n # retrieve pagination\n page_number, per_page = get_pagination(request)\n\n params = {\n 'page_number': page_number,\n 'per_page': per_page,\n 'sortBy': request.args.get('sortBy', None),\n 'attr': request.args.getlist('attr'),\n 'attr_type': request.args.getlist('attr_type'),\n 'label': request.args.get('label', None),\n 'template': request.args.get('template', None),\n 'idsOnly': request.args.get('idsOnly', 'false'),\n }\n\n result = DeviceHandler.get_devices(token, params, True)\n LOGGER.info(f' Getting known internal devices.')\n \n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)",
"def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def test_get_drives_drive_firmware(self):\n pass",
"def fusion_api_get_li(self, uri=None, api=None, headers=None, param=''):\n return self.li.get(uri=uri, api=api, headers=headers, param=param)",
"def user_sends_get_call_to_the_devices():\n web_app.list_devices()",
"def get_devices_spt(self):\n\n #import pdb; pdb.set_trace()\n if self._drives or self.firmware_version or self.product_name or self.vendor_name or \\\n self.serial_number or self.target_port:\n user_options = True\n else:\n user_options = False\n try:\n # Note: Extra logic to optimize spt device directory scanning.\n if not user_options:\n if self._include_enclosures:\n message = \"Find SCSI Devices\"\n command = \"{tool} show devices dtype=direct,hostmanaged,enclosure\".format(tool=self.tool)\n else:\n message = \"Find SCSI Disk Drives\"\n command = \"{tool} show devices dtype=direct,hostmanaged\".format(tool=self.tool)\n # Use common execute below.\n else:\n # Request enclosures separately.\n if self._include_enclosures:\n message = \"Find SCSI Enclosures\"\n command = \"{tool} show devices dtype=enclosure ofmt=json\".format(tool=self.tool)\n pdata = self._run_command(command=command, message=message,\n logger=self._logger, shell=False, expected_failure=True)\n if pdata['exit_code'] == self.EXIT_STATUS_SUCCESS and pdata['stdout']:\n devices = json.loads(pdata['stdout'])\n self.parse_devices_spt(devices)\n\n message = \"Find SCSI Disk Drives\"\n # Selective drives or all direct access (disk drives).\n if self._drives:\n command = \"{tool} show edt dtype=direct,hostmanaged devices={drives}\"\\\n .format(tool=self.tool, drives=\",\".join(self._drives))\n else:\n command = \"{tool} show devices dtype=direct,hostmanaged\".format(tool=self.tool)\n # Apply optional parameters.\n if self.product_name:\n command += \" pid={product}\".format(product=self.product_name)\n if self.vendor_name:\n command += \" vid={vendor}\".format(vendor=self.vendor_name)\n if self.serial_number:\n command += \" serial={serial}\".format(serial=self.serial_number)\n if self.target_port:\n command += \" tport={target}\".format(target=self.target_port)\n if self.firmware_version:\n command += \" fw_version={firmware}\".format(firmware=self.firmware_version)\n\n # Add common spt options, we want JSON output!\n if self._exclude:\n command += \" exclude={drives}\".format(drives=\",\".join(self._exclude))\n command += \" ofmt=json\"\n # Finally, execute spt and parse its' JSON output (if any).\n pdata = self._run_command(command=command, message=message,\n logger=self._logger, shell=False, expected_failure=True)\n # spt emits warning status (1) and no JSON output if no devices found.\n if pdata['exit_code'] == self.EXIT_STATUS_SUCCESS and pdata['stdout']:\n devices = json.loads(pdata['stdout'])\n self.parse_devices_spt(devices)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc\n\n except ValueError as exc:\n self._logger.error(\"Failed to parse spts' JSON output: {0}\".format(exc))\n raise exc",
"def getDevices(i):\n devices = Account['KTFLR'].devices('monpressprod')\n device = devices[i]\n return device",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def listInputDevices(*args, free: bool=True, primary: bool=True, secondary: bool=True,\n **kwargs)->List[AnyStr]:\n pass",
"def get_boot_order(rfo, api=1, unit=1):\n\n url = f\"/redfish/v{api}/systems/{unit}/bios\"\n res = rfo.get(url)\n if res.status != 200:\n print(f\"Error: {res.status}: {res.read}\")\n return \"XXX\"\n booturl = res.dict['Oem']['Hpe']['Links']['Boot']['@odata.id']\n res = rfo.get(booturl)\n if res.status != 200:\n print(f\"HTTP Fail Status: {res.status} - {res.read}\")\n return \"XXX\"\n return res.dict['DefaultBootOrder']",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def get_block_device_list(vars = {}, log = sys.stderr):\n\n # make sure we can access to the files/directories in /proc\n if not os.access(PROC_PARTITIONS_PATH, os.F_OK):\n return None\n\n # table with valid scsi/sata/ide/raid block device names\n valid_blk_names = {}\n # add in valid sd and hd block device names\n for blk_prefix in ('sd','hd'):\n for blk_num in map (\\\n lambda x: chr(x), range(ord('a'),ord('z')+1)):\n devicename=\"%s%c\" % (blk_prefix, blk_num)\n valid_blk_names[devicename]=None\n\n # add in valid scsi raid block device names\n for M in range(0,1+1):\n for N in range(0,7+1):\n devicename = \"cciss/c%dd%d\" % (M,N)\n valid_blk_names[devicename]=None\n\n for devicename in valid_blk_names.keys():\n # devfs under 2.4 (old boot cds) used to list partitions\n # in a format such as scsi/host0/bus0/target0/lun0/disc\n # and /dev/sda, etc. were just symlinks\n try:\n devfsname= os.readlink( \"/dev/%s\" % devicename )\n valid_blk_names[devfsname]=None\n except OSError:\n pass\n\n # only do this once every system boot\n if not os.access(DEVICES_SCANNED_FLAG, os.R_OK):\n\n # this is ugly. under devfs, device\n # entries in /dev/scsi/.. and /dev/ide/...\n # don't show up until you attempt to read\n # from the associated device at /dev (/dev/sda).\n # so, lets run sfdisk -l (list partitions) against\n # most possible block devices, that way they show\n # up when it comes time to do the install.\n devicenames = valid_blk_names.keys()\n devicenames.sort()\n for devicename in devicenames:\n os.system( \"sfdisk -l /dev/%s > /dev/null 2>&1\" % devicename )\n\n # touch file\n fb = open(DEVICES_SCANNED_FLAG,\"w\")\n fb.close()\n\n devicelist= {}\n\n partitions_file= file(PROC_PARTITIONS_PATH,\"r\")\n line_count= 0\n for line in partitions_file:\n line_count= line_count + 1\n\n # skip the first two lines always\n if line_count < 2:\n continue\n\n parts= string.split(line)\n\n if len(parts) < 4:\n continue\n\n device= parts[3]\n\n # skip and ignore any partitions\n if not valid_blk_names.has_key(device):\n continue\n\n try:\n major= int(parts[0])\n minor= int(parts[1])\n blocks= int(parts[2])\n except ValueError, err:\n continue\n\n gb_size= blocks/BLOCKS_PER_GB\n\n # check to see if the blk device is readonly\n try:\n # can we write to it?\n dev_name= \"/dev/%s\" % device\n fb = open(dev_name,\"w\")\n fb.close()\n readonly=False\n except IOError, e:\n # check if EROFS errno\n if errno.errorcode.get(e.errno,None) == 'EROFS':\n readonly=True\n else:\n # got some other errno, pretend device is readonly\n readonly=True\n\n devicelist[dev_name]= {'major': major,'minor': minor,'blocks': blocks, 'size': gb_size, 'readonly': readonly}\n return devicelist",
"def getFirstData(self) -> ghidra.program.model.listing.Data:\n ...",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def Files(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('_files', default)\n return [HEP.RecordFile(i) for i in tmp]",
"def getFileListDAS(dataset,blacklist=[ ]):\n dataset = dataset.replace('__','/')\n if dataset[0]!='/':\n dataset = '/'+dataset\n instance = 'prod/global'\n if 'USER' in dataset:\n instance = 'prod/phys03'\n #cmd='das_client --limit=0 --query=\"file dataset=%s instance=%s\"'%(dataset,instance)\n cmd = 'das_client --limit=0 --query=\"file dataset=%s instance=%s status=*\"'%(dataset,instance)\n if args.verbose:\n print \"Executing \",cmd\n cmd_out = getoutput( cmd )\n tmpList = cmd_out.split(os.linesep)\n filelist = [ ]\n for line in tmpList:\n if '.root' in line and line not in blacklist:\n #files.append(\"root://cms-xrd-global.cern.ch/\"+line) # global\n filelist.append(\"root://xrootd-cms.infn.it/\"+line) # Eurasia\n filelist.sort()\n return filelist",
"def _default() -> list:\n if metadata is None or metadata.default is None:\n return []\n\n return self._always_array(metadata.default)"
] | [
"0.549111",
"0.5478683",
"0.52667654",
"0.5174764",
"0.5122465",
"0.50894815",
"0.5063932",
"0.5050597",
"0.50475633",
"0.50427073",
"0.49999896",
"0.49472106",
"0.492481",
"0.49211743",
"0.49103346",
"0.49046096",
"0.4892888",
"0.48668748",
"0.4803468",
"0.47868672",
"0.47654316",
"0.47574317",
"0.47509682",
"0.47476742",
"0.4739771",
"0.4729864",
"0.47146946",
"0.4686574",
"0.46861652",
"0.46825418"
] | 0.6767854 | 0 |
Updates a SAS LI using the PATCH HTTP verb. [Arguments] | def fusion_api_patch_sas_li(self, body=None, uri=None, api=None, headers=None):
return self.sasli.patch(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_edit_sas_lig(self, body, uri, api=None, headers=None):\n return self.saslig.update(body, uri, api, headers)",
"def fusion_api_edit_lsg(self, body, uri, api=None, headers=None):\n return self.lsg.update(body, uri, api, headers)",
"def fusion_api_patch_li(self, body=None, uri=None, api=None, headers=None):\n return self.li.patch(body, uri, api, headers)",
"def patch(self, *args, **kwargs):\n self.request(\"patch\", *args, **kwargs)",
"def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})",
"def fusion_api_update_sas_li_firmware(self, body=None, uri=None, api=None, headers=None):\n param = \"/firmware\" # put method expecting a param\n return self.sasli.put(body=body, uri=uri, param=param, api=api, headers=headers)",
"def fusion_api_update_ls(self, body=None, uri=None, api=None, headers=None):\n return self.ls.put(body, uri, api, headers)",
"def fusion_api_update_sas_li_from_group(self, uri=None, api=None, headers=None):\n param = '/compliance'\n return self.sasli.put(body=None, uri=uri, param=param, api=api, headers=headers)",
"def fusion_api_patch_sas_interconnect(self, body=None, uri=None, api=None, headers=None):\n return self.sasics.patch(body=body, uri=uri, api=api, headers=headers)",
"def update(self,request,pk = None):\n return Response({'http_method':'PUT'})",
"def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})",
"def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})",
"def put(self, request, pk=None):\n return Response({'method': 'patch'})",
"def patch(self, request , pk=None):\n return Response({'message':'PATCH'})",
"def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})",
"def patch(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'patch', api_path, *args, **kwargs)",
"def PatchConcepts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def patch(self):\n\n if session.get(\"login\",False) is not True:\n return {\n \"errno\": 699,\n \"describe\": \"需要登录\"\n }\n\n id = request.form.get(\"id\")\n content = request.form.get(\"content\")\n hashtag = request.form.get(\"hashtag\")\n\n hashtag = [] if hashtag == None or hashtag == \"\" else hashtag.split( \",\" )\n if isinstance(hashtag, str):\n hashtag = json.loads(hashtag)\n\n edit_doc(id, content, hashtag)\n\n return {\"errno\":0}",
"def fusion_api_edit_rack(self, body, uri, api=None, headers=None):\n return self.rack.update(body, uri, api, headers)",
"def patch(self , request , pk = None ):\r\n return Response({'method':'patch'})",
"def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )",
"def sli_update(obj, product_name, name, sli_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slis = client.sli_list(product, name)\n if not slis:\n fatal_error('SLI {} does not exist'.format(name))\n\n with Action('Updating SLI {} for product: {}'.format(name, product_name), nl=True) as act:\n sli = json.load(sli_file)\n\n validate_sli(obj, sli, act)\n\n if not act.errors:\n sli['uri'] = slis[0]['uri']\n s = client.sli_update(sli)\n\n print(json.dumps(s, indent=4))",
"def fusion_api_edit_lig(self, body, uri, api=None, headers=None, etag=None):\n return self.lig.update(body, uri, api, headers, etag)",
"def update():\n return 'update api in put'",
"def handle_patch(self, api, command):\n return self._make_request_from_command('PATCH', command)",
"def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})",
"def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})",
"def _patch(self, path=None, version=None, params=None,\n data=None, json=None, header=None):\n return self.client.patch(module='mam', path=path, version=version,\n params=params, data=data,\n json=json, header=header)",
"def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})",
"def patch(self, request, pk=None):\n return Response({'method': 'PATCH'})"
] | [
"0.66501045",
"0.6501658",
"0.6482534",
"0.6376553",
"0.61542445",
"0.61509186",
"0.6102513",
"0.6058553",
"0.60585386",
"0.605659",
"0.6009829",
"0.6008479",
"0.60002446",
"0.5970388",
"0.5948214",
"0.5940603",
"0.5939549",
"0.5936347",
"0.5934847",
"0.59218144",
"0.59098595",
"0.5904611",
"0.5898835",
"0.5885425",
"0.58767587",
"0.58670926",
"0.58371985",
"0.58274466",
"0.58014065",
"0.58003706"
] | 0.73917496 | 0 |
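
For context, the `fusion_api_patch_sas_li` wrapper in the record above is a thin pass-through to an HTTP PATCH. Below is a minimal, self-contained sketch of what the underlying call can look like with the `requests` library; the appliance URL, the resource URI, and the `X-Api-Version` header value are illustrative assumptions, not values taken from the record.

    import requests

    def patch_resource(base_url, uri, ops, api_version=300, headers=None):
        # PATCH bodies in this style of REST API are typically RFC 6902 operation lists
        hdrs = {'X-Api-Version': str(api_version), 'Content-Type': 'application/json'}
        hdrs.update(headers or {})
        return requests.patch(base_url + uri, json=ops, headers=hdrs)

    # usage (hypothetical endpoint): rename a SAS logical interconnect
    # patch_resource('https://appliance.example.com',
    #                '/rest/sas-logical-interconnects/abc123',
    #                [{'op': 'replace', 'path': '/name', 'value': 'sas-li-1'}])
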
Adds a Server Hardware resource. [Arguments] | def fusion_api_add_server_hardware(self, body, api=None, headers=None, param=''):
return self.sh.post(body, api, headers, param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_resource(self, name, controller, ipaddress, ram, cpus, storage, owner=None, flavor='', compute=None, huge_pages=False):\n if compute is None: compute = controller\n args = { 'vm': name,\n 'controller': controller,\n 'ipaddress': ipaddress,\n 'ram': ram,\n 'cpus': cpus,\n 'storage': storage,\n 'flavor': flavor,\n 'compute': compute,\n 'hugepages': huge_pages,\n }\n if owner is not None:\n args['owner'] = owner\n self._NDL_API('addresource', args, None)",
"def add_resource(self, *args, **kwargs):\n return self._resources_manager.add_resource(*args, **kwargs)",
"def add_resource(self, resource_name, resource):\n self.resources[resource_name] = resource",
"def add(self, resource):\n if isinstance(resource, Resource):\n if isinstance(resource, Secret) and \\\n resource.mount != 'cubbyhole':\n ensure_backend(resource,\n SecretBackend,\n self._mounts,\n self.opt,\n False)\n elif isinstance(resource, Mount):\n ensure_backend(resource, SecretBackend, self._mounts, self.opt)\n elif isinstance(resource, Auth):\n ensure_backend(resource, AuthBackend, self._auths, self.opt)\n elif isinstance(resource, AuditLog):\n ensure_backend(resource, LogBackend, self._logs, self.opt)\n\n self._resources.append(resource)\n else:\n msg = \"Unknown resource %s being \" \\\n \"added to context\" % resource.__class__\n raise aomi_excep.AomiError(msg)",
"def add_host():\n # Attempt to recieve POST data\n name = None\n ip = None\n mac = None\n state = None\n if not request.json:\n abort(400)\n try:\n name = request.json.get('deviceName')\n ip = request.json.get('ip')\n mac = request.json.get('mac')\n except:\n abort(400)\n try: # Try to get the state, otherwise default it to off and let the daemon clean up\n state = request.json.get('state')\n if state == None:\n state = 'off'\n except:\n state = 'off'\n # Perform the transaction itself\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n ret = hosts.add(db, name, ip, mac, state)\n ret = {'sid': ret}\n db.commit()\n ret = {'add': ret}\n return jsonify(ret)",
"def add(self, resource):\n if isinstance(resource, Resource):\n if isinstance(resource, (Secret, Mount)):\n ensure_backend(resource, SecretBackend, self._mounts, self.opt)\n elif isinstance(resource, (Auth)):\n ensure_backend(resource, AuthBackend, self._auths, self.opt)\n elif isinstance(resource, (AuditLog)):\n ensure_backend(resource, LogBackend, self._logs, self.opt)\n\n self._resources.append(resource)\n else:\n msg = \"Unknown resource %s being \" \\\n \"added to context\" % resource.__class__\n raise aomi.exceptions.AomiError(msg)",
"def add_machine(args):\n session = Session()\n # the following is used to help with code completion\n env = Environment(name=args.environment)\n try:\n env = session.query(Environment).filter_by(name=args.environment).one()\n except NoResultFound:\n print \"ERROR: couldn't find environment %s\" % args.environment\n sys.exit(1)\n machine = PoolMachine(name=args.name, hostname=args.hostname, environment=env, online=True)\n session.add(machine)\n session.commit()\n print repr(machine)",
"def add_resource(self, resource, resource_start, resource_dur):\n self.resources.append(resource)\n resource.add_mode(self.op_number, self.mode_number, resource_start, resource_dur)",
"def addResource(self, *args):\n return _libsbml.CVTerm_addResource(self, *args)",
"def add(cls, client, resource) :\n try :\n if type(resource) is not list :\n addresource = nshttpprofile()\n addresource.name = resource.name\n addresource.dropinvalreqs = resource.dropinvalreqs\n addresource.markhttp09inval = resource.markhttp09inval\n addresource.markconnreqinval = resource.markconnreqinval\n addresource.cmponpush = resource.cmponpush\n addresource.conmultiplex = resource.conmultiplex\n addresource.maxreusepool = resource.maxreusepool\n addresource.dropextracrlf = resource.dropextracrlf\n addresource.incomphdrdelay = resource.incomphdrdelay\n addresource.websocket = resource.websocket\n addresource.rtsptunnel = resource.rtsptunnel\n addresource.reqtimeout = resource.reqtimeout\n addresource.adpttimeout = resource.adpttimeout\n addresource.reqtimeoutaction = resource.reqtimeoutaction\n addresource.dropextradata = resource.dropextradata\n addresource.weblog = resource.weblog\n addresource.clientiphdrexpr = resource.clientiphdrexpr\n addresource.maxreq = resource.maxreq\n addresource.persistentetag = resource.persistentetag\n addresource.spdy = resource.spdy\n addresource.http2 = resource.http2\n addresource.reusepooltimeout = resource.reusepooltimeout\n addresource.maxheaderlen = resource.maxheaderlen\n addresource.minreusepool = resource.minreusepool\n addresource.http2maxheaderlistsize = resource.http2maxheaderlistsize\n addresource.http2maxframesize = resource.http2maxframesize\n addresource.http2maxconcurrentstreams = resource.http2maxconcurrentstreams\n addresource.http2initialwindowsize = resource.http2initialwindowsize\n addresource.http2headertablesize = resource.http2headertablesize\n return addresource.add_resource(client)\n else :\n if (resource and len(resource) > 0) :\n addresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n addresources[i].name = resource[i].name\n addresources[i].dropinvalreqs = resource[i].dropinvalreqs\n addresources[i].markhttp09inval = resource[i].markhttp09inval\n addresources[i].markconnreqinval = resource[i].markconnreqinval\n addresources[i].cmponpush = resource[i].cmponpush\n addresources[i].conmultiplex = resource[i].conmultiplex\n addresources[i].maxreusepool = resource[i].maxreusepool\n addresources[i].dropextracrlf = resource[i].dropextracrlf\n addresources[i].incomphdrdelay = resource[i].incomphdrdelay\n addresources[i].websocket = resource[i].websocket\n addresources[i].rtsptunnel = resource[i].rtsptunnel\n addresources[i].reqtimeout = resource[i].reqtimeout\n addresources[i].adpttimeout = resource[i].adpttimeout\n addresources[i].reqtimeoutaction = resource[i].reqtimeoutaction\n addresources[i].dropextradata = resource[i].dropextradata\n addresources[i].weblog = resource[i].weblog\n addresources[i].clientiphdrexpr = resource[i].clientiphdrexpr\n addresources[i].maxreq = resource[i].maxreq\n addresources[i].persistentetag = resource[i].persistentetag\n addresources[i].spdy = resource[i].spdy\n addresources[i].http2 = resource[i].http2\n addresources[i].reusepooltimeout = resource[i].reusepooltimeout\n addresources[i].maxheaderlen = resource[i].maxheaderlen\n addresources[i].minreusepool = resource[i].minreusepool\n addresources[i].http2maxheaderlistsize = resource[i].http2maxheaderlistsize\n addresources[i].http2maxframesize = resource[i].http2maxframesize\n addresources[i].http2maxconcurrentstreams = resource[i].http2maxconcurrentstreams\n addresources[i].http2initialwindowsize = resource[i].http2initialwindowsize\n addresources[i].http2headertablesize = 
resource[i].http2headertablesize\n result = cls.add_bulk_request(client, addresources)\n return result\n except Exception as e :\n raise e",
"def add_resources(self, filename):\n slot, app_id, _ = get_info_from_filename(filename)\n self.root_coap.add_resource((app_id, 'version',),\n FirmwareVersionResource(self,\n app_id, slot))\n self.root_coap.add_resource((app_id, slot, 'name', ),\n FirmwareNameResource(self, app_id, slot))\n self.root_coap.add_resource((app_id, slot, 'firmware', ),\n FirmwareBinaryResource(self, app_id, slot))",
"def hardware(*args, brdType: bool=True, cpuType: bool=True, graphicsType: bool=True, megaHertz:\n bool=True, numProcessors: bool=True, **kwargs)->AnyStr:\n pass",
"def registerExistingServer():\n cd('/')\n cd('/Servers/'+managedServername)\n registerServer(cmo)",
"def registerServer(srv):\n srv.setListenAddress(hostname)\n srv.setMachine(getMBean('/Machines/'+machineName))",
"def add_resource():\n request_data = request.get_json()\n\n if 'hostname' in request_data and 'username' in request_data and 'password' in request_data:\n try:\n Resource.add_resource(\n request_data['hostname'],\n request_data['username'],\n request_data['password'],\n request_data.get('interval', 60)\n )\n response = Response({}, 201, mimetype=\"application/json\")\n response.headers['Location'] = f'/Resource/{request_data[\"hostname\"]}'\n\n HEALTH_AGGREGATOR.synchronize()\n\n return response\n except ResourceAlreadyExistsException:\n return Response(\n json.dumps({'error': 'A resource already exists with the given hostname'}),\n 400,\n mimetype='application/json'\n )\n\n return Response(\n json.dumps({'error': 'Hostname / Username / Password missing in the request body'}),\n 400,\n mimetype='application/json'\n )",
"def add_resource(self, resource, *urls, **kwargs):\n if self.app is not None:\n self._register_view(self.app, resource, *urls, **kwargs)\n else:\n self.resources.append((resource, urls, kwargs))",
"def add(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\taddresource = lbprofile()\n\t\t\t\taddresource.lbprofilename = resource.lbprofilename\n\t\t\t\taddresource.dbslb = resource.dbslb\n\t\t\t\taddresource.processlocal = resource.processlocal\n\t\t\t\taddresource.httponlycookieflag = resource.httponlycookieflag\n\t\t\t\taddresource.cookiepassphrase = resource.cookiepassphrase\n\t\t\t\taddresource.usesecuredpersistencecookie = resource.usesecuredpersistencecookie\n\t\t\t\taddresource.useencryptedpersistencecookie = resource.useencryptedpersistencecookie\n\t\t\t\treturn addresource.add_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\taddresources = [ lbprofile() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\taddresources[i].lbprofilename = resource[i].lbprofilename\n\t\t\t\t\t\taddresources[i].dbslb = resource[i].dbslb\n\t\t\t\t\t\taddresources[i].processlocal = resource[i].processlocal\n\t\t\t\t\t\taddresources[i].httponlycookieflag = resource[i].httponlycookieflag\n\t\t\t\t\t\taddresources[i].cookiepassphrase = resource[i].cookiepassphrase\n\t\t\t\t\t\taddresources[i].usesecuredpersistencecookie = resource[i].usesecuredpersistencecookie\n\t\t\t\t\t\taddresources[i].useencryptedpersistencecookie = resource[i].useencryptedpersistencecookie\n\t\t\t\tresult = cls.add_bulk_request(client, addresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e",
"def add_resource(self, **kwargs):\n data = self.validated_data\n # pylint: disable=no-member\n return WorkshopResource.objects.create(\n name=data['name'], link=data['link'], resource_type=data['resource_type'],\n workshop=self.context['workshop'])",
"def add_server(self, server):\n self.all_servers[server.server_id] = server\n self.servers_jobs_list[server.server_id] = server.jobs\n if server.status:\n self.servers_online[server.server_id] = server\n else:\n self.servers_offline[server.server_id] = server\n print(f\"Server '{server.server_name} added'\")",
"def add_worker(ip: str = Argument(..., help=\"Server IP\"),\n key_ssh: str= Argument(..., help=\"Path to ssh key file\"),\n user_ssh: str = Argument(..., help=\"User in the server\"),\n hostname: str = Argument(..., help=\"Ex: ws01.example.com\"),\n mannager_ip: str = Argument(..., help=\"Mannager cluster IP\")):\n registers = os.getcwd() + '/commands/templates/manager_registers.txt'\n if os.path.exists(registers):\n with open(registers, 'r') as f:\n line = f.readline()\n while line:\n line = line.split(' ')\n line_ip = line[-3].split(':')[0]\n if line_ip == mannager_ip:\n echo(style(\"Connecting with Server\", fg=blue, bold=True))\n server = create_connection(user_ssh, ip, key_ssh)\n install_docker(server)\n install_docker_compose(server)\n init_service(hostname, server)\n server.run(' '.join(line[:-2]))\n break\n else:\n line = f.readline()\n\n msg = 'Not registers for the mannager server ip'\n echo(style(msg, fg=blue, bold=True))\n msg = 'Enter server user for of mannager node'\n user = prompt(style(msg, fg=blue, bold=True))\n msg = style('Enter path to ssh key file', fg=blue, bold=True)\n\n msg = style('Enter path to ssh key file', fg=blue, bold=True)\n key = prompt(msg)\n server = create_connection(user, mannager_ip, key)\n st = str(server.run('docker swarm join-token worker')).split()\n print(st)\n else:\n msg = 'Not registers for the mannager server ip'\n echo(style(msg, fg=blue, bold=True))\n\n msg = 'Enter server user for of mannager node'\n user = prompt(style(msg, fg=blue, bold=True))\n msg = style('Enter path to ssh key file', fg=blue, bold=True)\n key = prompt(msg)\n #server = create_connection(user, ip_mannager, key)",
"def add_resource(self, resource_name, value):\n name_check = self._resource_name_check(resource_name)\n if name_check == EnvironmentDict._VALID or name_check == EnvironmentDict._EXISTS:\n self._e_dict['resources'][resource_name] = value\n return self",
"def pre_logical_interface_create(self, resource_dict):\n pass",
"def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers)",
"def post_logical_interface_create(self, resource_dict):\n pass",
"def addLinkToResource(link):\n\n\tif link not in variables.resources:\n\t\tvariables.resources.append(link)",
"def add_device():\n input = request.get_json()\n\n if input == None:\n return jsonify({'error': 'Invalid POST request, no data'}), 400\n if not 'name' in input:\n return jsonify({'error': 'Invalid POST request, missing name'}), 400\n if not 'ip_addr' in input:\n return jsonify({'error': 'Invalid POST request, missing ip_addr'}), 400\n if not 'device_type_id' in input:\n return jsonify({'error': 'Invalid POST request, missing device_type_id'}), 400\n if not 'sw_version' in input:\n return jsonify({'error': 'Invalid POST request, missing sw_version'}), 400\n if not 'serial_number' in input:\n return jsonify({'error': 'Invalid POST request, missing serial_number'}), 400\n if not 'datacenter' in input:\n return jsonify({'error': 'Invalid POST request, missing datacenter'}), 400\n if not 'location' in input:\n return jsonify({'error': 'Invalid POST request, missing location'}), 400\n\n if not 'console' in input:\n input['console'] = ''\n if not 'description' in input:\n input['description'] = ''\n if not 'notes' in input:\n input['notes'] = ''\n\n netAdminToolDB = app.config['DATABASE']\n id = netAdminToolDB.add_device(input['name'], input['ip_addr'],\n input['device_type_id'], input['sw_version'],\n input['serial_number'], input['datacenter'], input['location'],\n input['console'], input['description'], input['notes'])\n\n device = netAdminToolDB.get_device(id)\n deviceDict = dict(device)\n uri = url_for('get_device',device_id=device.id,_external=True)\n deviceDict['uri'] = uri\n\n return jsonify({'device':deviceDict}), 201",
"def post_physical_interface_create(self, resource_dict):\n pass",
"def init_physical_resources():\n test_physical_resources = []\n\n # add info to list in memory, one by one, following signature values\n phys_resrc_ID = 1\n phys_resrc_name = \"small-cavium-1\"\n phys_resrc_info = \"Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"10.10.50.12\"\n phys_resrc_MACAddress = \"00-14-22-01-23-45\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 2\n phys_resrc_name = \"medium-cavium-1\"\n phys_resrc_info = \"Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"30.31.32.33\"\n phys_resrc_MACAddress = \"0xb3:22:05:c1:aa:82\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 3\n phys_resrc_name = \"mega-cavium-666\"\n phys_resrc_info = \"Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"54.53.52.51\"\n phys_resrc_MACAddress = \"01-23-45-67-89-ab\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n\n # write list to binary file\n write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)\n\n return test_physical_resources",
"def addsite(self, volume, _cfg=None) :\n name_or_id = self.get_name_or_id(volume)\n CmdList=[_cfg.binaries[\"vos\"], \"addsite\",\"-server\", \"%s\" % volume.servername, \"-partition\", \"%s\" % volume.partition, \"-id\", \"%s\" % name_or_id, \"-cell\", \"%s\" % _cfg.cell ]\n return CmdList,PM.addsite",
"def resource_create(resource_id, resource_type, resource_options=None, cibfile=None):\n return item_create(\n item=\"resource\",\n item_id=resource_id,\n item_type=resource_type,\n extra_args=resource_options,\n cibfile=cibfile,\n )"
] | [
"0.61094224",
"0.60468185",
"0.60436165",
"0.6028929",
"0.5990869",
"0.59473324",
"0.57917136",
"0.5691803",
"0.5640218",
"0.5632119",
"0.5586816",
"0.55330503",
"0.5507888",
"0.5504523",
"0.5498091",
"0.54637474",
"0.538984",
"0.53824514",
"0.5371367",
"0.53270954",
"0.5325815",
"0.53246975",
"0.531777",
"0.53038466",
"0.5254511",
"0.5177455",
"0.51753575",
"0.5166294",
"0.5163511",
"0.5162421"
] | 0.7051447 | 0 |
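
The `fusion_api_add_server_hardware` record above reduces to an HTTP POST with an optional URI suffix. A small sketch follows under the same caveats: the host, endpoint path, and body fields are hypothetical stand-ins, not values from the record.

    import requests

    def add_server_hardware(base_url, body, api_version=300, param=''):
        # `param` mirrors the optional suffix argument of the wrapper in the record above
        url = base_url + '/rest/server-hardware' + param
        return requests.post(url, json=body, headers={'X-Api-Version': str(api_version)})

    # usage (field names are illustrative, not taken from the dataset):
    # add_server_hardware('https://appliance.example.com',
    #                     {'hostname': '10.0.0.5', 'username': 'admin', 'password': 'secret'})
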
Updates a Server Hardware resource. [Arguments] | def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):
return self.sh.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command_update_hw(self, cmd):\n # TODO\n pass",
"def fusion_api_patch_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.patch(body, uri, api, headers)",
"def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)",
"def fusion_api_add_server_hardware(self, body, api=None, headers=None, param=''):\n return self.sh.post(body, api, headers, param)",
"def fusion_api_edit_server_hardware_power_state(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers, param='/powerState')",
"def update(openstack_resource, args):\n args = reset_dict_empty_keys(args)\n openstack_resource.update(args)",
"def fusion_api_edit_server_hardware_mp_firmware_version(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers, param='/mpFirmwareVersion')",
"def update_firmware(self) -> str:",
"def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)",
"def _edit_server_hardware(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n if not selenium2lib._is_element_present(FusionServerHardwarePage.ID_PAGE_LABEL):\n base_page.navigate_base(FusionServerHardwarePage.ID_PAGE_LABEL,\n FusionUIBaseElements.ID_MENU_LINK_SERVER_HARDWARE, \"css=span.hp-page-item-count\")\n if not serverhardware.power_off_server_by_name(profile.server):\n logger._warn(\"Failed to powerOff the server %s\" % profile.server)\n logger._warn(\"Can't proceed with server profile creation on server %s\" % profile.server)\n continue\n # Navigating to Server profile page\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n ui_lib.wait_for_element(FusionUIBaseElements.ID_MAIN_MENU_CONTROL, PerfConstants.DEFAULT_SYNC_TIME)\n navigate()\n\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.profilename not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.profilename)\n continue\n if profile.server == \"\":\n logger._warn(\"Mandatory fields to edit server hardware can't be empty\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._log_to_console_and_log_file(\"Server is not powered off, and switching off now\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWER_PRESS_AND_HOLD)\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_SERVER_POWER_OFF_VALIDATE, PerfConstants.SERVER_POWER_OFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._warn(\"Failed to power off the server %s\" % profile.server)\n else:\n logger._log_to_console_and_log_file(\"Successfully server %s 
is powered off\" % profile.server)\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION)\n # New Code\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION):\n errMsg = selenium2lib._get_text(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION_CONTENT)\n logger._warn(errMsg)\n logger._warn(\"Unable to edit profile server hardware %s\" % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_SERVER_PROFILE)\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n strTimeStamp = selenium2lib._get_text(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n logger._log_to_console_and_log_file(strTimeStamp)\n\n # Verify profile server hardware updation status in server profile page (Under Activity tab)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp), PerfConstants.CREATE_SERVER_PROFILE_TIME)\n\n if selenium2lib._is_element_present(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp)):\n logger._log_to_console_and_log_file(\"Server profile '%s' is edited successfully\" % profile.profilename)\n else:\n logger._warn(\"Failed to edit server profile '%s' hardware\" % profile.profilename)",
"def updateDeviceManagementInterface(self, serial: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['devices', 'configure', 'managementInterface'],\n 'operation': 'updateDeviceManagementInterface'\n }\n resource = f'/devices/{serial}/managementInterface'\n\n body_params = ['wan1', 'wan2', ]\n payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}\n action = {\n \"resource\": resource,\n \"operation\": \"update\",\n \"body\": payload\n }\n return action",
"def fusion_api_refresh_server_hardware(self, body={\"refreshState\": \"RefreshPending\"}, uri=None, api=None, headers=None):\n return self.sh.update(body, uri=uri, api=api, headers=headers, param='/refreshState')",
"def updateResource(self, authenticationToken, resource):\r\n self.send_updateResource(authenticationToken, resource)\r\n return self.recv_updateResource()",
"def fusion_api_edit_server_hardware_types(self, body, uri, api=None, headers=None):\n return self.types.update(body, uri, api, headers)",
"def test_update_hyperflex_server_firmware_version(self):\n pass",
"def update_firmware(firmware_path, script_path):\n\n args = ['uflash', '-r', firmware_path, script_path]\n subprocess.call(args)",
"def update(self):\n self.device = self._api.device_query(self._hardware_address, {})",
"def update(cls, client, resource) :\n try :\n if type(resource) is not list :\n updateresource = nshttpprofile()\n updateresource.name = resource.name\n updateresource.dropinvalreqs = resource.dropinvalreqs\n updateresource.markhttp09inval = resource.markhttp09inval\n updateresource.markconnreqinval = resource.markconnreqinval\n updateresource.cmponpush = resource.cmponpush\n updateresource.conmultiplex = resource.conmultiplex\n updateresource.maxreusepool = resource.maxreusepool\n updateresource.dropextracrlf = resource.dropextracrlf\n updateresource.incomphdrdelay = resource.incomphdrdelay\n updateresource.websocket = resource.websocket\n updateresource.rtsptunnel = resource.rtsptunnel\n updateresource.reqtimeout = resource.reqtimeout\n updateresource.adpttimeout = resource.adpttimeout\n updateresource.reqtimeoutaction = resource.reqtimeoutaction\n updateresource.dropextradata = resource.dropextradata\n updateresource.weblog = resource.weblog\n updateresource.clientiphdrexpr = resource.clientiphdrexpr\n updateresource.maxreq = resource.maxreq\n updateresource.persistentetag = resource.persistentetag\n updateresource.spdy = resource.spdy\n updateresource.http2 = resource.http2\n updateresource.http2maxheaderlistsize = resource.http2maxheaderlistsize\n updateresource.http2maxframesize = resource.http2maxframesize\n updateresource.http2maxconcurrentstreams = resource.http2maxconcurrentstreams\n updateresource.http2initialwindowsize = resource.http2initialwindowsize\n updateresource.http2headertablesize = resource.http2headertablesize\n updateresource.reusepooltimeout = resource.reusepooltimeout\n updateresource.maxheaderlen = resource.maxheaderlen\n updateresource.minreusepool = resource.minreusepool\n return updateresource.update_resource(client)\n else :\n if (resource and len(resource) > 0) :\n updateresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n updateresources[i].name = resource[i].name\n updateresources[i].dropinvalreqs = resource[i].dropinvalreqs\n updateresources[i].markhttp09inval = resource[i].markhttp09inval\n updateresources[i].markconnreqinval = resource[i].markconnreqinval\n updateresources[i].cmponpush = resource[i].cmponpush\n updateresources[i].conmultiplex = resource[i].conmultiplex\n updateresources[i].maxreusepool = resource[i].maxreusepool\n updateresources[i].dropextracrlf = resource[i].dropextracrlf\n updateresources[i].incomphdrdelay = resource[i].incomphdrdelay\n updateresources[i].websocket = resource[i].websocket\n updateresources[i].rtsptunnel = resource[i].rtsptunnel\n updateresources[i].reqtimeout = resource[i].reqtimeout\n updateresources[i].adpttimeout = resource[i].adpttimeout\n updateresources[i].reqtimeoutaction = resource[i].reqtimeoutaction\n updateresources[i].dropextradata = resource[i].dropextradata\n updateresources[i].weblog = resource[i].weblog\n updateresources[i].clientiphdrexpr = resource[i].clientiphdrexpr\n updateresources[i].maxreq = resource[i].maxreq\n updateresources[i].persistentetag = resource[i].persistentetag\n updateresources[i].spdy = resource[i].spdy\n updateresources[i].http2 = resource[i].http2\n updateresources[i].http2maxheaderlistsize = resource[i].http2maxheaderlistsize\n updateresources[i].http2maxframesize = resource[i].http2maxframesize\n updateresources[i].http2maxconcurrentstreams = resource[i].http2maxconcurrentstreams\n updateresources[i].http2initialwindowsize = resource[i].http2initialwindowsize\n updateresources[i].http2headertablesize = 
resource[i].http2headertablesize\n updateresources[i].reusepooltimeout = resource[i].reusepooltimeout\n updateresources[i].maxheaderlen = resource[i].maxheaderlen\n updateresources[i].minreusepool = resource[i].minreusepool\n result = cls.update_bulk_request(client, updateresources)\n return result\n except Exception as e :\n raise e",
"def updateResource(self, authenticationToken, resource):\r\n pass",
"def fusion_api_edit_server_hardware_environmental_config(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers, param='/environmentalConfiguration')",
"def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = l3param()\n\t\t\t\tupdateresource.srcnat = resource.srcnat\n\t\t\t\tupdateresource.icmpgenratethreshold = resource.icmpgenratethreshold\n\t\t\t\tupdateresource.overridernat = resource.overridernat\n\t\t\t\tupdateresource.dropdfflag = resource.dropdfflag\n\t\t\t\tupdateresource.miproundrobin = resource.miproundrobin\n\t\t\t\tupdateresource.externalloopback = resource.externalloopback\n\t\t\t\tupdateresource.tnlpmtuwoconn = resource.tnlpmtuwoconn\n\t\t\t\tupdateresource.usipserverstraypkt = resource.usipserverstraypkt\n\t\t\t\tupdateresource.forwardicmpfragments = resource.forwardicmpfragments\n\t\t\t\tupdateresource.dropipfragments = resource.dropipfragments\n\t\t\t\tupdateresource.acllogtime = resource.acllogtime\n\t\t\t\tupdateresource.implicitaclallow = resource.implicitaclallow\n\t\t\t\tupdateresource.dynamicrouting = resource.dynamicrouting\n\t\t\t\tupdateresource.ipv6dynamicrouting = resource.ipv6dynamicrouting\n\t\t\t\treturn updateresource.update_resource(client)\n\t\texcept Exception as e :\n\t\t\traise e",
"def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param=param)",
"def update(self):\n #self._switch.odlclient._request_json(self._path, method=\"put\", json={\n # \"flow\": self._odl_inventory()\n #})\n self.remove() # actually, remove only uses self.switch and self.id, so this removes the other entry as well.\n self.deploy()",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def test_update_hyperflex_server_model(self):\n pass",
"def updateDevice(self, serial: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['devices', 'configure'],\n 'operation': 'updateDevice'\n }\n resource = f'/devices/{serial}'\n\n body_params = ['name', 'tags', 'lat', 'lng', 'address', 'notes', 'moveMapMarker', 'switchProfileId', 'floorPlanId', ]\n payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}\n action = {\n \"resource\": resource,\n \"operation\": \"update\",\n \"body\": payload\n }\n return action",
"def fusion_api_li_upgrade_firmware(self, body=None, uri=None, api=None, param='', headers=None):\n param = '/firmware'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)",
"def update_resources_for_this_host(cache, db):\n free_cpu, free_mem = get_resources()\n my_ip = cache[\"ip\"]\n\n logger.info(\"UPDATING\", extra = {\"cpu\": free_cpu, \"mem\": free_mem, \"ip\": my_ip})\n try:\n db.hset(my_ip, mapping={\"cpu\": free_cpu, \"mem\": free_mem})\n except Exception as e:\n logger.error(e)\n raise e",
"def update(self, **kwargs):\n\n host = self.get()\n if not host:\n self.raiseNotFoundError()\n return host.update(**kwargs)",
"def update(device_id, **params):\n params = _clean_salt_variables(params)\n\n api_response = requests.put(\n \"https://api.serverdensity.io/inventory/devices/\" + device_id,\n params={\"token\": get_sd_auth(\"api_token\")},\n data=params,\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\n \"Could not parse Server Density API Response content: %s\",\n api_response.content,\n )\n raise CommandExecutionError(\n \"Failed to create, API Response: {}\".format(api_response)\n )\n else:\n return None"
] | [
"0.71226937",
"0.65816915",
"0.63041496",
"0.6302513",
"0.6005326",
"0.59792364",
"0.5869079",
"0.5853303",
"0.5816557",
"0.5771126",
"0.5758745",
"0.55203414",
"0.54762745",
"0.54751337",
"0.54741603",
"0.54285014",
"0.54248166",
"0.53858274",
"0.5376719",
"0.53216815",
"0.5319561",
"0.53067946",
"0.5279365",
"0.5270883",
"0.52689976",
"0.5260752",
"0.52589023",
"0.5214453",
"0.5212881",
"0.5205456"
] | 0.7146489 | 0 |
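
The `fusion_api_edit_server_hardware` record above is a full-body HTTP PUT. The sketch below also passes an `If-Match` ETag header, a common optimistic-concurrency pattern for REST updates; whether this particular API requires it is an assumption here, not something the record states.

    import requests

    def edit_server_hardware(base_url, uri, body, etag=None, api_version=300):
        headers = {'X-Api-Version': str(api_version)}
        if etag:
            # only apply the update if the stored ETag still matches (guards concurrent edits)
            headers['If-Match'] = etag
        return requests.put(base_url + uri, json=body, headers=headers)
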
Sets the mpFirmwareVersion for a server hardware resource. [Arguments] | def fusion_api_edit_server_hardware_mp_firmware_version(self, body, uri, api=None, headers=None):
return self.sh.update(body, uri, api, headers, param='/mpFirmwareVersion') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def firmware_version(self, firmware_version: str):\n\n self._firmware_version = firmware_version",
"def test_update_hyperflex_server_firmware_version(self):\n pass",
"def test_patch_hyperflex_server_firmware_version(self):\n pass",
"def test_create_hyperflex_server_firmware_version(self):\n pass",
"def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)",
"def setVersion(self, *args):\n\n self._version = '.'.join( [str(arg) for arg in args] )",
"def setProgramVersion(self, *args):\n return _libsbml.SBMLWriter_setProgramVersion(self, *args)",
"def update_firmware(self) -> str:",
"def _set_version(args: Any):\n if args['msc']:\n version = 'msc'\n elif args['nx']:\n version = 'nx'\n elif args['optistruct']:\n version = 'optistruct'\n elif args['nasa95']:\n version = 'nasa95'\n elif args['mystran']:\n version = 'mystran'\n else:\n version = None\n args['version'] = version\n del args['msc'], args['nx'], args['nasa95'], args['mystran'], args['optistruct']",
"def update_firmware(self):\n return self._dll.JLINKARM_UpdateFirmwareIfNewer()",
"def firmware_version(self):\n return self.data.get('fw_ver')",
"def get_firmware_version(self):\n cmd = protocol.GET_FIRMWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.firmware_version = value[0][1:]\n else:\n return False",
"def get_fw_ver(self, rec, report):\n\n rec.VAL = self.crate.mch_fw_ver[self.slot]",
"def _set_version(self) -> None:\n proc = subprocess.Popen([self.hmy_binary_path, \"version\"], env=self.environment,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n if not err:\n raise RuntimeError(f\"Could not get version.\\n\"\n f\"\\tGot exit code {proc.returncode}. Expected non-empty error message.\")\n self.version = err.decode().strip()",
"def firmware_version(self):\n return self._get_system_status()[\"firmware\"]",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def update_firmware(firmware_path, script_path):\n\n args = ['uflash', '-r', firmware_path, script_path]\n subprocess.call(args)",
"def firmware_version(self):\n return self._read(MX_FIRMWARE_VERSION)",
"def test_fw_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'FW version' in result.output\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0",
"def hardware_version(self):\n version = self._dll.JLINKARM_GetHardwareVersion()\n major = version / 10000 % 100\n minor = version / 100 % 100\n return '%d.%02d' % (major, minor)",
"def firmware_version(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()",
"def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")",
"def set_version(self, bundle, ctx, filename, version):",
"def update_firmware(self) -> None:\n\n BROADCAST_ID = 0xFFF\n firmware_update_message = self.__set_module_state(\n BROADCAST_ID, Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF\n )\n self._send_q.put(firmware_update_message)\n self.__delay()",
"def set_os_version(self, nVmOsVersion):\n\t\tcall_sdk_function('PrlVmCfg_SetOsVersion', self.handle, nVmOsVersion)",
"def hardware_version(self):\n return self.data.get('hw_ver')",
"def firmware_version(self) -> str:\n return self._firmware_version",
"def get_hardware_version(self):\n cmd = protocol.GET_HARDWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.hardware_version = value[0][1:]\n else:\n return False",
"def test_version(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n mocker.patch('pysds011.driver.SDS011.cmd_set_sleep')\n mocker.patch('pysds011.driver.SDS011.cmd_set_mode')\n cfv = mocker.patch('pysds011.driver.SDS011.cmd_firmware_ver')\n cfv.return_value = {'pretty': 'BimBumBam'}\n runner = CliRunner()\n result = runner.invoke(main, ['fw-version'])\n\n assert 'BimBumBam' in result.output\n assert result.exit_code == 0",
"def fw_version(self):\n return self.capabilities.get(\"fw_ver\")"
] | [
"0.6627622",
"0.6617696",
"0.65401965",
"0.62031513",
"0.600729",
"0.5926034",
"0.58608204",
"0.5817993",
"0.5745384",
"0.5706897",
"0.566575",
"0.56042427",
"0.5599718",
"0.55834997",
"0.5563769",
"0.5516268",
"0.55147135",
"0.54602534",
"0.54601395",
"0.54478574",
"0.54277396",
"0.5422989",
"0.54185694",
"0.5403409",
"0.54017353",
"0.53986734",
"0.53928506",
"0.53800255",
"0.53649426",
"0.5306347"
] | 0.7343825 | 0 |
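
The record above shows how the wrapper composes its target: it appends the fixed sub-resource suffix `/mpFirmwareVersion` to the resource URI before issuing a PUT. A minimal sketch follows, with the host and URI as placeholders rather than real endpoints.

    import requests

    def set_mp_firmware_version(base_url, uri, body, api_version=300):
        # the '/mpFirmwareVersion' suffix selects the management-processor firmware sub-resource
        return requests.put(base_url + uri + '/mpFirmwareVersion', json=body,
                            headers={'X-Api-Version': str(api_version)})
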
Gets a default or paginated collection of Server Hardware. [Arguments] | def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):
return self.sh.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None,\r\n domain=None, datacenter=None, nic_speed=None,\r\n public_ip=None, private_ip=None, **kwargs):\r\n if 'mask' not in kwargs:\r\n hw_items = [\r\n 'id',\r\n 'hostname',\r\n 'domain',\r\n 'hardwareStatusId',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'datacenter',\r\n ]\r\n server_items = [\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n ]\r\n\r\n kwargs['mask'] = '[mask[%s],' \\\r\n ' mask(SoftLayer_Hardware_Server)[%s]]' % \\\r\n (','.join(hw_items),\r\n ','.join(server_items))\r\n\r\n _filter = NestedDict(kwargs.get('filter') or {})\r\n if tags:\r\n _filter['hardware']['tagReferences']['tag']['name'] = {\r\n 'operation': 'in',\r\n 'options': [{'name': 'data', 'value': tags}],\r\n }\r\n\r\n if cpus:\r\n _filter['hardware']['processorPhysicalCoreAmount'] = \\\r\n query_filter(cpus)\r\n\r\n if memory:\r\n _filter['hardware']['memoryCapacity'] = query_filter(memory)\r\n\r\n if hostname:\r\n _filter['hardware']['hostname'] = query_filter(hostname)\r\n\r\n if domain:\r\n _filter['hardware']['domain'] = query_filter(domain)\r\n\r\n if datacenter:\r\n _filter['hardware']['datacenter']['name'] = \\\r\n query_filter(datacenter)\r\n\r\n if nic_speed:\r\n _filter['hardware']['networkComponents']['maxSpeed'] = \\\r\n query_filter(nic_speed)\r\n\r\n if public_ip:\r\n _filter['hardware']['primaryIpAddress'] = \\\r\n query_filter(public_ip)\r\n\r\n if private_ip:\r\n _filter['hardware']['primaryBackendIpAddress'] = \\\r\n query_filter(private_ip)\r\n\r\n kwargs['filter'] = _filter.to_dict()\r\n return self.account.getHardware(**kwargs)",
"def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):\n return self.types.get(uri=uri, api=api, headers=headers, param=param)",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def get_hardware(self, hardware_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n items = [\r\n 'id',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'hostname',\r\n 'domain',\r\n 'provisionDate',\r\n 'hardwareStatus',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'notes',\r\n 'privateNetworkOnlyFlag',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'networkManagementIpAddress',\r\n 'userData',\r\n 'datacenter',\r\n '''networkComponents[id, status, speed, maxSpeed, name,\r\n ipmiMacAddress, ipmiIpAddress, macAddress, primaryIpAddress,\r\n port, primarySubnet[id, netmask, broadcastAddress,\r\n networkIdentifier, gateway]]''',\r\n 'hardwareChassis[id,name]',\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n '''operatingSystem[\r\n softwareLicense[softwareDescription[manufacturer,\r\n name,\r\n version,\r\n referenceCode]],\r\n passwords[username,password]]''',\r\n 'billingItem.recurringFee',\r\n 'hourlyBillingFlag',\r\n 'tagReferences[id,tag[name,id]]',\r\n 'networkVlans[id,vlanNumber,networkSpace]',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.hardware.getObject(id=hardware_id, **kwargs)",
"def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]",
"def getHardware(self):\n return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0])",
"def hardware(self):\n return self._hardware",
"def select_host_characteristics(self):\n return IMPL.select_host_characteristics()",
"def test_get_hyperflex_server_model_list(self):\n pass",
"def test_get_node_hardware(self):\n pass",
"def get_hardware(hardware_name: str) -> str:\n fixed_name = \"-\".join(hardware_name.lower().split())\n output = _get_content(fixed_name, \"hardware\")\n\n return output",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def ls(**params):\n params = _clean_salt_variables(params)\n\n endpoint = \"devices\"\n\n # Change endpoint if there are params to filter by:\n if params:\n endpoint = \"resources\"\n\n # Convert all ints to strings:\n for key, val in params.items():\n params[key] = str(val)\n\n api_response = requests.get(\n \"https://api.serverdensity.io/inventory/{}\".format(endpoint),\n params={\n \"token\": get_sd_auth(\"api_token\"),\n \"filter\": salt.utils.json.dumps(params),\n },\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\n \"Could not parse Server Density API Response content: %s\",\n api_response.content,\n )\n raise CommandExecutionError(\n \"Failed to create, Server Density API Response: {}\".format(api_response)\n )\n else:\n return None",
"def test_get_node_hardware_fast(self):\n pass",
"def fusion_api_get_server_hardware_firmware(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/firmware')",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def list_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print PoolMachine.summary_header()\n print \"-\" * 80\n for machine in machines:\n print machine.summary()",
"def fusion_api_get_server_hardware_utilization(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/utilization')",
"def api_get(self):\n sdc = DataCenter(location=self.joyent_uri, key_id=self.joyent_key_id, secret=self.joyent_secret,\n allow_agent=False, verbose=self.debug)\n servers = sdc.machines()\n return servers",
"def all_machines():\n return sorted(MACHINES, key=str)",
"def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})",
"def fetch_router_list(args):\n nd = NetDevices(production_only=opts.nonprod)\n ret = []\n blocked_groups = []\n if args:\n for arg in args:\n # Try to find the device, but fail gracefully if it can't be found\n device = device_match(arg)\n if not pass_filters(device) or device is None:\n continue\n ret.append(device)\n\n else:\n for entry in nd.itervalues():\n if entry.owningTeam in blocked_groups:\n continue\n if not pass_filters(entry):\n continue\n ret.append(entry)\n\n return sorted(ret, reverse=True)",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def machine_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceMachineSpec']]:\n return pulumi.get(self, \"machine_specs\")",
"def device_get(self, filters={}):\n return {}",
"def _get_server_hardware_mac(server_hardware):\n sh_physical_port = None\n\n if server_hardware.get('portMap'):\n for device in server_hardware.get(\n 'portMap', {}).get('deviceSlots', ()):\n for physical_port in device.get('physicalPorts', ()):\n if physical_port.get('type') == 'Ethernet':\n sh_physical_port = physical_port\n break\n if sh_physical_port:\n for virtual_port in sh_physical_port.get('virtualPorts', ()):\n # NOTE(nicodemos): Ironic oneview drivers needs to use a\n # port that type is Ethernet and function identifier 'a' for\n # this FlexNIC to be able to make a deploy using PXE.\n if virtual_port.get('portFunction') == 'a':\n return virtual_port.get('mac', ()).lower()\n raise exception.OneViewError(\n _(\"There is no Ethernet port on the Server Hardware: %s\") %\n server_hardware.get('uri'))\n else:\n raise exception.OneViewError(\n _(\"The Server Hardware: %s doesn't have a list of adapters/slots, \"\n \"their ports and attributes. This information is available only \"\n \"for blade servers. Is this a rack server?\") %\n server_hardware.get('uri'))",
"def flask_internal_get_devices():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n # retrieve pagination\n page_number, per_page = get_pagination(request)\n\n params = {\n 'page_number': page_number,\n 'per_page': per_page,\n 'sortBy': request.args.get('sortBy', None),\n 'attr': request.args.getlist('attr'),\n 'attr_type': request.args.getlist('attr_type'),\n 'label': request.args.get('label', None),\n 'template': request.args.get('template', None),\n 'idsOnly': request.args.get('idsOnly', 'false'),\n }\n\n result = DeviceHandler.get_devices(token, params, True)\n LOGGER.info(f' Getting known internal devices.')\n \n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)",
"def get_devices(self):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevice.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/networkdevice'.format(self.url_base))\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tjson_res = ERS._to_json(resp.text)['ns3:searchResult']\n\n\t\tif resp.status_code == 200 and int(json_res['@total']) > 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(i['@name'], i['@id'])\n\t\t\t\t\t\t\t\t for i in json_res['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 1:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = [(json_res['ns3:resources']['ns5:resource']['@name'],\n\t\t\t\t\t\t\t\t json_res['ns3:resources']['ns5:resource']['@id'])]\n\t\t\treturn result\n\n\t\telif resp.status_code == 200 and int(json_res['@total']) == 0:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = []\n\t\t\treturn result\n\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result"
] | [
"0.66805214",
"0.6052699",
"0.5832132",
"0.5794585",
"0.5691229",
"0.5631433",
"0.5605564",
"0.56037563",
"0.55718684",
"0.55635154",
"0.55176204",
"0.55051565",
"0.5365245",
"0.5346477",
"0.53398293",
"0.5338803",
"0.53372806",
"0.5295526",
"0.5254829",
"0.52471364",
"0.5244047",
"0.5227606",
"0.52128756",
"0.5207948",
"0.520207",
"0.5134795",
"0.5120602",
"0.51158655",
"0.5096",
"0.5095761"
] | 0.650311 | 1 |
Gets firmware compliance list of Server Hardware. [Arguments] | def fusion_api_get_server_hardware_firmware_compliance(self, body, api=None, headers=None):
return self.sh.post(body=body, param='/firmware-compliance', api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]",
"def fusion_api_get_server_hardware_firmware(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/firmware')",
"def query_supported_software(self):\n api_uri = self._uri_dict.get('querySupportedSoftware')\n data = {}\n r_data = self._post(api_uri, data)\n return r_data",
"def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):\n return self.types.get(uri=uri, api=api, headers=headers, param=param)",
"def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None,\r\n domain=None, datacenter=None, nic_speed=None,\r\n public_ip=None, private_ip=None, **kwargs):\r\n if 'mask' not in kwargs:\r\n hw_items = [\r\n 'id',\r\n 'hostname',\r\n 'domain',\r\n 'hardwareStatusId',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'datacenter',\r\n ]\r\n server_items = [\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n ]\r\n\r\n kwargs['mask'] = '[mask[%s],' \\\r\n ' mask(SoftLayer_Hardware_Server)[%s]]' % \\\r\n (','.join(hw_items),\r\n ','.join(server_items))\r\n\r\n _filter = NestedDict(kwargs.get('filter') or {})\r\n if tags:\r\n _filter['hardware']['tagReferences']['tag']['name'] = {\r\n 'operation': 'in',\r\n 'options': [{'name': 'data', 'value': tags}],\r\n }\r\n\r\n if cpus:\r\n _filter['hardware']['processorPhysicalCoreAmount'] = \\\r\n query_filter(cpus)\r\n\r\n if memory:\r\n _filter['hardware']['memoryCapacity'] = query_filter(memory)\r\n\r\n if hostname:\r\n _filter['hardware']['hostname'] = query_filter(hostname)\r\n\r\n if domain:\r\n _filter['hardware']['domain'] = query_filter(domain)\r\n\r\n if datacenter:\r\n _filter['hardware']['datacenter']['name'] = \\\r\n query_filter(datacenter)\r\n\r\n if nic_speed:\r\n _filter['hardware']['networkComponents']['maxSpeed'] = \\\r\n query_filter(nic_speed)\r\n\r\n if public_ip:\r\n _filter['hardware']['primaryIpAddress'] = \\\r\n query_filter(public_ip)\r\n\r\n if private_ip:\r\n _filter['hardware']['primaryBackendIpAddress'] = \\\r\n query_filter(private_ip)\r\n\r\n kwargs['filter'] = _filter.to_dict()\r\n return self.account.getHardware(**kwargs)",
"def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)",
"def supportedSoftwares():\n return [\"any\"]",
"def supportedSoftwares():\n return [\"any\"]",
"def get_sw_version():\n done = False\n if len(sys.argv) != 2:\n print(\"Give hostname of the device please!\")\n return\n in_host = sys.argv[1]\n #device_list = ret_device_list()\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n for device in device_list['response']:\n if str(device['hostname']) != in_host:\n continue\n device_ip = device['managementIpAddress']\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device/ip-address/\" + device_ip\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n image_details = resp.json()\n sw_version = image_details['response']['softwareVersion']\n print(\"Host: \" + in_host + \" IP: \" + device_ip + \" software version: \" + sw_version + \"\\n\")\n\n # Now suggest the patches\n\n print(\"You need the following Patches: \") \n print(patches[sw_version])\n #pdb.set_trace()\n #page = requests.get('https://wwwin-ottawa.cisco.com/tfoggoa/Scrubber/showquery.html?query=tmondal-7')\n #processed_page = BeautifulSoup(page.content, 'html.parser') \n #page = requests.get('http://www.fabpedigree.com/james/mathmen.htm')\n #processed_page = BeautifulSoup(page.content, 'html.parser')\n #for td in processed_page.select('td'):\n # print(td.text)",
"def get_hardware(hardware_name: str) -> str:\n fixed_name = \"-\".join(hardware_name.lower().split())\n output = _get_content(fixed_name, \"hardware\")\n\n return output",
"def supported_firmware_interfaces(self):\n return [fake.FakeFirmware] + super().supported_firmware_interfaces",
"def firmware_version(self):\n return self._get_system_status()[\"firmware\"]",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def hardware_info(self, mask=0xFFFFFFFF):\n buf = (ctypes.c_uint32 * 32)()\n res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))\n if res != 0:\n raise errors.JLinkException(res)\n return list(buf)",
"def firmware(self) -> str:\n return self._device_info[\"Firmware\"]",
"def getHardware(self):\n return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0])",
"def getFirmwareVersion(self, *id_list):\n if id_list == ():#Empty list\n return -1\n elif len(id_list) == 1:#Just one ID.\n pkt = Packet.makeReadPacket(id_list[0],xl320.XL320_FIRMWARE_VERSION)\n else:\n pkt = Packet.makeSyncReadPacket(xl320.XL320_FIRMWARE_VERSION,id_list)\n\n ans,err_num,err_str = self.serial.sendPkt(pkt)\n if ans == []:#In case of an empty packet arrives\n return -2\n else:\n data = []\n for index,val in enumerate(id_list):\n #print (index,val)\n data.append(val) #Append the ID value\n data.append(ans[index*12+9])#Append the respective ID's data\n return data",
"def _get_server_hardware_mac(server_hardware):\n sh_physical_port = None\n\n if server_hardware.get('portMap'):\n for device in server_hardware.get(\n 'portMap', {}).get('deviceSlots', ()):\n for physical_port in device.get('physicalPorts', ()):\n if physical_port.get('type') == 'Ethernet':\n sh_physical_port = physical_port\n break\n if sh_physical_port:\n for virtual_port in sh_physical_port.get('virtualPorts', ()):\n # NOTE(nicodemos): Ironic oneview drivers needs to use a\n # port that type is Ethernet and function identifier 'a' for\n # this FlexNIC to be able to make a deploy using PXE.\n if virtual_port.get('portFunction') == 'a':\n return virtual_port.get('mac', ()).lower()\n raise exception.OneViewError(\n _(\"There is no Ethernet port on the Server Hardware: %s\") %\n server_hardware.get('uri'))\n else:\n raise exception.OneViewError(\n _(\"The Server Hardware: %s doesn't have a list of adapters/slots, \"\n \"their ports and attributes. This information is available only \"\n \"for blade servers. Is this a rack server?\") %\n server_hardware.get('uri'))",
"async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)",
"def firmwares(self):\n return FirmwareCollection(client=self)",
"def get_hardware(self, hardware_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n items = [\r\n 'id',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'hostname',\r\n 'domain',\r\n 'provisionDate',\r\n 'hardwareStatus',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'notes',\r\n 'privateNetworkOnlyFlag',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'networkManagementIpAddress',\r\n 'userData',\r\n 'datacenter',\r\n '''networkComponents[id, status, speed, maxSpeed, name,\r\n ipmiMacAddress, ipmiIpAddress, macAddress, primaryIpAddress,\r\n port, primarySubnet[id, netmask, broadcastAddress,\r\n networkIdentifier, gateway]]''',\r\n 'hardwareChassis[id,name]',\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n '''operatingSystem[\r\n softwareLicense[softwareDescription[manufacturer,\r\n name,\r\n version,\r\n referenceCode]],\r\n passwords[username,password]]''',\r\n 'billingItem.recurringFee',\r\n 'hourlyBillingFlag',\r\n 'tagReferences[id,tag[name,id]]',\r\n 'networkVlans[id,vlanNumber,networkSpace]',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.hardware.getObject(id=hardware_id, **kwargs)",
"def find_hardware(self, device_info=None):\n if os.name is not 'nt': # If not on a Windows system, just set up soundcard\n self.setup_soundcard()\n self.hardware.append('Soundcard')\n self.out_samplefreq = 44100\n else:\n if 'NIDAQ' in self.required_hardware and self.setup_nidaq(device_info):\n self.hardware.append('NIDAQ')\n if 'RP21' in self.required_hardware and self.setup_RP21('c:\\pystartle\\startle.rco'):\n self.hardware.append('RP21')\n if 'PA5' in self.required_hardware and self.setup_PA5():\n self.hardware.append('PA5')\n if 'RZ5D' in self.required_hardware and self.setup_RZ5D():\n self.hardware.append('RZ5D')",
"def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()",
"async def _report_firmware(self, sysex_data):\n # first byte after command is major number\n firmware_report_iterator = iter(sysex_data)\n\n major = sysex_data[1]\n version_string = str(major)\n\n # next byte is minor number\n minor = sysex_data[2]\n\n # append a dot to major number\n version_string += '.'\n\n # append minor number\n version_string += str(minor)\n # add a space after the major and minor numbers\n version_string += ' '\n\n # slice the identifier - from the first byte after the minor\n # number up until, but not including the END_SYSEX byte\n\n name = sysex_data[3:-1]\n firmware_name_iterator = iter(name)\n # convert the identifier to printable text and add each character\n # to the version string\n for e in firmware_name_iterator:\n version_string += chr(e + (next(firmware_name_iterator) << 7))\n\n # store the value\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = version_string",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"async def _report_firmware(self, sysex_data):\n # first byte after command is major number\n major = sysex_data[1]\n version_string = str(major)\n\n # next byte is minor number\n minor = sysex_data[2]\n\n # append a dot to major number\n version_string += '.'\n\n # append minor number\n version_string += str(minor)\n # add a space after the major and minor numbers\n version_string += ' '\n\n # slice the identifier - from the first byte after the minor\n # number up until, but not including the END_SYSEX byte\n\n name = sysex_data[3:-1]\n\n # convert the identifier to printable text and add each character\n # to the version string\n for e in name:\n version_string += chr(e)\n\n # store the value\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = version_string"
] | [
"0.6788801",
"0.6719903",
"0.65344447",
"0.6365518",
"0.62952614",
"0.6102253",
"0.60330147",
"0.60206366",
"0.59811133",
"0.58125216",
"0.57846195",
"0.57846195",
"0.573526",
"0.5728773",
"0.5726028",
"0.56026626",
"0.5578904",
"0.556851",
"0.55412483",
"0.55307704",
"0.55182457",
"0.54797727",
"0.5423191",
"0.54200524",
"0.538794",
"0.53621215",
"0.53481644",
"0.5347544",
"0.5338104",
"0.5323473"
] | 0.7410056 | 0 |
Retrieves the list of BIOS/UEFI settings of the server hardware resource. [Arguments] | def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):
return self.sh.get(uri=uri, api=api, headers=headers, param='/bios') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param=param)",
"def test_get_bios_boot_mode_list(self):\n pass",
"def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]",
"def fusion_api_get_server_hardware_firmware(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/firmware')",
"def _get_bios_settings_resource(self, data):\n try:\n bios_settings_uri = data['links']['Settings']['href']\n except KeyError:\n msg = ('BIOS Settings resource not found.')\n raise exception.IloError(msg)\n\n status, headers, bios_settings = self._rest_get(bios_settings_uri)\n if status != 200:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n return headers, bios_settings_uri, bios_settings",
"def ex_get_hypervisor_sysinfo(self):\n xml = self.connection.getSysinfo()\n etree = ET.XML(xml)\n\n attributes = [\"bios\", \"system\", \"processor\", \"memory_device\"]\n\n sysinfo = {}\n for attribute in attributes:\n element = etree.find(attribute)\n entries = self._get_entries(element=element)\n sysinfo[attribute] = entries\n\n return sysinfo",
"def get_bios_settings(bmc):\n bios_settings = bmc.list_bios_settings()\n # Convert the settings to something that is JSON-serialisable.\n settings = {}\n for param, value in bios_settings.items():\n setting = {}\n # Not all attributes exist on all settings, so allow them to be absent.\n attrs = {\n 'current_value',\n 'pending_value',\n 'possible_values',\n }\n for attr in attrs:\n if hasattr(value, attr):\n setting[attr] = getattr(value, attr)\n settings[param] = setting\n return settings",
"def set_bios_bootmode_uefi(ip, login_account, login_password, system_id):\n result = {}\n login_host = \"https://\" + ip\n try:\n # Connect using the BMC address, account name, and password\n # Create a REDFISH object\n REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,\n password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)\n # Login into the server and create a session\n REDFISH_OBJ.login(auth=utils.g_AUTH)\n except:\n traceback.print_exc()\n result = {'ret': False, 'msg': \"Please check the username, password, IP is correct\"}\n return result\n\n # GET the ComputerSystem resource\n system = utils.get_system_url(\"/redfish/v1\", system_id, REDFISH_OBJ)\n if not system:\n result = {'ret': False, 'msg': \"This system id is not exist or system member is None\"}\n REDFISH_OBJ.logout()\n return result\n for i in range(len(system)):\n system_url = system[i]\n response_system_url = REDFISH_OBJ.get(system_url, None)\n if response_system_url.status != 200:\n error_message = utils.get_extended_error(response_system_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (system_url, response_system_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else:\n # Get the bios resource\n bios_url = response_system_url.dict['Bios']['@odata.id']\n response_bios_url = REDFISH_OBJ.get(bios_url, None)\n if response_bios_url.status != 200:\n error_message = utils.get_extended_error(response_bios_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (bios_url, response_bios_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else: # Get bios success\n # Seek boot mode from bios attributes\n attribute_bootmode = None\n attributes = response_bios_url.dict['Attributes']\n for attribute in attributes:\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"SystemBootMode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"Boot\" in attribute and \"Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n result = {'ret': False, 'msg': \"Can not found BootMode attribute in response of url %s\" %(bios_url)}\n REDFISH_OBJ.logout()\n return result\n\n # Get boot mode setting guide from bios registry\n WarningText = None\n ValueName = None\n bios_registry_url = \"/redfish/v1/Registries/\" + response_bios_url.dict['AttributeRegistry']\n response_bios_registry_url = REDFISH_OBJ.get(bios_registry_url, None)\n if response_bios_registry_url.status == 200:\n locations = response_bios_registry_url.dict['Location']\n bios_regjson_url = None\n for location in locations:\n if 'en' in location['Language']:\n bios_regjson_url = location['Uri']\n break\n if bios_regjson_url:\n response_bios_regjson_url = REDFISH_OBJ.get(bios_regjson_url, None)\n if response_bios_regjson_url.status == 200:\n regattributes = response_bios_regjson_url.dict['RegistryEntries']['Attributes']\n for regattribute in regattributes:\n if regattribute['AttributeName'] == attribute_bootmode:\n if 'WarningText' in regattribute:\n WarningText = regattribute['WarningText']\n for value in regattribute['Value']:\n if 'legacy' in value['ValueName'].lower():\n continue\n if 'uefi' in value['ValueName'].lower():\n ValueName = 
value['ValueName']\n break\n ValueName = value['ValueName']\n break\n \n # Perform patch to set\n if ValueName == None:\n ValueName = \"UEFIMode\"\n pending_url = response_bios_url.dict['@Redfish.Settings']['SettingsObject']['@odata.id']\n parameter = {attribute_bootmode: ValueName}\n attribute = {\"Attributes\": parameter}\n headers = {\"If-Match\": '*'}\n response_pending_url = REDFISH_OBJ.patch(pending_url, body=attribute, headers=headers)\n if response_pending_url.status in [200,204]:\n if WarningText:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful. WarningText: %s'% (WarningText) }\n else:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful'}\n elif response_pending_url.status == 405:\n result = {'ret': False, 'msg': \"Resource not supported\"}\n else:\n error_message = utils.get_extended_error(response_pending_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (\n pending_url, response_pending_url.status, error_message)}\n\n # Logout of the current session\n try:\n REDFISH_OBJ.logout()\n except:\n pass\n return result",
"def select_host_characteristics(self):\n return IMPL.select_host_characteristics()",
"def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes",
"def fusion_api_get_server_hardware_environmental_config(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/environmentalConfiguration')",
"def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):\n return self.types.get(uri=uri, api=api, headers=headers, param=param)",
"def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None,\r\n domain=None, datacenter=None, nic_speed=None,\r\n public_ip=None, private_ip=None, **kwargs):\r\n if 'mask' not in kwargs:\r\n hw_items = [\r\n 'id',\r\n 'hostname',\r\n 'domain',\r\n 'hardwareStatusId',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'datacenter',\r\n ]\r\n server_items = [\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n ]\r\n\r\n kwargs['mask'] = '[mask[%s],' \\\r\n ' mask(SoftLayer_Hardware_Server)[%s]]' % \\\r\n (','.join(hw_items),\r\n ','.join(server_items))\r\n\r\n _filter = NestedDict(kwargs.get('filter') or {})\r\n if tags:\r\n _filter['hardware']['tagReferences']['tag']['name'] = {\r\n 'operation': 'in',\r\n 'options': [{'name': 'data', 'value': tags}],\r\n }\r\n\r\n if cpus:\r\n _filter['hardware']['processorPhysicalCoreAmount'] = \\\r\n query_filter(cpus)\r\n\r\n if memory:\r\n _filter['hardware']['memoryCapacity'] = query_filter(memory)\r\n\r\n if hostname:\r\n _filter['hardware']['hostname'] = query_filter(hostname)\r\n\r\n if domain:\r\n _filter['hardware']['domain'] = query_filter(domain)\r\n\r\n if datacenter:\r\n _filter['hardware']['datacenter']['name'] = \\\r\n query_filter(datacenter)\r\n\r\n if nic_speed:\r\n _filter['hardware']['networkComponents']['maxSpeed'] = \\\r\n query_filter(nic_speed)\r\n\r\n if public_ip:\r\n _filter['hardware']['primaryIpAddress'] = \\\r\n query_filter(public_ip)\r\n\r\n if private_ip:\r\n _filter['hardware']['primaryBackendIpAddress'] = \\\r\n query_filter(private_ip)\r\n\r\n kwargs['filter'] = _filter.to_dict()\r\n return self.account.getHardware(**kwargs)",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def fusion_api_get_server_hardware_firmware_compliance(self, body, api=None, headers=None):\n return self.sh.post(body=body, param='/firmware-compliance', api=api, headers=headers)",
"def _get_bios_setting(self, bios_property):\n headers, bios_uri, bios_settings = self._check_bios_resource([\n bios_property])\n return bios_settings[bios_property]",
"def intGet(): \n macchanger, ip, iwconfig = pathGet()\n interfaces = []\n a = str(subprocess.check_output(\"{} link show\".format(ip), shell=True))\n ints = a.split(': ')\n for i in range(len(ints)):\n if len(ints[i].split()) == 1:\n if ints[i] not in [\"1\", \"lo\", \"b'1\"]:\n interface = {'name':str(ints[i])}\n interfaces.append(interface)\n # Get interface properties\n for interface in interfaces:\n name = interface['name']\n macs = subprocess.check_output(\"{} -s {}\".format(macchanger, name), shell=True).decode(\"utf-8\")\n interface['cMac'] = macs.split()[2]\n interface['cVend'] = macs.split(\"(\")[1].split(\")\")[0]\n interface['pMac'] = macs.split(\"\\n\")[1].split()[2]\n interface['pVend'] = macs.split(\"\\n\")[1].split(\"(\")[1].split(\")\")[0]\n try:\n mon = subprocess.check_output(\"{} {} 2> /dev/null\".format(iwconfig, name), shell=True).split()\n mon1 = mon[3].decode(\"utf-8\").split(':')[1]\n if mon1 == 'off/any':\n mon1 = mon[4].decode(\"utf-8\").split(':')[1]\n interface['mon'] = mon1\n except:\n interface['mon'] = 'Wired'\n return(interfaces)",
"def fusion_api_get_li_fcoe_settings(self, uri, api=None, headers=None):\n param = '/fcoeSettings'\n return self.li.get(uri=uri, api=api, headers=headers, param=param)",
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def get_pending_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n settings = sushy_system.bios.pending_attributes\n except sushy.exceptions.SushyError as e:\n msg = (self._('The pending BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n return settings",
"def fusion_api_get_server_hardware_utilization(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/utilization')",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def hardware_info(self, mask=0xFFFFFFFF):\n buf = (ctypes.c_uint32 * 32)()\n res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))\n if res != 0:\n raise errors.JLinkException(res)\n return list(buf)",
"def getHardware(self):\n return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0])",
"def get():\n\n mba_ctrl_info = caps.mba_ctrl_info()\n\n res = {\n 'supported': mba_ctrl_info['supported'],\n 'enabled': mba_ctrl_info['enabled']\n }\n return res, 200",
"def get_hardware(hardware_name: str) -> str:\n fixed_name = \"-\".join(hardware_name.lower().split())\n output = _get_content(fixed_name, \"hardware\")\n\n return output",
"def load_firmware_info(sets, client, file_name=None):\n from cp_lib.split_version import sets_version_to_str\n\n if SECTION_FW_INFO in sets:\n logging.debug(\"method #1 - is already in ./config/settings.ini\")\n sets[SECTION_FW_INFO][\"version\"] = sets_version_to_str(sets,\n SECTION_FW_INFO)\n return sets\n\n # check for a file such as \"./config/fw_info.json\"\n if file_name is None:\n file_name = os.path.join(DEF_GLOBAL_DIRECTORY, \"fw_info.json\")\n\n if os.path.exists(file_name):\n # method #2 - the file exists. Do this indirectly to avoid some\n # Win/Linux relative path issues\n logging.debug(\"method #2 - load file {}\".format(file_name))\n _file_han = open(file_name, \"r\")\n sets[SECTION_FW_INFO] = json.load(_file_han)\n _file_han.close()\n sets[SECTION_FW_INFO][\"version\"] = sets_version_to_str(sets,\n SECTION_FW_INFO)\n return sets\n\n # is still here, we'll do it the 'hard way' via Router API\n logging.debug(\"method #3 - use CS Client\")\n assert isinstance(client, CradlepointClient)\n\n save_state = client.show_rsp\n client.show_rsp = False\n result = client.get(\"status/fw_info\")\n client.show_rsp = save_state\n\n if result is None:\n raise CradlepointRouterOffline(\n \"Aborting; Router({}) is not accessible\".format(client.router_ip))\n\n if isinstance(result, str):\n result = json.loads(result)\n\n sets[SECTION_FW_INFO] = result\n sets[SECTION_FW_INFO][\"version\"] = sets_version_to_str(sets,\n SECTION_FW_INFO)\n return sets",
"def _check_bios_resource(self, properties=[]):\n\n system = self._get_host_details()\n if ('links' in system['Oem']['Hp'] and\n 'BIOS' in system['Oem']['Hp']['links']):\n # Get the BIOS URI and Settings\n bios_uri = system['Oem']['Hp']['links']['BIOS']['href']\n status, headers, bios_settings = self._rest_get(bios_uri)\n\n if status >= 300:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n # If property is not None, check if the bios_property is supported\n for property in properties:\n if property not in bios_settings:\n # not supported on this platform\n msg = ('BIOS Property \"' + property + '\" is not'\n ' supported on this system.')\n raise exception.IloCommandNotSupportedError(msg)\n\n return headers, bios_uri, bios_settings\n\n else:\n msg = ('\"links/BIOS\" section in ComputerSystem/Oem/Hp'\n ' does not exist')\n raise exception.IloCommandNotSupportedError(msg)"
] | [
"0.6440124",
"0.61365855",
"0.59612876",
"0.57465315",
"0.57459027",
"0.56982434",
"0.5689407",
"0.56827366",
"0.5650437",
"0.56126344",
"0.5584547",
"0.556429",
"0.54789084",
"0.54779667",
"0.5472801",
"0.53819984",
"0.53016454",
"0.52867734",
"0.5272355",
"0.5266106",
"0.5238729",
"0.5236483",
"0.5232377",
"0.52287704",
"0.5224554",
"0.5218015",
"0.5180065",
"0.5174045",
"0.51732",
"0.516278"
] | 0.64542997 | 0 |
Retrieves the list of firmware settings of the server hardware resource. [Arguments] | def fusion_api_get_server_hardware_firmware(self, uri, api=None, headers=None):
return self.sh.get(uri=uri, api=api, headers=headers, param='/firmware') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)",
"def fusion_api_get_server_hardware_firmware_compliance(self, body, api=None, headers=None):\n return self.sh.post(body=body, param='/firmware-compliance', api=api, headers=headers)",
"def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param=param)",
"def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):\n return self.types.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')",
"def load_firmware_info(sets, client, file_name=None):\n from cp_lib.split_version import sets_version_to_str\n\n if SECTION_FW_INFO in sets:\n logging.debug(\"method #1 - is already in ./config/settings.ini\")\n sets[SECTION_FW_INFO][\"version\"] = sets_version_to_str(sets,\n SECTION_FW_INFO)\n return sets\n\n # check for a file such as \"./config/fw_info.json\"\n if file_name is None:\n file_name = os.path.join(DEF_GLOBAL_DIRECTORY, \"fw_info.json\")\n\n if os.path.exists(file_name):\n # method #2 - the file exists. Do this indirectly to avoid some\n # Win/Linux relative path issues\n logging.debug(\"method #2 - load file {}\".format(file_name))\n _file_han = open(file_name, \"r\")\n sets[SECTION_FW_INFO] = json.load(_file_han)\n _file_han.close()\n sets[SECTION_FW_INFO][\"version\"] = sets_version_to_str(sets,\n SECTION_FW_INFO)\n return sets\n\n # is still here, we'll do it the 'hard way' via Router API\n logging.debug(\"method #3 - use CS Client\")\n assert isinstance(client, CradlepointClient)\n\n save_state = client.show_rsp\n client.show_rsp = False\n result = client.get(\"status/fw_info\")\n client.show_rsp = save_state\n\n if result is None:\n raise CradlepointRouterOffline(\n \"Aborting; Router({}) is not accessible\".format(client.router_ip))\n\n if isinstance(result, str):\n result = json.loads(result)\n\n sets[SECTION_FW_INFO] = result\n sets[SECTION_FW_INFO][\"version\"] = sets_version_to_str(sets,\n SECTION_FW_INFO)\n return sets",
"def firmware_version(self):\n return self._get_system_status()[\"firmware\"]",
"def firmware(self) -> str:\n return self._device_info[\"Firmware\"]",
"def get_hardware(self, hardware_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n items = [\r\n 'id',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'hostname',\r\n 'domain',\r\n 'provisionDate',\r\n 'hardwareStatus',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'notes',\r\n 'privateNetworkOnlyFlag',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'networkManagementIpAddress',\r\n 'userData',\r\n 'datacenter',\r\n '''networkComponents[id, status, speed, maxSpeed, name,\r\n ipmiMacAddress, ipmiIpAddress, macAddress, primaryIpAddress,\r\n port, primarySubnet[id, netmask, broadcastAddress,\r\n networkIdentifier, gateway]]''',\r\n 'hardwareChassis[id,name]',\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n '''operatingSystem[\r\n softwareLicense[softwareDescription[manufacturer,\r\n name,\r\n version,\r\n referenceCode]],\r\n passwords[username,password]]''',\r\n 'billingItem.recurringFee',\r\n 'hourlyBillingFlag',\r\n 'tagReferences[id,tag[name,id]]',\r\n 'networkVlans[id,vlanNumber,networkSpace]',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.hardware.getObject(id=hardware_id, **kwargs)",
"def get_hardware(hardware_name: str) -> str:\n fixed_name = \"-\".join(hardware_name.lower().split())\n output = _get_content(fixed_name, \"hardware\")\n\n return output",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"def firmwares(self):\n return FirmwareCollection(client=self)",
"def hardware_info(self, mask=0xFFFFFFFF):\n buf = (ctypes.c_uint32 * 32)()\n res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))\n if res != 0:\n raise errors.JLinkException(res)\n return list(buf)",
"def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None,\r\n domain=None, datacenter=None, nic_speed=None,\r\n public_ip=None, private_ip=None, **kwargs):\r\n if 'mask' not in kwargs:\r\n hw_items = [\r\n 'id',\r\n 'hostname',\r\n 'domain',\r\n 'hardwareStatusId',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'datacenter',\r\n ]\r\n server_items = [\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n ]\r\n\r\n kwargs['mask'] = '[mask[%s],' \\\r\n ' mask(SoftLayer_Hardware_Server)[%s]]' % \\\r\n (','.join(hw_items),\r\n ','.join(server_items))\r\n\r\n _filter = NestedDict(kwargs.get('filter') or {})\r\n if tags:\r\n _filter['hardware']['tagReferences']['tag']['name'] = {\r\n 'operation': 'in',\r\n 'options': [{'name': 'data', 'value': tags}],\r\n }\r\n\r\n if cpus:\r\n _filter['hardware']['processorPhysicalCoreAmount'] = \\\r\n query_filter(cpus)\r\n\r\n if memory:\r\n _filter['hardware']['memoryCapacity'] = query_filter(memory)\r\n\r\n if hostname:\r\n _filter['hardware']['hostname'] = query_filter(hostname)\r\n\r\n if domain:\r\n _filter['hardware']['domain'] = query_filter(domain)\r\n\r\n if datacenter:\r\n _filter['hardware']['datacenter']['name'] = \\\r\n query_filter(datacenter)\r\n\r\n if nic_speed:\r\n _filter['hardware']['networkComponents']['maxSpeed'] = \\\r\n query_filter(nic_speed)\r\n\r\n if public_ip:\r\n _filter['hardware']['primaryIpAddress'] = \\\r\n query_filter(public_ip)\r\n\r\n if private_ip:\r\n _filter['hardware']['primaryBackendIpAddress'] = \\\r\n query_filter(private_ip)\r\n\r\n kwargs['filter'] = _filter.to_dict()\r\n return self.account.getHardware(**kwargs)",
"def device_setting(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device_setting\"), kwargs)",
"def query_supported_software(self):\n api_uri = self._uri_dict.get('querySupportedSoftware')\n data = {}\n r_data = self._post(api_uri, data)\n return r_data",
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def device_list(mmw):\n\n _device_list = list(mmw.device_and_parameter_info_dict.values())\n assert _device_list, \"Device list is empty\"\n\n for device in _device_list:\n device_address = device['info'].device_address\n device['object_dictionary'] = sod.ObjectDictionary(mmw, device_address)\n device['state_control'] = sst.StateControl(mmw, device_address)\n\n # Get the hardware description data from each node too.\n try: \n hardware_description_data = mmw.get_device_file(device_address, '.hardware_description')\n hardware_description = json.loads(hardware_description_data)\n device['hardware_description'] = hardware_description\n except Exception as e:\n logging.warning(\"Error retrieving .hardware_description: {}\".format(e))\n # If this fails, just ignore it and make the data empty.\n device['hardware_description'] = {}\n\n return _device_list",
"def software_config(self) -> pulumi.Output['outputs.RuntimeSoftwareConfigResponse']:\n return pulumi.get(self, \"software_config\")",
"def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes",
"def fusion_api_get_server_hardware_environmental_config(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/environmentalConfiguration')",
"def feature_list(self):\n components = self._device_info.get(device_data_constants.KEY_COMPONENT, {})\n # Set is_rma_device.\n components['is_rma_device'] = self._is_rma_device\n return self._feature_list.Encode(components)",
"def hardware(self):\n return self._hardware",
"def supported_firmware_interfaces(self):\n return [fake.FakeFirmware] + super().supported_firmware_interfaces",
"def supportedSoftwares():\n return [\"any\"]",
"def supportedSoftwares():\n return [\"any\"]",
"async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)"
] | [
"0.74123716",
"0.6287687",
"0.614474",
"0.6060778",
"0.59741247",
"0.58582383",
"0.5718884",
"0.5666034",
"0.5656206",
"0.56125605",
"0.5581975",
"0.5557086",
"0.54525197",
"0.5431058",
"0.54239696",
"0.5376557",
"0.5361181",
"0.53297424",
"0.53006876",
"0.52842283",
"0.52651304",
"0.5249671",
"0.52366596",
"0.5227438",
"0.51991606",
"0.51966757",
"0.51917934",
"0.51881075",
"0.51881075",
"0.5180999"
] | 0.6545191 | 1 |
Gets the settings that describe the environmental configuration (supported feature set, calibrated minimum & maximum power, location & dimensions, ...) of the server hardware resource. [Arguments] | def fusion_api_get_server_hardware_environmental_config(self, uri, api=None, headers=None):
return self.sh.get(uri=uri, api=api, headers=headers, param='/environmentalConfiguration') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def device_setting(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device_setting\"), kwargs)",
"def configuration(self):\n\t\n limits = dict(\n \t cpu = resource.getrlimit(resource.RLIMIT_CPU)[0],\n\t memory = resource.getrlimit(resource.RLIMIT_AS)[0],\n disk = resource.getrlimit(resource.RLIMIT_FSIZE)[0]\n )\n\t\n\ttmpdir = os.getenv('TMPDIR')\n\tif tmpdir:\n\t tag = os.path.basename(tmpdir)\n\t jobid, taskid, queue = tag.split('.')\n\telse:\n\t jobid = taskid = queue = None\n\t\n\tworkdir = os.getenv('SGE_O_WORKDIR')\n\tif not workdir:\n\t workdir = os.getcwd()\n\t\n\t# Get the real time limit.\n\tif queue is None:\n\t limits['time'] = None\n\telse:\n\t command = \"qconf -sq pa_medium | grep s_rt\"\n\t pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\t time = map(float, stdout.split()[1].split(':'))\n\t time = (time[0]*60.+time[1])*60.+time[2]\n\t limits['time'] = time \n\t\n\treturn dict(\n\t host = os.getenv('HOSTNAME'),\n\t jobid = jobid,\n\t taskid = taskid,\n\t queue = queue,\n\t limits = limits,\n\t tmpdir = tmpdir,\n\t workdir = workdir \n\t)",
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def settings():\r\n\r\n config = cp.ConfigParser()\r\n config.read('settings.ini')\r\n \r\n files = config['files']\r\n model = config['model']\r\n plot = config['plot']\r\n \r\n file_format = files['format']\r\n species_file = r'data/' + files['species file']\r\n reactions_file = r'data/' + files['reactions file']\r\n output_file = 'output/' + files['output file']\r\n model_type = model['model type']\r\n density = model.getfloat('density')\r\n temperature = model.getfloat('temperature')\r\n start_time = model.getfloat('start time')\r\n end_time = model.getfloat('end time')\r\n outfile = plot['outfile for plotting']\r\n\r\n return file_format, species_file, reactions_file, output_file, model_type, density, temperature, start_time, end_time, outfile",
"def settings_information():\n return {\n \"version\": VERSION,\n \"modules_directory\": MODULES_DIR,\n \"web_directory\": WEB_DIR,\n \"dependencies_directory\": DEPENDENCIES_DIR,\n \"bot_directory\": BOT_DIR,\n \"bot_data_directory\": BOT_DATA_DIR,\n \"bot_image_directory\": BOT_IMAGE_DIR,\n \"local_data_directory\": LOCAL_DATA_DIR,\n \"local_data_database_directory\": LOCAL_DATA_DB_DIR,\n \"local_data_log_directory\": LOCAL_DATA_LOG_DIR,\n \"local_data_backup_directory\": LOCAL_DATA_BACKUP_DIR,\n \"database_name\": DB_NAME,\n \"database_file\": DB_FILE,\n \"authentication_base_url\": AUTH_BASE_URL,\n \"authentication_auth_url\": AUTH_AUTH_URL,\n \"tesseract_dependency_directory\": TESSERACT_DEPENDENCY_DIR,\n \"tesseract_directory\": TESSERACT_DIR,\n \"tesseract_path\": TESSERACT_PATH,\n }",
"def get_server_capabilities(self):\n capabilities = {}\n system = self._get_host_details()\n capabilities['server_model'] = system['Model']\n rom_firmware_version = (\n system['Oem']['Hp']['Bios']['Current']['VersionString'])\n capabilities['rom_firmware_version'] = rom_firmware_version\n capabilities.update(self._get_ilo_firmware_version())\n capabilities.update(self._get_number_of_gpu_devices_connected())\n if self._get_tpm_capability():\n capabilities['trusted_boot'] = 'true'\n\n if self._get_cpu_virtualization():\n capabilities['cpu_vt'] = 'true'\n if self._get_nvdimm_n_status():\n capabilities['nvdimm_n'] = 'true'\n try:\n self.get_secure_boot_mode()\n capabilities['secure_boot'] = 'true'\n except exception.IloCommandNotSupportedError:\n # If an error is raised dont populate the capability\n # secure_boot\n pass\n if self._is_sriov_enabled():\n capabilities['sriov_enabled'] = 'true'\n return capabilities",
"def GetRecommendedSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"getRecommendedSettings\", payload=payload, response_object=None\n )",
"def software_config(self) -> pulumi.Output['outputs.RuntimeSoftwareConfigResponse']:\n return pulumi.get(self, \"software_config\")",
"def settings(self) -> Optional[pulumi.Input['ConfigurationServiceSettingsArgs']]:\n return pulumi.get(self, \"settings\")",
"def system_properties(self):\r\n return dict(self._get_system_properties(self.java))",
"def software_config(self) -> Optional[pulumi.Input['RuntimeSoftwareConfigArgs']]:\n return pulumi.get(self, \"software_config\")",
"def getConfig():\n\n config = rFile(\"/var/www/html/config.txt\").split()\n f = int(config[0])\n mode = config[1]\n\n if (f in DABchannels and (mode == \"explore\" or mode == \"monitor\")):\n return f, mode\n else:\n return 227360000, \"explore\" # Kamzik - Bratislava",
"def read_config(*args):\n\n ret = {}\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"config\", \"get\")\n else:\n cmd = _traffic_line(\"-r\")\n\n try:\n for arg in args:\n log.debug(\"Querying: %s\", arg)\n ret[arg] = _subprocess(cmd + [arg])\n except KeyError:\n pass\n\n return ret",
"def settings(environment=None):\n if not environment:\n environment = get_environment()\n loader = Loader()\n return loader.settings(environment)",
"def advanced_settings(self):\n settings = ADVANCEDSETTINGS()\n ckresult(_dll.FMOD_System_GetAdvancedSettings(self._ptr, byref(settings)))\n return settings",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get_settings(self):\n return {\n \"game_name\": self.game_name,\n \"n_epochs\": self.n_epochs,\n \"n_episodes\": self.n_episodes,\n \"n_frames\": self.n_frames,\n \"agent\": self.agent.get_settings(),\n \"results_dir\": self.results_dir,\n \"use_minimal_action_set\": self.use_minimal_action_set,\n }",
"def getParameters():\n paramWindDirection,paramWindSpeed,paramWaterHeight,paramRainFall = syncParameters.readParameters(\"./parameters.json\")\n print(\"WindDir: \"+str(paramWindDirection)+\" WindSpd: \"+str(paramWindSpeed)+\" WaterHeight: \"\n +str(paramWaterHeight)+\" RainFall: \"+str(paramRainFall))\n return paramWindDirection,paramWindSpeed,paramWaterHeight,paramRainFall",
"async def eap_options(request: web.Request) -> web.Response:\n return web.json_response(wifi.EAP_CONFIG_SHAPE, status=200)",
"def get_ha_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/high-availability\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get_system_config(self):\n if self.is_vm_image():\n return None\n sc = self.virtual_environment[self.T_SC]\n # check whether virtual machine is Windows or Linux\n if sc[self.T_SC_OF] == self.WINDOWS:\n system_config = WindowsConfigurationSet(computer_name=sc[self.T_SC_HN],\n admin_password=sc[self.T_SC_UP],\n admin_username=sc[self.T_SC_UN])\n system_config.domain_join = None\n system_config.win_rm = None\n else:\n system_config = LinuxConfigurationSet(host_name=sc[self.T_SC_HN],\n user_name=sc[self.T_SC_UN],\n user_password=sc[self.T_SC_UP],\n disable_ssh_password_authentication=False)\n return system_config",
"def _get_conf(self):\n self.press_conf = self.sysconf['PressureRegulators']\n return self.press_conf['PressureRegulator%d' % self.id_]",
"def pg_resource_settings(self):\n mem = self.server_memory\n max_connections = int(self.postgresql_settings[\"max_connections\"])\n # pgtune isn't available anymore as of Ubuntu 16.04, so calculate a few\n # basic resources dynamically just in case\n return {\n # 25% of available RAM, up to 8GB\n \"shared_buffers\": \"%sMB\" % int(min(mem * 0.25, 8096)),\n # (2*RAM)/max_connections\n \"work_mem\": \"%sMB\" % int((mem * 2) / max_connections),\n # RAM/16 up to 1GB; high values aren't that helpful\n \"maintenance_work_mem\": \"%sMB\" % int(min(mem / 16, 1024)),\n # between 50-75%, should equal free + cached values in `top`\n \"effective_cache_size\": \"%sMB\" % int(mem * 0.7),\n }",
"def get_input_settings(self):\n\n input_settings = {\"name\": name,\n \"start\": self._start_settings, \"parallel\": self._parallel_settings ,\n \"electronic\": self._electronic_settings, \"magnetic\": self._magnetic_settings,\n \"hybrid\": self._hybrid_settings, \"hubbard\": self._hubbard_settings, \"misc_setting\": self._misc_settings}\n return input_settings",
"def config(self, *args):\n if len(args) == 1 and args[0].find('.') >= 0:\n return self._client.execute('showconfig', args[0]).strip()\n \n out = self._client.execute('showconfig', args)\n result = {}\n\n for line in out.splitlines():\n ks, v = line.split('=', 1)\n ks = ks.split('.')\n d = result\n for k in ks[:-1]:\n d = d.setdefault(k, {})\n d[ks[-1]] = v.strip()\n\n return result",
"def get_settings(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout)",
"def server_configurations_management_settings(self) -> Optional['outputs.ServerConfigurationsManagementSettingsResponse']:\n return pulumi.get(self, \"server_configurations_management_settings\")"
] | [
"0.59364355",
"0.5678775",
"0.5656607",
"0.56483084",
"0.5573717",
"0.5573717",
"0.5565864",
"0.556322",
"0.55470175",
"0.5477731",
"0.54643255",
"0.54310477",
"0.54299515",
"0.53964305",
"0.5395247",
"0.5387934",
"0.5375818",
"0.53739554",
"0.5369733",
"0.53642786",
"0.53506815",
"0.53463393",
"0.53384554",
"0.53324157",
"0.53200585",
"0.52990127",
"0.52965826",
"0.52945524",
"0.5279106",
"0.52769923"
] | 0.61910987 | 0 |
Refreshes a specified Server hardware URI [Arguments] | def fusion_api_refresh_server_hardware(self, body={"refreshState": "RefreshPending"}, uri=None, api=None, headers=None):
return self.sh.update(body, uri=uri, api=api, headers=headers, param='/refreshState') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def refresh(self, url, args, cancellationSignal):\n pass",
"def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers)",
"def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)",
"def trigger_reload(server):\n log.info(\"Triggering /reload on %s\", server)\n screenCmd(server, 'reload')",
"def refresh():\n\tsocketio.emit('refresh')\n\treturn status()",
"def fusion_api_patch_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.patch(body, uri, api, headers)",
"def fusion_api_refresh_switch(self, uri, api=None, headers=None):\n return self.switch.refresh(uri, api, headers)",
"def _update(self, host):\n pass",
"def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param=param)",
"def reload(self, hardware_id, post_uri=None, ssh_keys=None):\r\n\r\n config = {}\r\n\r\n if post_uri:\r\n config['customProvisionScriptUri'] = post_uri\r\n\r\n if ssh_keys:\r\n config['sshKeyIds'] = [key_id for key_id in ssh_keys]\r\n\r\n return self.hardware.reloadOperatingSystem('FORCE', config,\r\n id=hardware_id)",
"async def _device_refresh(self, **kwargs):\n\n device_id = self._device_id\n if not device_id:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n if self._use_channel_info:\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=_command(COMMAND_REFRESH),\n raise_for_status=False,\n ) as resp:\n if resp.status == 409:\n self._state = STStatus.STATE_OFF\n return\n resp.raise_for_status()\n await resp.json()\n\n return",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"URI\")\n args = parser.parse_args()\n smart_client(args.URI)",
"def fusion_api_refresh_power_device(self, body, uri, api=None, headers=None):\n return self.pd.update(body=body, uri=uri, api=api, headers=headers, param='/refreshState')",
"def command_update_hw(self, cmd):\n # TODO\n pass",
"def refresh():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"config\", \"reload\")\n else:\n cmd = _traffic_line(\"-x\")\n\n return _subprocess(cmd)",
"def Refresh(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"refresh\", payload=payload, response_object=None)",
"def reinstall_host(self, hostid, config, **kwargs):\n pass",
"def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def update_endpoint_in_sipserver(self, endpoint: str, password: str) -> None:",
"def refresh():\n return __apf_cmd(\"-e\")",
"async def _service_refresh(self, part: str) -> None:\n _LOGGER.debug(\"Manually refresh %s\", part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s', part)",
"def update_firmware(self) -> str:",
"def cli(since, _input, digests):\n if \"://\" in _input:\n coro = make_digest([_input])\n else:\n coro = make_digests_from_config(_input, digests or None, since=since)\n\n main_run(coro)",
"def refresh(dataset, client):\n pass",
"def main():\n\n parser = OptionParser(description=\n \"Purge a single url from fastly.\")\n parser.add_option(\"-k\", \"--key\", dest=\"apikey\",\n default=\"\", help=\"fastly api key\")\n parser.add_option(\"-H\", \"--host\", dest=\"host\",\n help=\"host to purge from\")\n parser.add_option(\"-p\", \"--path\", dest=\"path\",\n help=\"path to purge\")\n\n (options, args) = parser.parse_args()\n for val in options.__dict__.values():\n if val is None:\n print \"Missing required options\"\n parser.print_help()\n sys.exit(1)\n\n client = fastly.connect(options.apikey)\n purge = client.purge_url(options.host, options.path)\n print purge",
"def command_reload(interface,command,args):\n command_unload(interface,command,args)\n command_load(interface,command,args)",
"def RefreshChassisTopology(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"refreshChassisTopology\", payload=payload, response_object=None\n )",
"def command_refresh_repo(self):\n repoinit.refresh(*self.args())",
"def main():\n\n context = yield from Context.create_client_context()\n\n yield from asyncio.sleep(2)\n\n payload = b\"0\"\n request = Message(code=PUT, payload=payload)\n request.opt.uri_host = '192.168.3.2'\n request.opt.uri_path = (\"nodes\", \"48102\", \"humidity\")\n\n response = yield from context.request(request).response\n\n print('Result: %s\\n%r'%(response.code, response.payload))"
] | [
"0.5874703",
"0.56862247",
"0.5517887",
"0.54256845",
"0.5390688",
"0.534994",
"0.5307161",
"0.52816164",
"0.5278022",
"0.5210121",
"0.5091328",
"0.5082049",
"0.50692445",
"0.50429875",
"0.5012613",
"0.49633986",
"0.49625674",
"0.4957929",
"0.4926195",
"0.48844868",
"0.47894317",
"0.47720686",
"0.47607315",
"0.474331",
"0.47367582",
"0.4735847",
"0.47255445",
"0.4723792",
"0.47105908",
"0.47066543"
] | 0.6275564 | 0 |
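A minimal, runnable sketch of the refresh-call pattern in the record above. The stub class, the standalone function wiring, and the example URI are assumptions added for illustration; they are not part of the dataset record:

class _StubServerHardware:
    """Stand-in for the real `self.sh` helper; echoes the request it would send."""
    def update(self, body, uri=None, api=None, headers=None, param=''):
        return {"method": "PUT", "uri": (uri or "") + param, "body": body}

def fusion_api_refresh_server_hardware(sh, body=None, uri=None, api=None, headers=None):
    # Mirrors the document field above: PUT the refresh state to <uri>/refreshState.
    body = body if body is not None else {"refreshState": "RefreshPending"}
    return sh.update(body, uri=uri, api=api, headers=headers, param='/refreshState')

print(fusion_api_refresh_server_hardware(_StubServerHardware(),
                                         uri="/rest/server-hardware/example-id"))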
Returns an existing Logical Enclosure. [Arguments] [Example] ${resp} = Fusion Api Get Logical Enclosure | | | | | def fusion_api_get_logical_enclosure(self, uri=None, api=None, headers=None, param=''):
return self.logical_enclosure.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, args, intent):\n\n try:\n db = get_db('expressions')\n db_results = db.get_intent_expressions(intent)\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions)\n resp.status_code = 200\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp",
"def alert_get_details_v1_command(client: Client, args: Dict[str, Any], return_v1_output: bool) -> Union[CommandResults, Dict]:\n new_args = {\n 'alert_id': args.get('alert-id')\n }\n\n command_results = alert_get_details_command(client, new_args)\n if return_v1_output:\n response = command_results.raw_response\n command_results = command_results.to_context()\n command_results['EntryContext'].update( # type: ignore[index]\n {'Redlock.Alert(val.ID === obj.ID)': alert_to_v1_context(response, args)})\n return command_results",
"def fusion_api_get_logical_downlink(self, uri=None, api=None, headers=None, param=''):\n return self.ld.get(uri=uri, api=api, headers=headers, param=param)",
"def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=True)\n return hc.get(req, resp)",
"def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=False)\n return hc.get(req, resp)",
"def Run(self, args):\n identifiers = args.CONCEPTS.api.Parse().AsDict()\n\n result = apigee.APIsClient.Describe(identifiers)\n\n # Must use vars(args) to check whether there's even a revision field in the\n # parsed args namespace. It's only present for ALPHA track.\n requested_revision = None\n if \"revision\" in vars(args):\n requested_revision = args.revision\n\n # If the user didn't ask for revision data, the response from\n # APIsClient.Describe() is good enough.\n if requested_revision is None and not args.verbose:\n return result\n\n rev_nums = result[\"revision\"]\n if requested_revision is not None:\n if requested_revision not in rev_nums:\n message = \"No revision %r among API %s's revisions: %s\"%(\n requested_revision, identifiers[\"apisId\"], rev_nums)\n raise exceptions.InvalidArgumentException(\"--revision\", message)\n # No need to check whether this revision exists within the original list;\n # if there's no such revision, RevisionsClient will raise an appropriate\n # error.\n rev_nums = [requested_revision]\n\n revisions = []\n for revision in rev_nums:\n identifiers[\"revisionsId\"] = revision\n revision_result = apigee.RevisionsClient.Describe(identifiers)\n del revision_result[\"name\"]\n revisions.append(revision_result)\n del result[\"revision\"]\n result[\"revisions\"] = revisions\n\n return result",
"async def get_inventory(request: web.Request, ) -> web.Response:\n return web.Response(status=200)",
"def fusion_api_get_sas_li_logical_drive_enclosures(self, uri=None, param='', api=None, headers=None):\n param = \"/logical-drive-enclosures%s\" % param\n return self.sasli.get(uri=uri, param=param, api=api, headers=headers)",
"def healthcare():",
"def get_embedding(self, resp):\n\n feed_dict = {self.anchor: resp}\n embedding = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n return embedding",
"async def excuse(self, mask, target, args):\n url = \"https://api.githunt.io/programmingexcuses\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n return await response.text()",
"def get(last_name, first_name):\n hr = HRRepository.get(last_name=last_name, first_name=first_name)\n # server.logger.info(json.dumps(hr))\n # server.logger.info(hr)\n try:\n res = jsonify({\"data\": hr.json, \"status\": \"success\"})\n except:\n res = jsonify({\"hr\": hr})\n return make_response(res, 200)",
"def get_lab_output(name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLabResult]:\n ...",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure and attributes\n # In real life, this code will be preceded by SNMP/other calls to the resource details and will not be static\n # run 'shellfoundry generate' in order to create classes that represent your data model\n\n '''\n resource = LanforgeResource.create_from_context(context)\n resource.vendor = 'specify the shell vendor'\n resource.model = 'specify the shell model'\n\n port1 = ResourcePort('Port 1')\n port1.ipv4_address = '192.168.10.7'\n resource.add_sub_resource('1', port1)\n\n return resource.create_autoload_details()\n '''\n return AutoLoadDetails([], [])",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', 
port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)",
"def get_hostname_from_sha(query_params=\"\",\n host=env.AMP.get(\"host\"),\n client_id=env.AMP_CLIENT_ID,\n api_key=env.AMP_API_KEY,\n):\n print(\"\\n==> Getting events from AMP\")\n i_got_it=0\n if i_got_it==0:\n print(cyan(env.get_line(),bold=True))\n print (yellow(\"Second : Call the correct AMP API with the correct syntax...The API call which gives you the list of all infected endpoints \"))\n print()\n print (yellow(\"Hint :\"))\n print (yellow(\"https://api-docs.amp.cisco.com/api_actions/details?api_action=GET+%2Fv1%2Fevents&api_host=api.eu.amp.cisco.com&api_resource=Event&api_version=v1\"))\n print()\n print (yellow(\"Change the value of i_got_it to 1 in order to move forward\"))\n sys.exit() \n #url = f\"https://{client_id}:{api_key}@{host}/v1/events\"\n response = requests.get(url, params=query_params, verify=False)\n if debug:\n print(cyan(env.get_line(),bold=True))\n print(cyan(response.json())) \n # Consider any status other than 2xx an error\n response.raise_for_status()\n events_list = response.json()\n if debug: \n events_list = response.json()\n print(green((events_list)))\n for events in events_list:\n #hostname=event['computer']['hostname'] \n print(red(events))\n '''\n hostname=response.json()['data'][0]['computer']['hostname']\n return hostname \n '''\n events_list = response.json()['data']\n return events_list",
"def getResourceDef(url, user, pWd, resourceName):\n \n print(\"getting resource for catalog:-\" + url + \" resource=\" + resourceName +\n ' user=' + user)\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n # print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\"} \n tResp = requests.get(apiURL, params={}, headers=header, auth=HTTPBasicAuth(user,pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n return tResp.status_code, json.loads(tResp.text)\n else:\n # not valid\n return tResp.status_code, None",
"def alert_search_v1_command(client: Client, args: Dict[str, Any], return_v1_output: bool) -> \\\n Union[CommandResults, List[Union[CommandResults, str]], Dict]:\n new_args = {\n 'time_range_unit': args.get('time-range-unit'),\n 'time_range_value': args.get('time-range-value'),\n 'time_range_date_from': args.get('time-range-date-from'),\n 'time_range_date_to': args.get('time-range-date-to'),\n 'filters': get_v1_filters(args),\n 'limit': args.get('limit', DEFAULT_LIMIT),\n 'detailed': 'true'\n }\n\n command_results = alert_search_command(client, new_args)\n if return_v1_output:\n response = command_results.raw_response\n\n context_path = 'Redlock.Alert(val.ID === obj.ID)'\n context: dict = {context_path: []}\n for alert in response: # type: ignore[attr-defined]\n context[context_path].append(alert_to_v1_context(alert, args))\n context['Redlock.Metadata.CountOfAlerts'] = len(response) # type: ignore[arg-type]\n\n command_results = command_results.to_context()\n command_results['EntryContext'].update(context) # type: ignore[index]\n\n if args.get('risk-grade'):\n return [RISK_GRADE_NOT_SUPPORTED_MSG, command_results]\n\n return command_results",
"async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})",
"def microsoft_365_defender_incidents_list_command(client: Client, args: dict) -> CommandResults:\n limit = arg_to_number(args.get('limit', MAX_ENTRIES), arg_name='limit', required=True)\n status = args.get('status')\n assigned_to = args.get('assigned_to')\n offset = arg_to_number(args.get('offset'))\n timeout = arg_to_number(args.get('timeout', TIMEOUT))\n odata = args.get('odata')\n\n if odata:\n try:\n odata = json.loads(odata)\n except json.JSONDecodeError:\n return_error(f\"Can't parse odata argument as JSON array.\\nvalue: {odata}\")\n\n response = client.incidents_list(limit=limit, status=status, assigned_to=assigned_to,\n skip=offset, timeout=timeout, odata=odata)\n\n raw_incidents = response.get('value')\n readable_incidents = [convert_incident_to_readable(incident) for incident in raw_incidents]\n if readable_incidents:\n headers = list(readable_incidents[0].keys()) # the table headers are the incident keys.\n human_readable = tableToMarkdown(name=\"Incidents:\", t=readable_incidents, headers=headers)\n\n else:\n human_readable = \"No incidents found\"\n\n return CommandResults(outputs_prefix='Microsoft365Defender.Incident', outputs_key_field='incidentId',\n outputs=raw_incidents, readable_output=human_readable)",
"def get_presence_examode_concepts(request):\n\n json_resp = {}\n json_resp['concepts'] = get_presence_exa_concepts()\n json_resp['labels'] = get_presence_exa_labels()\n # print(json_resp)\n return JsonResponse(json_resp)",
"def function_hola():\n return jsonify({\"status\": \"OK\"})",
"def get_resource_state():\n output = [f'{\"S. No.\":6}\\t{\"Resource\":50}\\t{\"Health State\":12}\\t{\"Reason\":100}\\n']\n\n for index, resource in enumerate(HEALTH_AGGREGATOR.resource_state):\n output.append(\n f'{index + 1:<6}\\t{resource:<50}\\t'\n f'{\"Healthy\" if HEALTH_AGGREGATOR.resource_state[resource][\"is_healthy\"] else \"Unhealthy\":<12}\\t'\n f'{HEALTH_AGGREGATOR.resource_state[resource][\"reason\"]:<100}\\n'\n )\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')",
"def clientHelloResp(n, e):\n status = \"105 Hello \"+ str(n) + \" \" + str(e)\n return status",
"def clientHelloResp(n, e):\n status = \"105 Hello \"+ str(n) + \" \" + str(e)\n return status",
"def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')",
"def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')",
"def Run(self, args):\n p = parent.GetParent(args)\n return requests.List(parent=p, filter=(\n args.state.upper() if args.state else None))",
"def get_scan_enrollments_internal(session):\n LOG.debug(\"Exporting enrollment metadata for SCAN internal dashboard\")\n\n enrollments = datastore.fetch_rows_from_table(session, (\"shipping\", \"scan_redcap_enrollments_v1\"))\n\n return Response((row[0] + '\\n' for row in enrollments), mimetype=\"application/x-ndjson\")",
"def test_rsp_unknown_status(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test\"\n ds.SOPClassUID = DisplaySystem\n ds.SOPInstanceUID = \"1.2.3.4\"\n return 0xFFF0, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0xFFF0\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()"
] | [
"0.5261378",
"0.4999249",
"0.4968407",
"0.48965645",
"0.48746166",
"0.48531166",
"0.48407775",
"0.47135535",
"0.468885",
"0.46632856",
"0.4644957",
"0.462702",
"0.46217865",
"0.46129304",
"0.46071875",
"0.45919973",
"0.45574743",
"0.45559263",
"0.4542049",
"0.45400122",
"0.45324323",
"0.451946",
"0.4516413",
"0.4508088",
"0.4508088",
"0.44766942",
"0.44766942",
"0.4475926",
"0.44722483",
"0.44702584"
] | 0.5524138 | 0 |
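A hedged sketch of how the GET helper above might be exercised. The stub client and the paging query string are assumptions, not taken from the record:

class _StubLogicalEnclosure:
    """Stand-in for the real `self.logical_enclosure` helper."""
    def get(self, uri=None, api=None, headers=None, param=''):
        return {"method": "GET", "uri": (uri or "/rest/logical-enclosures") + param}

def fusion_api_get_logical_enclosure(le, uri=None, api=None, headers=None, param=''):
    return le.get(uri=uri, api=api, headers=headers, param=param)

# With no uri the call targets the collection; param can carry paging filters.
resp = fusion_api_get_logical_enclosure(_StubLogicalEnclosure(), param='?start=0&count=16')
print(resp)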
Removes an existing Logical Enclosure. [Arguments] [Example] ${resp} = Fusion Api Delete Logical Enclosure | | | | | | def fusion_api_delete_logical_enclosure(self, name=None, uri=None, param='', api=None, headers=None):
return self.logical_enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_delete():\n req_data = request.get_json()\n print('This is the request itself \\n', req_data)\n print(req_data['name'])\n flask_wms.delete_entry(req_data['name'])\n return 'Request recieved, delete method'",
"def test_delete_entity(self):\n\n storage = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL,\"http://127.0.0.1:8090/compute/this_is_bilel\")\n c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])\n c.setopt(c.CUSTOMREQUEST, 'DELETE')\n c.setopt(c.WRITEFUNCTION, storage.write)\n c.perform()\n content = storage.getvalue()\n print \" ===== Body content =====\\n \" + content + \" ==========\\n\"",
"def delete(cm_response, **data):\n return cm_response",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")",
"def sr_remove_c():\n req_data = request.get_json()\n logging.debug(\"req_data = \" + str(req_data))\n\n product_name = req_data['product_name']\n version_number = req_data['version_number']\n name = req_data['name']\n version = req_data['version']\n destination = req_data['destination']\n\n outcome = {\"name\": \"Fail\"}\n\n try:\n # create new association\n c_id = Component.query.filter_by(name=name, version=version).first().id\n sr_id = SoftwareRelease.query.filter_by(product_name=product_name, version_number=version_number).first().id\n a = Association.query.filter_by(software_release_id=sr_id, component_id=c_id, destination=destination).first()\n\n db.session.delete(a)\n\n db.session.commit()\n outcome['name'] = \"Success\"\n except:\n db.session.rollback()\n raise\n finally:\n db.session.close()\n return jsonify(outcome)",
"def DELETE(self, req):\r\n req.headers['X-Remove-Container-Meta-Access-Control-Allow-Origin'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Allow-Methods'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Allow-Headers'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Expose-Headers'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Max-Age'] = 'x'\r\n\r\n resp = req.get_response(self.app, method='POST', headers=req.headers)\r\n\r\n return resp",
"def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )",
"def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )",
"def delete(self, args, intent):\n if 'all' in args.keys() and args['all'] == True:\n try:\n db = get_db('expressions')\n db_results = db.delete_all_intent_expressions(intent)\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions)\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp \n elif args['expressions']:\n try:\n db = get_db('expressions')\n db_results = db.delete_expressions_from_intent(intent, args['expressions'])\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions, deleted_expressions=args['expressions'])\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp",
"def DeleteVariable(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def logical_drive_delete(controller_id, logical_drive_id):\n try:\n ld = controller.LogicalDrive(controller_id, logical_drive_id)\n info = ld.get_info()\n x = ld.delete()\n return dict(x)\n except:\n return {\"controller\": controller_id,\n \"logical_drive\": logical_drive_id,\n \"status\": \"Failed to delete\"}",
"def delete(self, **kwargs):\n url_str = self.base_url + \"/%s\" % kwargs['definition_id']\n newheaders = self.get_headers()\n resp, body = self.client.json_request('DELETE', url_str,\n headers=newheaders)\n return resp",
"def DELETE(self, env, start_response):\n key_args = set(['cors','lifecycle','policy','tagging','website'])\n\n qs = env.get('QUERY_STRING', '')\n args = urlparse.parse_qs(qs, 1)\n\n if not key_args & set(args):\n # DELETE a Bucket\n version = args.get('versionId')\n if version:\n vid = version[0]\n if vid.lower() == 'lastest':\n pass\n else:\n env['PATH_INFO'] = '/v1/AUTH_%s/%s/%s' % (quote(self.account_name),\n quote(self.version_name(self.container_name)),\n vid)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if status != HTTP_NO_CONTENT:\n if status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n elif status == HTTP_NOT_FOUND:\n return self.get_err_response('NoSuchBucket')\n elif status == HTTP_CONFLICT:\n return self.get_err_response('BucketNotEmpty')\n else:\n return self.get_err_response('InvalidURI')\n\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n else:\n # DELETE specified data\n action = args.keys().pop()\n if action == 'cors':\n # delete cors\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_ORIGIN'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_MAX_AGE'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_EXPOSE_HEADERS'] = ''\n env['HTTP_X_CONTAINER_META_ACCESS_CONTROL_ALLOW_METHOD'] = ''\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'lifecycle':\n # delete lifecycle\n env['HTTP_X_CONTAINER_META_TRANS_AT'] = ''\n env['HTTP_X_CONTAINER_META_TRANS_AFTER'] = ''\n env['HTTP_X_CONTAINER_META_TRANS_CLASS'] = ''\n\n env['HTTP_X_CONTAINER_META_EXPIRATION_AT'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_AFTER'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_PREFIX'] = ''\n env['HTTP_X_CONTAINER_META_EXPIRATION_STATUS'] = ''\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'policy':\n # delete policy\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_POLICY'] = ''\n body_iter = self._app_call(env)\n status = self._get_status_int()\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n elif action == 'tagging':\n # delete tagging\n env2 = copy(env)\n container_info = get_container_info(env2, self.app)\n meta_keys = container_info['meta'].keys()\n for key in meta_keys:\n env['HTTP_X_CONTAINER_META_' + key.replace('-', '_').upper()] = ''\n env['QUERY_STRING'] = ''\n env['REQUEST_METHOD'] = 'POST'\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_NO_CONTENT\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return 
self.get_err_response('InvalidURI')\n elif action == 'website':\n # delete website\n body = env['wsgi.input'].read()\n env['REQUEST_METHOD'] = 'POST'\n env['QUERY_STRING'] = ''\n env['HTTP_X_CONTAINER_META_WEBSITE'] = quote(body)\n\n body_iter = self._app_call(env)\n status = self._get_status_int()\n\n if is_success(status):\n resp = Response()\n resp.status = HTTP_OK\n return resp\n elif status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):\n return self.get_err_response('AccessDenied')\n else:\n return self.get_err_response('InvalidURI')\n else:\n return self.get_err_response('InvalidURI')",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete(self, op_id: str) -> Response:\n\n authorized: bool = Users.objects.get(id=get_jwt_identity()).roles.organization or \\\n Users.objects.get(id=get_jwt_identity()).roles.admin\n\n if authorized:\n try:\n output = Opportunity.objects.get(id=op_id).delete()\n except ValidationError as e:\n return bad_request(e.message)\n return jsonify(output)\n else:\n return forbidden()",
"def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)",
"def delete_legislation(self, expr_uri):\n resp = self.session.delete(self.url + expr_uri, timeout=self.timeout)\n self.check_for_error(resp)",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def remove(self, spo, context=None):\n\n uri = self.rest_services[\"statements\"]\n s,p,o = spo\n payload = dict()\n if s:\n payload[\"subj\"] = s.n3()\n if p:\n payload[\"pred\"] = p.n3()\n if o:\n payload[\"obj\"] = o.n3()\n if context:\n payload[\"context\"] = [context.n3()]\n\n #data = \" \".join(i.n3() for i in spo) +\" .\"\n #print(data)\n r = requests.delete(uri, params=payload)",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def unset(cls, client, resource, args) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tunsetresource = l3param()\n\t\t\t\treturn unsetresource.unset_resource(client, args)\n\t\texcept Exception as e :\n\t\t\traise e",
"def complete_remove_entity(self, \n coll_id, type_id, entity_id, \n default_continuation_url, request_params):\n continuation_url = (\n request_params.get('completion_url', None) or\n default_continuation_url\n )\n continuation_url_params = continuation_params(request_params)\n viewinfo = DisplayInfo(self, \"delete\", request_params, continuation_url)\n viewinfo.get_site_info(self.get_request_host())\n viewinfo.get_coll_info(coll_id)\n viewinfo.get_request_type_info(type_id)\n viewinfo.check_authorization(\"delete\")\n if viewinfo.http_response:\n return viewinfo.http_response\n typeinfo = viewinfo.curr_typeinfo\n message_vals = {'id': entity_id, 'type_id': type_id, 'coll_id': coll_id}\n messages = (\n { 'entity_removed': typeinfo.entitymessages['entity_removed']%message_vals\n })\n err = typeinfo.entityclass.remove(typeinfo.entityparent, entity_id)\n if err:\n return self.redirect_error(\n continuation_url, continuation_url_params, error_message=str(err)\n )\n return self.redirect_info(\n continuation_url, continuation_url_params, \n info_message=messages['entity_removed']\n )",
"def delete(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(\n context, id_)\n\n if db_resource_data['type'] == (eon_const.\n EON_RESOURCE_TYPE_ESX_CLUSTER):\n msg = _(\"Delete operation not supported for type %s\"\n % db_resource_data['type'])\n raise exception.DeleteException(err=msg)\n\n _resource_data = _make_response(\n db_resource_data)\n _resource_data_log = deepcopy(_resource_data)\n _resource_data_log.pop(\"meta_data\", None)\n LOG.info(\"Details for the ID %s is: %s\" % (\n id_, logging.mask_password(_resource_data_log)))\n driver_obj = driver.load_resource_driver(\n db_resource_data['type'])\n driver_obj.validate_delete(db_resource_data)\n driver_obj.delete(context, id_)\n self.db_api.delete_resource(context, id_)\n # delete the data from hlm input model\n try:\n LOG.info(\"[%s] remove resource from input model\" % id_)\n hux_obj = HLMFacadeWrapper(context)\n resource_id = db_resource_data[eon_const.EON_RESOURCE_ID]\n hux_obj.delete_server(resource_id)\n hux_obj.commit_changes(resource_id, \"Delete compute resource\")\n except facade_excep.NotFound:\n # log and do nothing\n LOG.warn(\"[%s] resource not found in hlm input model\" % id_)\n LOG.info(\"[%s]: Deleted resource from eon\" % id_)\n # Notify the message to consumers\n try:\n message = {\"resource_id\": id_,\n \"resource_state\": eon_const.EON_RESOURCE_STATE_REMOVED,\n \"resource_details\": _resource_data,\n }\n message_notifier.notify(context,\n message_notifier.EVENT_PRIORITY_INFO,\n message_notifier.EVENT_TYPE[\n 'removed'],\n message)\n except Exception as ex:\n LOG.exception(\n \"Exception while notifying the message : %s\" % ex)\n except exception.NotFound as e:\n msg = (\"Failed to delete resource %s. Error: %s\") % (\n _resource_data['name'], e.message)\n LOG.exception(msg)\n raise e",
"def remove(self, **kwargs):\n\n host = self.get()\n if not host:\n self.raiseNotFoundError()\n\n if host.status.state != 'maintenance':\n host.deactivate()\n StatusUtils.wait(self.get, 'maintenance')\n\n # delete\n response = host.delete()\n\n # wait till gone\n StatusUtils.waitRemoved(self.get)\n\n return response",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass"
] | [
"0.58386034",
"0.5591081",
"0.5581158",
"0.5552778",
"0.55016667",
"0.548756",
"0.54817337",
"0.5416942",
"0.5416942",
"0.53931206",
"0.5359185",
"0.52967477",
"0.52838844",
"0.5274969",
"0.5272591",
"0.5265596",
"0.5246178",
"0.52439135",
"0.5243358",
"0.5240382",
"0.523309",
"0.52310544",
"0.5213329",
"0.521102",
"0.52092683",
"0.52034384",
"0.519218",
"0.5182471",
"0.5166523",
"0.5165199"
] | 0.6576194 | 0 |
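A small sketch of the delete helper above, showing the name-or-uri dispatch the signature implies. The stub client and the name-to-URI lookup are assumptions for illustration only:

class _StubLogicalEnclosure:
    """Stand-in for the real `self.logical_enclosure` helper."""
    def delete(self, name=None, uri=None, param='', api=None, headers=None):
        # Assumed behavior: when no uri is given, the helper resolves it by name.
        target = uri if uri else "<uri resolved from name %r>" % name
        return {"method": "DELETE", "target": target + param}

def fusion_api_delete_logical_enclosure(le, name=None, uri=None, param='', api=None, headers=None):
    return le.delete(name=name, uri=uri, param=param, api=api, headers=headers)

print(fusion_api_delete_logical_enclosure(_StubLogicalEnclosure(), name="LE-example"))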
Update a logical enclosure. [Arguments] | def fusion_api_update_logical_enclosure(self, body, uri, param='', api=None, headers=None, etag=None):
return self.logical_enclosure.put(body, uri, param, api, headers, etag) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_edit_enclosure(self, body, uri, api=None, headers=None):\n return self.enclosure.update(body, uri, api, headers)",
"def fusion_api_update_logical_enclosure_from_group(self, uri=None, api=None, headers=None):\n param = '/updateFromGroup'\n return self.logical_enclosure.put(body=None, uri=uri, param=param, api=api, headers=headers)",
"def update(*args):",
"def update(self, operation, operand0, operand1, operand2):\n self.operation = operation\n self.operand0 = operand0\n self.operand1 = operand1\n self.operand2 = operand2",
"def update(self, v, r):\n pass",
"def update_E(self):",
"def update(self, *args, **kwargs):\n # callable, but does nothing by default",
"def setOp(self, value):\n raise UnsupportedOperationException(\"Cannot change operator status of a block\")",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update():",
"def update():",
"def update(self, *args, **kwargs):",
"def update(self, *args, **kw):\n pass",
"def update(self, other):\n if self.active:\n self.active.functor.update(other)",
"def update(self, x):\n pass",
"def fusion_api_edit_enclosure_group(self, body, uri, api=None, headers=None):\n return self.enclosure_group.update(body, uri, api, headers)",
"def update( ):\r\n pass",
"def toggle(*args, above: bool=True, below: bool=True, boundary: bool=True, boundingBox:\n bool=True, controlVertex: bool=True, doNotWrite: bool=True, editPoint: bool=True,\n extent: bool=True, facet: bool=True, geometry: bool=True, gl: bool=True,\n highPrecisionNurbs: bool=True, hull: bool=True, latticePoint: bool=True,\n latticeShape: bool=True, localAxis: bool=True, newCurve: bool=True, newPolymesh:\n bool=True, newSurface: bool=True, normal: bool=True, origin: bool=True, point:\n bool=True, pointDisplay: bool=True, pointFacet: bool=True, rotatePivot: bool=True,\n scalePivot: bool=True, selectHandle: bool=True, state: bool=True, surfaceFace:\n bool=True, template: bool=True, uvCoords: bool=True, vertex: bool=True, q=True,\n query=True, **kwargs)->Union[bool, Any]:\n pass",
"def fusion_api_patch_enclosure(self, body, uri, api=None, headers=None, etag=None):\n return self.enclosure.patch(body, uri, api, headers, etag)",
"def update(self,update_flags):\n pass",
"def setDirty(self, *args, **kwargs):\n assert self.operator is not None, (\"Slot '{}' cannot be set dirty,\"\n \" slot not belonging to any\"\n \" actual operator instance\".format(self.name))\n\n if self.stype.isConfigured():\n if len(args) == 0 or not isinstance(args[0], rtype.Roi):\n roi = self.rtype(self, *args, **kwargs)\n else:\n roi = args[0]\n\n for c in self.partners:\n c.setDirty(roi)\n\n # call callbacks\n self._sig_dirty(self, roi)\n\n if self._type == \"input\" and self.operator.configured():\n self.operator.propagateDirty(self, (), roi)",
"def Modified(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Modified(self, *args)",
"def update(self):\n self._is_on = self._is_on",
"def update(self, *args, **kwargs):\n raise NotImplementedError",
"def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')"
] | [
"0.5908056",
"0.55920726",
"0.54749054",
"0.54528177",
"0.54515225",
"0.5351787",
"0.534792",
"0.53248125",
"0.5323249",
"0.5323249",
"0.5323249",
"0.5323249",
"0.5323249",
"0.5323249",
"0.53174627",
"0.53174627",
"0.53081334",
"0.53021306",
"0.5227843",
"0.5209723",
"0.5198339",
"0.5175297",
"0.51303595",
"0.512267",
"0.5120642",
"0.5072628",
"0.50570637",
"0.50453043",
"0.50340956",
"0.5032574"
] | 0.69521064 | 0 |
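A hedged sketch of the PUT-with-etag pattern above. The stub client is an assumption, and sending the etag as an If-Match header is an assumed optimistic-locking convention, not something the record states:

class _StubLogicalEnclosure:
    """Stand-in for the real `self.logical_enclosure` helper."""
    def put(self, body, uri, param='', api=None, headers=None, etag=None):
        hdrs = dict(headers or {})
        if etag is not None:
            hdrs["If-Match"] = etag  # assumption: etag guards against concurrent edits
        return {"method": "PUT", "uri": uri + param, "body": body, "headers": hdrs}

def fusion_api_update_logical_enclosure(le, body, uri, param='', api=None, headers=None, etag=None):
    return le.put(body, uri, param, api, headers, etag)

print(fusion_api_update_logical_enclosure(
    _StubLogicalEnclosure(),
    body={"name": "LE-example-renamed"},
    uri="/rest/logical-enclosures/example-id",
    etag='W/"12345"'))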
Updates the logical enclosure to match its enclosure group. [Arguments] | def fusion_api_update_logical_enclosure_from_group(self, uri=None, api=None, headers=None):
param = '/updateFromGroup'
return self.logical_enclosure.put(body=None, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_edit_enclosure_group(self, body, uri, api=None, headers=None):\n return self.enclosure_group.update(body, uri, api, headers)",
"def command_group(group_name):\n def wrapper(func):\n func.group_name = group_name\n return func\n return wrapper",
"def fusion_api_update_logical_enclosure(self, body, uri, param='', api=None, headers=None, etag=None):\n return self.logical_enclosure.put(body, uri, param, api, headers, etag)",
"def keyingGroup(*args, activator: Union[name, bool]=None, addElement: name=None, afterFilters:\n bool=True, category: Union[AnyStr, bool]=\"\", clear: name=None, color: Union[int,\n bool]=0, copy: name=None, edges: bool=True, editPoints: bool=True, empty:\n bool=True, excludeDynamic: bool=True, excludeRotate: bool=True, excludeScale:\n bool=True, excludeTranslate: bool=True, excludeVisibility: bool=True, facets:\n bool=True, flatten: name=None, forceElement: name=None, include: name=None,\n intersection: name=None, isIntersecting: name=None, isMember: name=None, layer:\n bool=True, minimizeRotation: bool=True, name: AnyStr=\"\", noSurfaceShader:\n bool=True, noWarnings: bool=True, nodesOnly: bool=True, remove: name=None,\n removeActivator: name=None, renderable: bool=True, setActiveFilter:\n Union[AnyStr, bool]=\"\", size: bool=True, split: name=None, subtract: name=None,\n text: Union[AnyStr, bool]=\"\", union: name=None, vertices: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def group(self, *args, **kwargs):\n def decorator(f):\n cmd = group( *args, **kwargs )( f )\n self.add_command(cmd)\n return cmd\n return decorator",
"def reapply(self, circ):\n self._modifiers(circ.s(self.qargs[0]))",
"def fusion_api_edit_enclosure(self, body, uri, api=None, headers=None):\n return self.enclosure.update(body, uri, api, headers)",
"def add_args_to_group(cls, group: \"ArgumentGroup\") -> None:\n # group.description = 'For `Architect`, you can supply...'\n # group.add_argument('--server-option', help='Lets you customize')\n return",
"def __setitem__(self, name, attribs):\n \n assert(type(attribs) is list)\n \n self.register(Command(*([name] + attribs)))",
"def outlinerEditor(*args, allowMultiSelection: bool=True, alwaysToggleSelect: bool=True,\n animLayerFilterOptions: Union[AnyStr, bool]=\"\", attrAlphaOrder: Union[AnyStr,\n bool]=\"\", attrFilter: Union[AnyStr, bool]=\"\", autoExpand: bool=True,\n autoExpandLayers: bool=True, autoSelectNewObjects: bool=True,\n containersIgnoreFilters: bool=True, control: bool=True, defineTemplate:\n AnyStr=\"\", directSelect: bool=True, displayMode: Union[AnyStr, bool]=\"\",\n doNotSelectNewObjects: bool=True, docTag: Union[AnyStr, bool]=\"\",\n dropIsParent: bool=True, editAttrName: bool=True, exists: bool=True,\n expandAllItems: bool=True, expandAllSelectedItems: bool=True,\n expandAttribute: bool=True, expandConnections: bool=True, expandObjects:\n bool=True, feedbackItemName: bool=True, feedbackRowNumber: bool=True,\n filter: Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n getCurrentSetOfItem: Union[int, bool]=0, highlightActive: bool=True,\n highlightConnection: Union[AnyStr, bool]=\"\", highlightSecondary: bool=True,\n ignoreDagHierarchy: bool=True, ignoreHiddenAttribute: bool=True,\n ignoreOutlinerColor: bool=True, isChildSelected: Union[name, bool]=None,\n isSet: Union[int, bool]=0, isSetMember: Union[int, bool]=0,\n lockMainConnection: bool=True, longNames: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", mapMotionTrails: bool=True, masterOutliner:\n Union[AnyStr, bool]=\"\", niceNames: bool=True, object: Union[name, bool]=None,\n organizeByClip: bool=True, organizeByLayer: bool=True, panel: Union[AnyStr,\n bool]=\"\", parent: Union[AnyStr, bool]=\"\", parentObject: bool=True, pinPlug:\n Union[name, bool]=None, refresh: bool=True, removeFromCurrentSet: int=0,\n renameItem: int=0, renameSelectedItem: bool=True, renderFilterActive:\n bool=True, renderFilterIndex: Union[int, bool]=0, renderFilterVisible:\n bool=True, selectCommand: Union[Script, bool]=None, selectionConnection:\n Union[AnyStr, bool]=\"\", selectionOrder: AnyStr=\"\", setFilter: Union[AnyStr,\n bool]=\"\", setsIgnoreFilters: bool=True, showAnimCurvesOnly: bool=True,\n showAnimLayerWeight: bool=True, showAssets: bool=True,\n showAssignedMaterials: bool=True, showAttrValues: bool=True, showAttributes:\n bool=True, showCompounds: bool=True, showConnected: bool=True,\n showContainedOnly: bool=True, showContainerContents: bool=True, showDagOnly:\n bool=True, showLeafs: bool=True, showMuteInfo: bool=True, showNamespace:\n bool=True, showNumericAttrsOnly: bool=True, showParentContainers: bool=True,\n showPinIcons: bool=True, showPublishedAsConnected: bool=True,\n showReferenceMembers: bool=True, showReferenceNodes: bool=True,\n showSelected: bool=True, showSetMembers: bool=True, showShapes: bool=True,\n showTextureNodesOnly: bool=True, showTimeEditor: bool=True, showUVAttrsOnly:\n bool=True, showUnitlessCurves: bool=True, showUpstreamCurves: bool=True,\n sortOrder: Union[AnyStr, bool]=\"\", stateString: bool=True, transmitFilters:\n bool=True, unParent: bool=True, unlockMainConnection: bool=True, unpinPlug:\n name=None, updateMainConnection: bool=True, useTemplate: AnyStr=\"\", q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def _add_inverse_to_argparse(self, parser, group):\n container = self._get_argparse_container(parser, group)\n kwargs = self._get_argparse_kwargs(group, action='store_false')\n prefix = self._get_argparse_prefix('no', group.name if group else None)\n deprecated_names = []\n for opt in self.deprecated_opts:\n deprecated_name = self._get_deprecated_cli_name(opt.name,\n opt.group,\n prefix='no')\n if deprecated_name is not None:\n deprecated_names.append(deprecated_name)\n kwargs[\"help\"] = \"The inverse of --\" + self.name\n self._add_to_argparse(parser, container, self.name, None, kwargs,\n prefix, self.positional, deprecated_names)",
"def reapply(self, circ):\n self._modifiers(circ.sdg(self.qargs[0]))",
"def sub_command_group(self, name=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND_GROUP\r\n \r\n new_func = SubCommandGroup(func, name=name, **kwargs)\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator",
"def fusion_api_create_enclosure_group_payload(self, body, lig_map=None, api=None):\n return self.enclosure_group.make_body(api, body, lig_map)",
"def overrideModifier(*args, clear: bool=True, press: Union[AnyStr, List[AnyStr]]=\"\", release:\n Union[AnyStr, List[AnyStr]]=\"\", **kwargs)->None:\n pass",
"def set_group_selector(*args):\n return _ida_segment.set_group_selector(*args)",
"def ctxEditMode(*args, buttonDown: bool=True, buttonUp: bool=True, **kwargs)->None:\n pass",
"def addToGroup(self, frontorback, drawing, svggroup, **extra): \r\n super(byA_FB_SideLine, self).addToGroup(drawing, svggroup, **extra)",
"def patches(*args):\n with cros_build_lib.ContextManagerStack() as stack:\n for arg in args:\n stack.Add(lambda ret=arg: ret)\n yield",
"def fusion_api_create_enclosure_group(self, body, api=None, headers=None):\n return self.enclosure_group.create(body, api, headers)",
"def _expr_kernel(self,\n arguments: Any,\n batch: RecordBatch) -> Any:\n pass",
"def _closePath(self):\n self._commands.append(\"Z\")\n self._lastCommand = \"Z\"\n self._lastX = self._lastY = None",
"def AddArguments(cls, argument_group):",
"def enter_group():\n logline(\"\\\\\", indent=False)\n global group_length\n group_length = group_length + 1",
"def _api_modifier(self, event):\n clsid = guid_name(event[\"arguments\"].get(\"clsid\"))\n if clsid:\n event[\"flags\"][\"clsid\"] = clsid\n\n iid = event[\"arguments\"].get(\"iid\")\n if isinstance(iid, (tuple, list)):\n event[\"flags\"][\"iid\"] = [guid_name(x) for x in iid]\n elif guid_name(iid):\n event[\"flags\"][\"iid\"] = guid_name(iid)",
"def addEnclosure(self, enclosure):\n self.enclosures.append(enclosure)",
"def add_argument(self, *args, **kwargs):\n\n if 'group' not in kwargs or kwargs['group'] is None:\n self._parser.add_argument(*args, **kwargs)\n\n else:\n group = self._group_by_title(kwargs['group'])\n\n if group is None:\n raise ValueError(\n 'Trying to reference nonexisten argument group.'\n )\n\n else:\n kwargsr = {k: kwargs[k] for k in kwargs.keys() if 'group' != k}\n group.add_argument(*args, **kwargsr)",
"def RewriteOR(self, left, right):\n return None",
"def setKind(self, *args):\n return _libsbml.Group_setKind(self, *args)",
"def setKeyCtx(*args, breakdown: bool=True, exists: bool=True, history: bool=True, image1:\n Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr,\n bool]=\"\", name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[bool, Any]:\n pass"
] | [
"0.57244945",
"0.532607",
"0.5054181",
"0.501214",
"0.4829937",
"0.47982556",
"0.470352",
"0.46295822",
"0.46018374",
"0.45309988",
"0.45170015",
"0.45147574",
"0.45110768",
"0.44993183",
"0.4493856",
"0.44928628",
"0.44823283",
"0.4478239",
"0.4476593",
"0.44687688",
"0.4444774",
"0.4443774",
"0.4434437",
"0.44307923",
"0.44287744",
"0.44227302",
"0.441121",
"0.44054565",
"0.4400805",
"0.43972102"
] | 0.55514556 | 1 |
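A minimal sketch of the update-from-group call above, which the record shows as an empty-body PUT to an /updateFromGroup sub-path. The stub client and example URI are assumptions for illustration:

class _StubLogicalEnclosure:
    """Stand-in for the real `self.logical_enclosure` helper."""
    def put(self, body, uri=None, param='', api=None, headers=None):
        return {"method": "PUT", "uri": (uri or "") + param, "body": body}

def fusion_api_update_logical_enclosure_from_group(le, uri=None, api=None, headers=None):
    # Mirrors the document field above: empty-body PUT to <uri>/updateFromGroup.
    return le.put(body=None, uri=uri, param='/updateFromGroup', api=api, headers=headers)

print(fusion_api_update_logical_enclosure_from_group(
    _StubLogicalEnclosure(), uri="/rest/logical-enclosures/example-id"))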
Updates a Server Hardware Type. [Arguments] | def fusion_api_edit_server_hardware_types(self, body, uri, api=None, headers=None):
return self.types.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers)",
"def command_update_hw(self, cmd):\n # TODO\n pass",
"def fusion_api_add_server_hardware(self, body, api=None, headers=None, param=''):\n return self.sh.post(body, api, headers, param)",
"def fusion_api_patch_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.patch(body, uri, api, headers)",
"def put(self):\n type_model = request.json\n\n type_model = namedtuple(\"Type\", type_model.keys())(*type_model.values())\n repository = TypeRepository(\n FLASK_APP.config[\"DBUSER\"],\n FLASK_APP.config[\"DBPASS\"],\n FLASK_APP.config[\"DBHOST\"],\n FLASK_APP.config[\"DBPORT\"],\n FLASK_APP.config[\"DBNAME\"])\n try:\n type_model = repository.update(type_model)\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Informative',\n 'Type sucessfuly updated',\n 'put()',\n str(type.__dict__),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=type_model,\n message=\"Type sucessfuly updated.\",\n status=204), 200\n except Exception as err:\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Error',\n 'Internal server error',\n 'put()',\n str(err),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=err,\n message=\"Internal server error: \" + str(err),\n status=500)",
"def fusion_api_edit_server_hardware_power_state(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers, param='/powerState')",
"def _edit_server_hardware(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n if not selenium2lib._is_element_present(FusionServerHardwarePage.ID_PAGE_LABEL):\n base_page.navigate_base(FusionServerHardwarePage.ID_PAGE_LABEL,\n FusionUIBaseElements.ID_MENU_LINK_SERVER_HARDWARE, \"css=span.hp-page-item-count\")\n if not serverhardware.power_off_server_by_name(profile.server):\n logger._warn(\"Failed to powerOff the server %s\" % profile.server)\n logger._warn(\"Can't proceed with server profile creation on server %s\" % profile.server)\n continue\n # Navigating to Server profile page\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n ui_lib.wait_for_element(FusionUIBaseElements.ID_MAIN_MENU_CONTROL, PerfConstants.DEFAULT_SYNC_TIME)\n navigate()\n\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.profilename not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.profilename)\n continue\n if profile.server == \"\":\n logger._warn(\"Mandatory fields to edit server hardware can't be empty\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._log_to_console_and_log_file(\"Server is not powered off, and switching off now\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWER_PRESS_AND_HOLD)\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_SERVER_POWER_OFF_VALIDATE, PerfConstants.SERVER_POWER_OFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._warn(\"Failed to power off the server %s\" % profile.server)\n else:\n logger._log_to_console_and_log_file(\"Successfully server %s 
is powered off\" % profile.server)\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION)\n # New Code\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION):\n errMsg = selenium2lib._get_text(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION_CONTENT)\n logger._warn(errMsg)\n logger._warn(\"Unable to edit profile server hardware %s\" % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_SERVER_PROFILE)\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n strTimeStamp = selenium2lib._get_text(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n logger._log_to_console_and_log_file(strTimeStamp)\n\n # Verify profile server hardware updation status in server profile page (Under Activity tab)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp), PerfConstants.CREATE_SERVER_PROFILE_TIME)\n\n if selenium2lib._is_element_present(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp)):\n logger._log_to_console_and_log_file(\"Server profile '%s' is edited successfully\" % profile.profilename)\n else:\n logger._warn(\"Failed to edit server profile '%s' hardware\" % profile.profilename)",
"def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):\n return self.types.get(uri=uri, api=api, headers=headers, param=param)",
"def server_type(self):\n ...",
"def hardware(*args, brdType: bool=True, cpuType: bool=True, graphicsType: bool=True, megaHertz:\n bool=True, numProcessors: bool=True, **kwargs)->AnyStr:\n pass",
"def hw_type(self, hw_type):\n if self.local_vars_configuration.client_side_validation and hw_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `hw_type`, must not be `None`\") # noqa: E501\n\n self._hw_type = hw_type",
"def _update_device_types(self):\n device_types = self.adapter.device_types()\n for device_type in device_types.items:\n key = device_type.id\n self._make_up_to_date('/device_types', key, device_type)",
"def fusion_api_import_server_hardware_type_for_enclosure(self, body, uri, api=None, headers=None):\n return self.enclosure.post(body, uri, api=api, headers=headers)",
"def set_type(*args):\n return _ida_hexrays.set_type(*args)",
"def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)",
"def fusion_api_delete_server_hardware_types(self, name=None, uri=None, api=None, headers=None):\n return self.types.delete(name=name, uri=uri, api=api, headers=headers)",
"def set_device_type(device_type):\n device_type_data = {\n 'name': device_type,\n 'label': normalize_label(device_type),\n 'deviceColor': sigfox_main_color,\n 'deviceIcon': 'wifi',\n 'variableColor': sigfox_secondary_color,\n 'properties': [],\n 'variables': []\n }\n return device_type_data",
"def update(s_socket):\r\n dll = get_dll()\r\n bytes_value = to_bytes(len(dll) + 5, 4, 'little')\r\n s_socket.send('u' + bytes_value + dll)",
"def set_type(self, index):\n self.set_type_obj(index)\n self.set_type_gui(index)\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def _validate_node_server_hardware_type(oneview_client, oneview_info):\n node_server_hardware_type_uri = oneview_info['server_hardware_type_uri']\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n server_hardware_sht_uri = server_hardware.get('serverHardwareTypeUri')\n\n if server_hardware_sht_uri != node_server_hardware_type_uri:\n message = _(\"Node server_hardware_type_uri is inconsistent \"\n \"with OneView's server hardware %(server_hardware_uri)s \"\n \"serverHardwareTypeUri.\") % {\n 'server_hardware_uri': server_hardware.get('uri')}\n raise exception.OneViewError(message)",
"def fusion_api_edit_server_hardware_mp_firmware_version(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers, param='/mpFirmwareVersion')",
"def set_eprom_type(eprom_type):\n command(eprom_type + \"S\")",
"async def _forcesettype(self, ctx, *args):\n if len(args) < 2:\n await ctx.send(\"Include both a name and a type!\")\n return\n\n god = database.getGodName(args[0], ctx.guild.id)\n if god:\n godtypes = []\n for godTypeSet in botutils.godtypes:\n godtypes.append(godTypeSet[0])\n\n if args[1].upper() in godtypes:\n database.setType(god.ID, args[1].upper())\n await ctx.send(\"Set your God's type successfully!\")\n else:\n types_string = \"\"\n i = 1\n for godtype in godtypes:\n if i == 1:\n types_string = godtype\n else:\n types_string = types_string + \", \" + godtype\n i += 1\n await ctx.send(\"Please choose between these types: `\" + types_string + \"`!\")",
"def updateDeviceManagementInterface(self, serial: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['devices', 'configure', 'managementInterface'],\n 'operation': 'updateDeviceManagementInterface'\n }\n resource = f'/devices/{serial}/managementInterface'\n\n body_params = ['wan1', 'wan2', ]\n payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}\n action = {\n \"resource\": resource,\n \"operation\": \"update\",\n \"body\": payload\n }\n return action",
"def update_firmware(self) -> str:",
"def update_server_engine_attributes(ServerName=None, AttributeName=None, AttributeValue=None):\n pass",
"def test_update_hyperflex_server_model(self):\n pass",
"def test_update_hyperflex_server_firmware_version(self):\n pass",
"def server_type_name(self):\n ...",
"async def _set(Type=None,*,thing=None):\n server = len(bot.servers)\n if Type is None:\n await bot.say('Usage: `.presence [game/stream] [message]`')\n else:\n if Type.lower() == 'stream':\n await bot.change_presence(game=discord.Game(name=thing,type=1,url='https://www.twitch.tv/a'),status='online')\n await bot.say('Set presence to. `Streaming {}`'.format(thing))\n elif Type.lower() == 'game':\n await bot.change_presence(game=discord.Game(name=thing))\n await bot.say('Set presence to `Playing {}`'.format(thing))\n elif Type.lower() == 'clear':\n await bot.change_presence(game=None)\n await bot.say('Cleared Presence')\n elif Type.lower() == 'servers':\n await bot.change_presence(game=discord.Game(name='with {} servers'.format(server)))\n await bot.say('**Im now playing with {} servers.**'.format(server))\n else:\n await bot.say('Usage: `.presence [game/stream] [message]`')"
] | [
"0.6469864",
"0.619745",
"0.60548294",
"0.59878653",
"0.57120275",
"0.5703278",
"0.56411195",
"0.5541107",
"0.5529781",
"0.55250376",
"0.53978205",
"0.53723806",
"0.5331305",
"0.531114",
"0.5274173",
"0.52472025",
"0.52413344",
"0.52305746",
"0.5228627",
"0.5217233",
"0.52132463",
"0.5185711",
"0.5160882",
"0.5091488",
"0.50831276",
"0.50671995",
"0.5062593",
"0.5035229",
"0.50258344",
"0.50128955"
] | 0.72509897 | 0 |
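
The two trailing fields on each row above are the positive document's score and its rank. A minimal sketch of how those fields appear to relate, assuming (this is an inference from the visible rows, not documented in the dump) that the rank counts negatives scoring above the positive document; the variable names are illustrative only:

    # Sketch only; field semantics are assumed, values copied from the row above.
    negative_scores = [0.6469864, 0.619745, 0.60548294]  # first three of thirty
    document_score = 0.72509897
    document_rank = sum(1 for s in negative_scores if s > document_score)
    assert document_rank == 0  # consistent with the trailing rank field
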
Gets a default or paginated collection of Server Hardware Types. [Arguments] | def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):
return self.types.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_servers_types():\n ret = _get_list(\n lambda server: server.type if server.type not in ['vanilla.winter', 'vanilla.desert', 'pvp'] else False,\n lambda server: server.type_name\n )\n\n # Extra server type filters\n ret.append({\n 'value': 'pacific+edelweiss',\n 'label': 'RWR: WWII DLCs'\n })\n\n return ret",
"def get_device_types():\n netAdminToolDB = app.config['DATABASE']\n\n device_types = netAdminToolDB.get_device_type()\n list = []\n for device_type in device_types:\n uri = url_for('get_device_type', device_type_id=device_type.id, _external=True)\n list.append({\n 'id': device_type.id,\n 'uri': uri,\n 'make': device_type.make,\n 'model': device_type.model,\n 'code': device_type.code\n })\n if list == []:\n return jsonify({'error': 'No device types found'}), 404\n\n return jsonify({'device_types': list})",
"def list_hardware(self, tags=None, cpus=None, memory=None, hostname=None,\r\n domain=None, datacenter=None, nic_speed=None,\r\n public_ip=None, private_ip=None, **kwargs):\r\n if 'mask' not in kwargs:\r\n hw_items = [\r\n 'id',\r\n 'hostname',\r\n 'domain',\r\n 'hardwareStatusId',\r\n 'globalIdentifier',\r\n 'fullyQualifiedDomainName',\r\n 'processorPhysicalCoreAmount',\r\n 'memoryCapacity',\r\n 'primaryBackendIpAddress',\r\n 'primaryIpAddress',\r\n 'datacenter',\r\n ]\r\n server_items = [\r\n 'activeTransaction[id, transactionStatus[friendlyName,name]]',\r\n ]\r\n\r\n kwargs['mask'] = '[mask[%s],' \\\r\n ' mask(SoftLayer_Hardware_Server)[%s]]' % \\\r\n (','.join(hw_items),\r\n ','.join(server_items))\r\n\r\n _filter = NestedDict(kwargs.get('filter') or {})\r\n if tags:\r\n _filter['hardware']['tagReferences']['tag']['name'] = {\r\n 'operation': 'in',\r\n 'options': [{'name': 'data', 'value': tags}],\r\n }\r\n\r\n if cpus:\r\n _filter['hardware']['processorPhysicalCoreAmount'] = \\\r\n query_filter(cpus)\r\n\r\n if memory:\r\n _filter['hardware']['memoryCapacity'] = query_filter(memory)\r\n\r\n if hostname:\r\n _filter['hardware']['hostname'] = query_filter(hostname)\r\n\r\n if domain:\r\n _filter['hardware']['domain'] = query_filter(domain)\r\n\r\n if datacenter:\r\n _filter['hardware']['datacenter']['name'] = \\\r\n query_filter(datacenter)\r\n\r\n if nic_speed:\r\n _filter['hardware']['networkComponents']['maxSpeed'] = \\\r\n query_filter(nic_speed)\r\n\r\n if public_ip:\r\n _filter['hardware']['primaryIpAddress'] = \\\r\n query_filter(public_ip)\r\n\r\n if private_ip:\r\n _filter['hardware']['primaryBackendIpAddress'] = \\\r\n query_filter(private_ip)\r\n\r\n kwargs['filter'] = _filter.to_dict()\r\n return self.account.getHardware(**kwargs)",
"def fusion_api_get_server_hardware(self, uri=None, param='', api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param=param)",
"def get_hd_types(self):\r\n return self._arm.get_hd_types()",
"def fusion_api_get_switch_types(self, param='', api=None, headers=None):\n return self.swtypes.get(api=api, headers=headers, param=param)",
"def _get_hardware_info(self) -> list:\n model = ctypes.create_string_buffer(8)\n model_size = ctypes.c_ulong(8)\n type_num = ctypes.c_ushort()\n channel_num = ctypes.c_ushort()\n notes = ctypes.create_string_buffer(48)\n notes_size = ctypes.c_ulong(48)\n firmware_version = ctypes.c_ulong()\n hardware_version = ctypes.c_ushort()\n modification_state = ctypes.c_ushort()\n\n ret = self._dll.LS_GetHardwareInfo(\n self._serial_number,\n ctypes.byref(model), model_size,\n ctypes.byref(type_num), ctypes.byref(channel_num),\n ctypes.byref(notes), notes_size, ctypes.byref(firmware_version),\n ctypes.byref(hardware_version), ctypes.byref(modification_state)\n )\n\n self._check_error(ret)\n return [model.value, type_num.value, channel_num.value,\n notes.value, firmware_version.value, hardware_version.value,\n modification_state.value]",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def do_command(self, args):\n ostypeops = dbops.OsTypes()\n listing = ostypeops.list(args)\n ordering = ['os_type_name']\n do_list(listing, ordering)",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')",
"def fusion_api_edit_server_hardware_types(self, body, uri, api=None, headers=None):\n return self.types.update(body, uri, api, headers)",
"def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)",
"def get_types(self):\n return self.types",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def types():\n types = session.query(Type).all()\n return jsonify(types=[t.name for t in types])",
"def get(self, *args):\n return _libsbml.ListOfSpeciesTypes_get(self, *args)",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"def getHardware(self):\n return (self.vendorId, self.deviceId, self.physicalMemory, self.osInfo, self.cpuSpeed[0])",
"def getListOfSpeciesTypes(self, *args):\n return _libsbml.Model_getListOfSpeciesTypes(self, *args)",
"def test_get_hyperflex_server_model_list(self):\n pass",
"def get_hc_types(self):\r\n svc = self.client['Network_Application_Delivery_Controller_'\r\n 'LoadBalancer_Health_Check_Type']\r\n return svc.getAllObjects()",
"def get_hypervisors(self):\n json_scheme = self.gen_def_json_scheme('GetHypervisors')\n json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)\n self.json_templates = json_obj\n d = dict(json_obj)\n for elem in d['Value']:\n hv = self.hypervisors[elem['HypervisorType']]\n for inner_elem in elem['Templates']:\n o = Template(hv)\n o.template_id = inner_elem['Id']\n o.descr = inner_elem['Description']\n o.id_code = inner_elem['IdentificationCode']\n o.name = inner_elem['Name']\n o.enabled = inner_elem['Enabled']\n if hv != 'SMART':\n for rb in inner_elem['ResourceBounds']:\n resource_type = rb['ResourceType']\n if resource_type == 1:\n o.resource_bounds.max_cpu = rb['Max']\n if resource_type == 2:\n o.resource_bounds.max_memory = rb['Max']\n if resource_type == 3:\n o.resource_bounds.hdd0 = rb['Max']\n if resource_type == 7:\n o.resource_bounds.hdd1 = rb['Max']\n if resource_type == 8:\n o.resource_bounds.hdd2 = rb['Max']\n if resource_type == 9:\n o.resource_bounds.hdd3 = rb['Max']\n self.templates.append(o)\n return True if json_obj['Success'] is 'True' else False",
"def devices(self):\n return list(self._device_types)",
"def select_host_characteristics(self):\n return IMPL.select_host_characteristics()",
"def Platforms():\n return platforms",
"def find_things(server, media_type):\n\n dict_tt = {name: [] for name in media_type}\n print('Finding items from {}.'.format(server.friendlyName))\n for section in server.library.sections():\n if section.title not in IGNORE_LST and section.type in media_type:\n for item in server.library.section(section.title).all():\n dict_tt[section.type].append(server.fetchItem(item.ratingKey))\n\n return dict_tt",
"def hardware(*args, brdType: bool=True, cpuType: bool=True, graphicsType: bool=True, megaHertz:\n bool=True, numProcessors: bool=True, **kwargs)->AnyStr:\n pass",
"def get_context_data(self):\n return {\"machine_types\": [{\n \"name\": machine_type.literal, \"machines\": list(machine_type.objects.all())\n } for machine_type in Machine.__subclasses__() if machine_type.objects.exists()]}",
"def ntypes(self): # -> list[None]:\n ..."
] | [
"0.63679874",
"0.61915565",
"0.60292554",
"0.60090137",
"0.5863595",
"0.57741106",
"0.5703842",
"0.56640977",
"0.55258626",
"0.5511348",
"0.54733056",
"0.5406356",
"0.53472906",
"0.5344607",
"0.5313471",
"0.53117824",
"0.53005385",
"0.5293233",
"0.52614915",
"0.52412504",
"0.52322507",
"0.5214751",
"0.519874",
"0.5161325",
"0.5157743",
"0.5147307",
"0.5137882",
"0.5130264",
"0.51276964",
"0.5107001"
] | 0.7453972 | 0 |
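
The positive document in the row above is a thin wrapper that issues a GET for server hardware types and passes any paging or filter arguments through `param`. A hedged usage sketch, assuming a hypothetical `client` object exposing these `fusion_api_*` wrappers; the constructor and the query string below are illustrative, not part of the dataset:

    client = FusionAPIClient()  # hypothetical client; not defined in this dump
    all_types = client.fusion_api_get_server_hardware_types()  # default collection
    page = client.fusion_api_get_server_hardware_types(param='?start=0&count=16')  # paginated (illustrative query string)
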
Deletes server hardware types in bulk based on name OR uri. If name AND uri are omitted, ALL shts are deleted. [Arguments] | def fusion_api_delete_server_hardware_types(self, name=None, uri=None, api=None, headers=None):
return self.types.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None):\n return self.driver.delete(name, uri, api, headers)",
"def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.delete(args)",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def delete_all(submission_client, program, project, batch_size=200, types=['submitted_methylation', 'aliquot', 'sample', 'demographic', 'case', 'experiment']):\n for t in types:\n print('{}-{}.{}'.format(program, project, t))\n try:\n delete_type(submission_client, program, project, batch_size, t)\n except Exception as e:\n print(e)",
"def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)",
"def fusion_api_delete_ls(self, name=None, uri=None, api=None, headers=None):\n return self.ls.delete(name=name, uri=uri, api=api, headers=headers)",
"def removeMultiSpeciesType(self, *args):\n return _libsbml.MultiModelPlugin_removeMultiSpeciesType(self, *args)",
"def delete_types(self, base_key, out_key, *types):\n self.params['%s.%s' % (base_key, out_key)] = delete_types(\n self.params[base_key], *types)",
"def fusion_api_remove_power_device(self, name=None, uri=None, api=None, headers=None):\n return self.pd.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):\n return self.system.delete(uri=uri, api=api, headers=headers)",
"def delete_types(self, base_key, out_key, *types):\n self.params[\"%s.%s\" % (base_key, out_key)] = self.delete_types_s(self.params[base_key], types)",
"def fusion_api_delete_ha_nodes(self, uri=None, api=None, headers=None):\n return self.ha_nodes.delete(uri, api, headers)",
"def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)",
"def remove_many_descriptors(self, uuids):",
"def delete_server(ServerName=None):\n pass",
"def fusion_api_delete_logical_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.logical_enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete_network_bulk(self, tenant_id, network_id_list, sync=False):",
"def fusion_api_edit_server_hardware_types(self, body, uri, api=None, headers=None):\n return self.types.update(body, uri, api, headers)",
"def delete(self, *names):\n\n return [shard.delete(*keys) for shard, keys\n in self.gather_keys_by_shard(names)]",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('host', kwargs)",
"async def delete_routes(self, routes: Sequence[str]):\n exist_routes = await self.get_routes()\n removes = set(exist_routes).intersection(routes)\n storage: BaseStorage = await self._context.inject(BaseStorage)\n for route in removes:\n await storage.delete_record(\n StorageRecord(self.RECORD_TYPE, route, id=route)\n )",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]) and \\\n not all([i in kwargs for i in ('proto', 'port')]):\n raise TypeError('Expected host or port/proto pair.')\n self.dbdel('service', kwargs)",
"def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)",
"def RemoveObjsCommand(self, args, sub_opts=None, headers=None,\n debug=0):\n continue_on_error = False\n if sub_opts:\n for o, unused_a in sub_opts:\n if o == '-f':\n continue_on_error = True\n # Expand object name wildcards, if any.\n for uri_str in args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if uri.names_container():\n if uri.is_cloud_uri():\n # Before offering advice about how to do rm + rb, ensure those\n # commands won't fail because of bucket naming problems.\n boto.s3.connection.check_lowercase_bucketname(uri.bucket_name)\n uri_str = uri_str.rstrip('/\\\\')\n raise CommandException('\"rm\" command will not remove buckets. To '\n 'delete this/these bucket(s) do:\\n\\tgsutil rm '\n '%s/*\\n\\tgsutil rb %s' % (uri_str, uri_str))\n print 'Removing %s...' % uri\n try:\n uri.delete_key(validate=False, headers=headers)\n except Exception, e:\n if not continue_on_error:\n raise",
"def RemoveBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n # Expand bucket name wildcards, if any.\n for uri_str in args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if uri.object_name:\n raise CommandException('\"rb\" command requires a URI with no object '\n 'name')\n print 'Removing %s...' % uri\n uri.delete_bucket(headers)",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)"
] | [
"0.72597307",
"0.5941608",
"0.59314007",
"0.58576465",
"0.56981254",
"0.56143004",
"0.55945665",
"0.5569703",
"0.5556006",
"0.55467683",
"0.5541295",
"0.5474072",
"0.5456166",
"0.54501444",
"0.5449733",
"0.54203063",
"0.54069936",
"0.5361632",
"0.53535885",
"0.53339195",
"0.5318978",
"0.5312374",
"0.53023046",
"0.52959245",
"0.52382004",
"0.522867",
"0.5213411",
"0.5206382",
"0.5203377",
"0.5190991"
] | 0.7939834 | 0 |
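
Per its query text, the delete wrapper in the row above selects a single server hardware type by `name` or `uri`, and removes all of them when both are omitted. A sketch of the three call shapes, reusing the same hypothetical `client` object as the sketch earlier; the name and URI values below are made up for illustration:

    client.fusion_api_delete_server_hardware_types(name='BL460c Gen9 1')                # by name
    client.fusion_api_delete_server_hardware_types(uri='/rest/server-hardware-types/1')  # by uri
    # client.fusion_api_delete_server_hardware_types()  # omitting both deletes ALL types
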
Creates a Server Profile. [Arguments] | def fusion_api_create_server_profile(self, body, api=None, headers=None, param=''):
return self.profile.create(body, api, headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n created = 0\n already_exists = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is already existing\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n already_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_select_server_profile_template(profile.prof_temp)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if getattr(profile, 'hardwareType', None) is not None:\n hardware_type = profile.hardwareType\n\n if str(hardware_type)[:2:] == 'BL' or profile.server == 'unassigned':\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CreateServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfile.Advanced.set(profile)\n\n CreateServerProfile.click_create_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % profile.name)\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180, fail_if_false=False) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n if FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=timeout, fail_if_false=False) is True:\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok_or_warn(profile.name, timeout=180, fail_if_false=False) is True:\n logger.info(\"created server profile '%s' successfully\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_server_profile_status_ok_or_warn' = FALSE, skip to next profile ... \")\n continue\n else:\n logger.warn(\"'wait_activity_action_ok' = FALSE, skip to next profile ... 
\")\n FusionUIBase.show_activity_sidebar()\n continue\n else:\n logger.info(\"created server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_create_server_profile_dialog_disappear' = FALSE, skip to next profile ... \")\n CreateServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n ui_lib.fail_test(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def create(*args, **kwargs):\n\n factory = V2ProfileFactory()\n output = factory.create(export_json=True)\n click.echo(output)",
"def create_new_profile():\n client_nickname = input('Enter client profile name: ')\n client_username = input('Enter client username: ')\n client_hostname = input('Enter client hostname: ')\n client_port = '-p' + input('Enter client port: ')\n new_profile = SshUsers(client_nickname, client_username, client_hostname, client_port)\n return add_user_to_db(new_profile)",
"def fusion_api_create_server_profile_template(self, body, api=None, headers=None):\n return self.profile_template.create(body, api, headers)",
"def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)",
"def create_simple_server_profile_by_server_hardware(profile_name, server_name, return_true_if_exists=False):\n logger.info(\"--> creating a server profile with name '%s' ...\" % profile_name)\n # checking if the profile is already existing\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n if VerifyServerProfile.verify_server_profile_not_exist(profile_name, fail_if_false=False) is False:\n logger.warn(\"server profile '%s' already exists\" % profile_name)\n return return_true_if_exists\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_HARDWARE, time_for_loading=5)\n if VerifyHardware.verify_server_hardware_exist(server_name=server_name, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' does not exist\" % server_name)\n return False\n\n CommonOperationServerHardware.click_server_hardware(server_name=server_name, timeout=5, time_for_loading=5)\n FusionUIBase.select_view_by_name(view_name='Hardware', timeout=5, fail_if_false=False)\n if VerifyHardware.is_create_profile_link_available() is False:\n logger.warn(\"server hardware '%s' does NOT have 'Create profile' link to perform creating profile\" % server_name)\n return False\n\n CommonOperationServerHardware.click_create_profile_link(server_name=server_name)\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(name=profile_name)\n # CreateServerProfile.input_description(description=description)\n\n if VerifyServerProfile.is_power_on_error_visible_when_create_server_profile(server_name=server_name, timeout=5, fail_if_false=False) is True:\n if CreateServerProfile.click_power_off_link_from_powered_on_error(server_name=server_name, timeout=5, fail_if_false=False) is False:\n logger.warn(\"server hardware '%s' is powered on but failed to power it off, creating simple server profile will FAIL\" % server_name)\n return False\n\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(server_name)\n\n if sht_selected[:2:] == 'BL':\n # maybe other needs according to SHT in the future\n pass\n\n CreateServerProfile.click_create_button()\n err_msg_boot_mode = CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode()\n if err_msg_boot_mode is not None:\n logger.warn(\"error message: ['%s'] when creating profile '%s'\" % (err_msg_boot_mode, profile_name))\n if 'select a boot mode' in err_msg_boot_mode.strip().lower():\n logger.debug(\"trying to set 'Boot mode' as 'Legacy BIOS' to remove this error message ...\")\n CommonOperationServerProfile.BootSettings.select_boot_mode_legacy_bios()\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unknown error message, cannot continue to create simple server profile\")\n return False\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n return False\n # ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if FusionUIBase.wait_activity_action_ok(profile_name, 'Create', timeout=720, fail_if_false=True) is False:\n return False\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, 
timeout=180, fail_if_false=True) is False:\n return False\n logger.info(\"created simple server profile '%s' successfully\" % profile_name)\n return True",
"def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)",
"def create_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n created = 0\n already_exists = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile template is already existing\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile_template.name)\n already_exists += 1\n continue\n\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_selected = get_type_of_server_hardware(profile_template.ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n # open Create SP template dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile_template.name)\n CreateServerProfileTemplate.input_description(getattr(profile_template, 'desc', ''))\n CreateServerProfileTemplate.input_server_profile_description(getattr(profile_template, 'sp_desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n # input 'Enclosure group'\n CreateServerProfileTemplate.input_select_server_hardware_type(sht_selected)\n CreateServerProfileTemplate.input_select_enclosure_group(profile_template.enclgroup) if getattr(profile_template, 'enclgroup', None) is not None else None\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfileTemplate.Advanced.set(profile_template)\n\n CreateServerProfileTemplate.click_create_button()\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile_template.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=720, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=180, fail_if_false=True)\n logger.info(\"created server profile '%s' successfully\" % profile_template.name)\n created += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n logger.warn(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def create_network_profile(self, body=None):\r\n return self.post(self.network_profiles_path, body=body)",
"def create(profile, name):\n # Make sure it doesn't exist already.\n if exists(profile, name):\n msg = \"Instance profile '\" + str(name) + \"' already exists.\"\n raise ResourceAlreadyExists(msg)\n\n # Now we can create it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"create\", params)\n\n # Check that it exists.\n instance_profile_data = polling_fetch(profile, name)\n if not instance_profile_data:\n msg = \"Instance profile '\" + str(name) + \"' not created.\"\n raise ResourceNotCreated(msg)\n\n # Send back the instance profile's info.\n return instance_profile_data",
"def create_profile(options):\n\tfor k, v in default_profile.items():\n\t\toptions.setdefault(k, v)\n\t\n\treturn options",
"def create_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def create(\n name: str,\n from_name: str = typer.Option(None, \"--from\", help=\"Copy an existing profile.\"),\n):\n\n profiles = prefect.settings.load_profiles()\n if name in profiles:\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n [red]Profile {name!r} already exists.[/red]\n To create a new profile, remove the existing profile first:\n\n prefect profile delete {name!r}\n \"\"\"\n ).strip()\n )\n raise typer.Exit(1)\n\n if from_name:\n if from_name not in profiles:\n exit_with_error(f\"Profile {from_name!r} not found.\")\n\n # Create a copy of the profile with a new name and add to the collection\n profiles.add_profile(profiles[from_name].copy(update={\"name\": name}))\n else:\n profiles.add_profile(prefect.settings.Profile(name=name, settings={}))\n\n prefect.settings.save_profiles(profiles)\n\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n Created profile with properties:\n name - {name!r}\n from name - {from_name or None}\n\n Use created profile for future, subsequent commands:\n prefect profile use {name!r}\n\n Use created profile temporarily for a single command:\n prefect -p {name!r} config view\n \"\"\"\n )\n )",
"def make_ServerProfileTemplateV1(name=None,\n description=None,\n serverProfileDescription=None,\n serverHardwareTypeUri=None,\n enclosureGroupUri=None,\n affinity=None,\n hideUnusedFlexNics=None,\n profileConnectionV4=None,\n firmwareSettingsV3=None,\n bootSettings=None,\n bootModeSetting=None,\n sanStorageV3=None):\n return {\n 'type': 'ServerProfileTemplateV1',\n 'name': name,\n 'description': description,\n 'serverProfileDescription': serverProfileDescription,\n 'serverHardwareTypeUri': serverHardwareTypeUri,\n 'enclosureGroupUri': enclosureGroupUri,\n 'affinity': affinity,\n 'hideUnusedFlexNics': hideUnusedFlexNics,\n 'connections': profileConnectionV4,\n 'firmware': firmwareSettingsV3,\n 'boot': bootSettings,\n 'bootMode': bootModeSetting,\n 'sanStorage': sanStorageV3\n }",
"def create_profile(username):\n user = User.objects.create(username=username)\n return Profile.objects.create(user=user)",
"def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()",
"def create_profile(sender, **kwargs):\n\n # I import profile here cause i can't import it right in the top.\n from .profiles import Profile\n\n user = kwargs['instance']\n\n Profile.objects.get_or_create(user=user)",
"def fusion_api_get_server_profile_template_new_profile(self, uri, api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=\"/new-profile\")",
"def create_profile(sender, **kw):\n user = kw['instance']\n if kw['created']:\n profile = UserProfile(user=user)\n profile.save()",
"def create_profile_from_template(*template_profile_obj):\n\n logger._log_to_console_and_log_file(\"Navigating to server profile template page...\")\n if not navigate():\n return False\n\n if isinstance(template_profile_obj, test_data.DataObj):\n template_profile_obj = [template_profile_obj]\n elif isinstance(template_profile_obj, tuple):\n template_profile_obj = list(template_profile_obj[0])\n\n for prof in template_profile_obj:\n\n \"\"\" Selecting profile template \"\"\"\n if not select_profile_template(prof.templ_name):\n ui_lib.fail_test(\"profile template is not present in template list\")\n\n logger._log_to_console_and_log_file(\"verifying for profile existence before proceeding to create\")\n if prof.has_property(\"prof_name\") and prof.prof_name.strip() != \"\":\n if serverprofiles.select_server_profile(prof.prof_name):\n ui_lib.fail_test(\"FAIL: Server profile '{0}' is already present\".format(prof.prof_name))\n else:\n ui_lib.fail_test(\"'prof_name' is a mandatory field and should not be empty\")\n\n logger._log_to_console_and_log_file(\"Powering of server '{0}\".format(prof.server))\n if prof.server.strip() != \"unassigned\" and not (serverhardware.power_off_server(prof.server)):\n ui_lib.fail_test(\"Can't proceed with server profile creation on server %s\" % prof.server)\n\n if not ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_PAGE_LABEL):\n if not navigate():\n ui_lib.fail_test(\"FAIL: failed to navigate profile template page\")\n\n logger._log_to_console_and_log_file(\"Selecting Create server profile option from Actions menu\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE)\n\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME)\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME, prof.prof_name)\n\n if prof.has_property(\"prof_description\") and prof.prof_description.strip() != \"\":\n logger._log_to_console_and_log_file(\"Entering profile description: '{0}'\".format(prof.prof_description))\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_DESCRIPTION, prof.prof_description)\n\n if prof.has_property(\"server\") and prof.server.strip() != \"\":\n logger._log_to_console_and_log_file(\"Selecting sever '{0}' to create profile\".format(prof.server))\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server):\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server)\n logger._log_to_console_and_log_file(\"Selected valid server hardware\")\n else:\n ui_lib.fail_test(\"Provided server '{0}' is not a valid\".format(prof.server))\n else:\n ui_lib.fail_test(\"'server' name is a mandatory field and should not be empty\")\n\n if prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'false':\n logger._log_to_console_and_log_file(\"Creating server profile from template without overriding template\")\n elif prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'true':\n logger._log_to_console_and_log_file(\"Creating 
server profile from template with overriding template\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_CHECKBOX_OVERRIDE_TEMPALTE)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE, PerfConstants.SELECT_ENCLOSURE * 3)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.SELECT_ENCLOSURE)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.WAIT_UNTIL_CONSTANT):\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n else:\n ui_lib.fail_test(ui_lib.get_text(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR))\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % prof.prof_name, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.ignore_staleElementRefException(\"_is_visible\", FusionServerProfilesPage.ID_PROFILE_CHANGING)\n logger._log_to_console_and_log_file(\"Waiting for profile creation to complete..\")\n\n logger._log_to_console_and_log_file(\"Validating profile %s\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ELEMENT_ACTIVITY % prof.prof_name):\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_OK, PerfConstants.CREATE_SERVER_PROFILE_TIME):\n logger._log_to_console_and_log_file(\"Profile template %s created\" % prof.prof_name)\n elif ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_WARNING):\n logger._warn(\"Profile %s created with warning\" % prof.prof_name)\n else:\n logger._warn(\"Failed to create server profile %s\" % prof.prof_name)\n return False\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n\n return True",
"def Create( profile_name,\r\n host,\r\n username=None,\r\n password=None,\r\n port=26,\r\n from_name=None,\r\n from_email=None,\r\n ssl=False,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n if not from_name and not from_email:\r\n raise CommandLine.UsageException(\"'from_name' or 'from_email' must be provided\")\r\n\r\n mailer = SmtpMailer( host,\r\n username=username,\r\n password=password,\r\n port=port,\r\n from_name=from_name,\r\n from_email=from_email,\r\n ssl=ssl,\r\n )\r\n mailer.Save(profile_name)\r\n\r\n output_stream.write(\"The profile '{}' has been created.\\n\".format(profile_name))",
"def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)",
"def create_my_profile(\n body: Optional[UserProfilePrivateCreate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def copy_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=8)\n\n total = len(profile_obj)\n not_exists = 0\n copied = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile with name '%s' ...\" % profile.source)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.source, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.source)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.source)\n CopyServerProfile.select_action_copy()\n CopyServerProfile.wait_copy_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfile.input_name(profile.name)\n CopyServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n\n if not CopyServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for copying server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip copying profile '%s' and continue to edit other server profiles\" % (profile.server, profile.source))\n continue\n msg = CopyServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be copied successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CopyServerProfile.get_selected_server_hardware_type(profile.server)\n if hasattr(profile, 'hardwareType'):\n hardware_type = profile.hardwareType\n else:\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n else:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.server)\n\n if str(hardware_type) not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CopyServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfile.Advanced.set(profile)\n\n CopyServerProfile.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfile.wait_copy_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=1800, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile.source, profile.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to copy! 
all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, %s profile(s) left is failed being copied \" % (not_exists, total - copied - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def create_player_profile(sender, **kwargs):\n if kwargs.get('created') is True:\n PlayerProfile.objects.create(user=kwargs.get('instance'))",
"def validate_error_on_create_server_profile(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n total = len(profile_obj)\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n continue\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if hasattr(profile, 'Bandwidth_Error'):\n logger.info(\"change to 'Connections' view ...\")\n FusionUIBase.select_view_by_name('Connections')\n logger.info(\"start deleting connections ...\")\n total = len(profile.Connections)\n cls = CommonOperationServerProfile.Connection\n for n, connection in enumerate(profile.Connections):\n expected_message = profile.Bandwidth_Error\n logger.info(\"adding a connection with name '%s' ...\" % connection.name)\n if cls.verify_connection_not_exist(connection.name, fail_if_false=False) is False:\n logger.warn(\"connection '%s' already exists, skipped ...\" % connection.name)\n continue\n cls.click_add_connection_button()\n cls.wait_add_connection_dialog_shown(time_for_loading=3)\n cls.input_name(connection.name)\n cls.select_function_type_by_text(connection.FunctionType, timeout=10, fail_if_false=True)\n logger.info(\"Expected Error message is '%s' ...\" % expected_message)\n cls.input_select_network(connection.network)\n logger.info(\"n/w selected\")\n cls.input_select_port(connection.port)\n cls.input_requested_bandwidth(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_INPUT_REQUESTED_BANDWIDTH) else None\n cls.select_requested_bandwidth_by_text(connection.RequestedBandwidth) if ui_lib.is_visible(cls.e.ID_SELECTBOX_REQUESTED_BANDWIDTH) else None\n cls.click_add_button()\n if not VerifyServerProfile.verify_bandwidth_error(expected_message, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n cls.click_cancel_button()\n logger.info(\"clicked cancel button\")\n else:\n CommonOperationServerProfile.Connection.set(profile.Connections)\n 
CreateServerProfile.click_create_button()\n status, _ = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if hasattr(profile, 'update_error'):\n if not VerifyServerProfile.verify_error_message_for_update_action(profile.update_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n if not VerifyServerProfile.verify_error_message_in_add_connection(profile.connection_error, timeout=5, fail_if_false=True):\n logger.info(\"Validation failed\")\n else:\n logger.info(\"Error validation successful\")\n CreateServerProfile.click_cancel_button()\n else:\n logger.info(\"Profile created successfully\")\n return True",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)"
] | [
"0.73545814",
"0.7178512",
"0.6997477",
"0.683101",
"0.66478306",
"0.66430336",
"0.65632606",
"0.6467705",
"0.640401",
"0.635578",
"0.6292709",
"0.62823886",
"0.6270718",
"0.62524086",
"0.6238",
"0.62262017",
"0.61999977",
"0.61757547",
"0.6169726",
"0.616543",
"0.6158814",
"0.6136971",
"0.6128991",
"0.6091219",
"0.60590404",
"0.60485",
"0.60072505",
"0.59707654",
"0.59707654",
"0.59707654"
] | 0.7390086 | 0 |
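
A minimal sketch (not part of the dataset) of one plausible reading of the trailing columns in rows like the one above: the last two values appear to be the positive document's retrieval score and its rank among the negatives, where rank 0 means the document outscores every negative (here 0.7390086 > 0.73545814, the top negative score). The helper name below is hypothetical and exists only to make that reading checkable:

def document_rank(document_score, negative_scores):
    # Rank = number of negatives scoring strictly higher than the positive
    # document; 0 means the positive document would be retrieved first.
    return sum(1 for s in negative_scores if s > document_score)

# Spot-check against the row above (negative scores truncated for brevity):
negatives = [0.73545814, 0.7178512, 0.6997477, 0.683101]
assert document_rank(0.7390086, negatives) == 0
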
Updates a Server Profile. [Arguments] | def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):
return self.profile.update(body, uri, api, headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def edit_server_profile_for_dl(profile_obj):\n # This keyword is deprecated, please do not use.\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n\n EditServerProfile.input_name(profile.newName)\n EditServerProfile.input_description(profile.desc)\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - verify the server hardware is refreshed to the type name displayed in the drop-down list for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully\")\n ui_lib.fail_test(msg)\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if profile.hardwaretype not in sht_selected:\n logger.warn(\"the server hardware type of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.hardwaretype))\n # set boot mode if attribute 'manageBootMode' is true - only for Gen 9 (or later) server:\n FusionUIBase.select_view_by_name('Boot Settings')\n if 'gen9' in sht_selected.lower():\n logger.info(\"setting 'Boot mode' for Gen 9 specially ...\")\n if getattr(profile, 'manageBootMode', '').lower() == 'true':\n CommonOperationServerProfile.BootSettings.tick_manage_boot_mode()\n CommonOperationServerProfile.BootSettings.select_boot_mode_by_text(profile.bootMode) if hasattr(profile, 'bootMode') else None\n if getattr(profile, 'bootMode', '').lower() == 'legacy bios':\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n else:\n CommonOperationServerProfile.BootSettings.set_non_legacy_bios_mode_boot_order(profile, hardware_type=sht_selected)\n else:\n CommonOperationServerProfile.BootSettings.untick_manage_boot_mode()\n else:\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n\n EditServerProfile.click_ok_button()\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that 
blocks profile being edited. \"\n \"Test will skip this profile '%s' and continue to edit other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.newName, 'Update', timeout=300, fail_if_false=False)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.newName, timeout=180, fail_if_false=False)\n logger.info(\"edited server profile '%s' successfully\" % profile.newName)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, hence test is considered PASS\" % not_exists)\n return True\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, hence test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, but %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def edit_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n # { below 3 lines were to avoid a failure caused by 2 CR that had been fixed. leave the 3 lines here as commented in case regression issue in future\n # will remove below once 2 CRs fixed\n # EditServerProfile.select_action_edit()\n # EditServerProfile.wait_edit_server_profile_dialog_shown()\n # EditServerProfile.click_cancel_button()\n # } here is a workaround for 1st time editing server profile (sp template as well) has defect that,\n # can't close dialog by OK/Cancel button, and SAN Storage's OS Type can't be read correctly,\n # so open dialog and use Cancel button to close, then everything goes well when 2nd time open Edit dialog\n\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfile.input_name(profile.newName) if getattr(profile, 'newName', None) is not None else None\n EditServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n # 20151021 Alex Ma - discussed with Tony/Alex C and get below agreed:\n # - if 'hardwareType' is defined in test data, then will firstly select/change 'Server hardware type' from UI,\n # then select/change 'Server hardware' if 'server' is defined in test data\n # - if 'hardwareType' is not defined in test data, then will only check 'server' attribute to decide if select/change 'Server hardware' from UI\n if getattr(profile, 'hardwareType', None) is not None:\n if profile.hardwareType not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(profile.hardwareType, timeout=5, fail_if_false=False)\n elif getattr(profile, 'ref_sht_server', None) is not None:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.ref_sht_server)\n if hardware_type not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(hardware_type, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfile.get_selected_enclosure_group(profile.server)\n if getattr(profile, 'enclgroup', None) is not None:\n if 
profile.enclgroup not in eg_selected:\n logger.warn(\"enclosure group '%s' of server '%s' is NOT consistent with test data '%s'\" % (eg_selected, profile.server, profile.enclgroup))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(profile.enclgroup, timeout=5, fail_if_false=False)\n\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n EditServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfile.Advanced.set(profile)\n\n EditServerProfile.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=300) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n profile_name = profile.newName if getattr(profile, 'newName', None) is not None else profile.name\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=timeout, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n fail_if_not_ok = not getattr(profile, 'IgnoreWaitForStatusOK', '').lower() == 'true'\n # control whether to stop the case when server profile status is not ok.\n CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, timeout=500, fail_if_false=fail_if_not_ok)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n else:\n logger.info(\"edit server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n edited += 1\n else:\n logger.warn(\"'wait_edit_server_profile_dialog_disappear' = FALSE, skip to next profile ... 
\")\n EditServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n ui_lib.fail_test(\"%s not-existing server profile(s) is skipped being edited, %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def server_profile(self, server_profile):\n\n self._server_profile = server_profile",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass",
"def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)",
"def fusion_api_patch_server_profile(self, body, uri, api=None, headers=None):\n return self.profile.patch(body, uri, api, headers)",
"def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):\n return self.profile_template.update(body, uri, api, headers)",
"def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })",
"def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)",
"def update_my_profile(\n body: Optional[UserProfileUpdate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = UpdateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def update_profile_from_template(profile):\n selenium2lib = ui_lib.get_s2l()\n if not select_server_profile(profile):\n ui_lib.fail_test(\"Failed to select profile %s\" % profile)\n\n logger._log_to_console_and_log_file(\"power off server before updating profile from template\")\n profile_attributes = get_server_profile_attributes(profile, None)\n if profile_attributes[\"server hardware\"] == \"unassigned\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Cannot power off Server Profile '%s' due to unassigned server hardware\" % profile)\n elif profile_attributes[\"server power\"].lower() == \"on\":\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF):\n logger._log_to_console_and_log_file(\"Powering off selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWEROFF_PRESS_HOLD)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"Off\", PerfConstants.PROFILE_POWER_VALIDATION)\n logger._log_to_console_and_log_file(\"Successfully powered off Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n ui_lib.fail_test(\"Power off option is not available in the Actions menu\")\n\n # Select update from template option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_UPDATE_FROM_TEMPLATE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MSG_TO_POWER_OFF_SERVER):\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n else:\n ui_lib.fail_test(\"Server should be powered off to update profile\")\n logger.debug(\"waiting for progress bar indicates to 'ok'\")\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_NOTIFICATION_OK, 300):\n logger._log_to_console_and_log_file(\"Server profile '%s' updated successfully from template\" % profile)\n return True\n else:\n ui_lib.fail_test(\"Failed to update server profile '%s' from template\" % profile)",
"def update_server_profile_firmware(*profile_obj):\n logger._log_to_console_and_log_file(\"Update firmware for Server Profiles\")\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n selenium2lib = ui_lib.get_s2l()\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.name not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.name)\n continue\n # Select & Edit Server Profile\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n # Adding firmware baseline\n if profile.has_property(\"manageFirmware\") and profile.manageFirmware == \"true\":\n logger._log_to_console_and_log_file(\"Selecting firmware baseline..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_DROPDOWN_BTN_FIRMWARE_BASELINE)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_COMBO_FIRMWARE_BASELINE_LIST % profile.spp)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_COMBO_FIRMWARE_BASELINE_LIST % profile.spp)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DROPDOWN_FIRMWARE_BASELINE)\n selectedFW = selenium2lib.get_text(FusionServerProfilesPage.ID_DROPDOWN_FIRMWARE_BASELINE)\n logger._log_to_console_and_log_file(\"Selected firmware is %s \" % selectedFW)\n if not selectedFW == profile.spp:\n logger._warn(\"Failed to select preferred firmware bundle..'\" + profile.spp + \"' at the edit page\")\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_BTN_CONFIRM_UPDATE_FIRMWARE, PerfConstants.PROFILE_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CONFIRM_UPDATE_FIRMWARE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MAIN_PAGE, PerfConstants.PROFILE_ACTIVITY):\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_POPUP, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n error_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_MSG)\n logger._warn(\"Selected Bay: '\" + profile.name + \"' has encountered an error with the message : '\" + error_msg + \"' , may be the hardware is being managed by another system\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_UPDATE_FIRMWARE)\n logger._log_to_console_and_log_file(\"Firmware Update canceled\")\n continue\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_CHANGING, PerfConstants.PROFILE_ACTIVITY):\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MAIN_PAGE)\n ui_lib.wait_for_element_visible(FusionDashboardPage.ID_LINK_ACTIVITY, PerfConstants.ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionDashboardPage.ID_LINK_ACTIVITY)\n if ui_lib.wait_for_element(FusionServerProfilesPage.ID_NEW_ACTIVITY_PROGRESS % profile.name, PerfConstants.FIRMWARE_VALIDATION):\n start_time = 
selenium2lib.get_text(FusionServerProfilesPage.ID_NEW_ACTIVITY_TIMESTAMP % profile.name)\n logger._log_to_console_and_log_file(start_time)\n logger._log_to_console_and_log_file(\"Update Server Profile Firmware %s started......... \" % profile.name)\n if ui_lib.wait_for_element(FusionServerProfilesPage.ID_NEW_ACTIVITY_SUCCESS % (profile.name, start_time), PerfConstants.FIRMWARE_FAIL_PASS_VALIDATION):\n logger._log_to_console_and_log_file(\"Updating Server Profile Firmware %s done successfully\" % profile.name)\n elif ui_lib.wait_for_element(FusionServerProfilesPage.ID_NEW_ACTIVITY_ERROR % (profile.name, start_time), PerfConstants.FIRMWARE_ERROR_VALIDATION):\n logger._log_to_console_and_log_file(\"Update Server Profile Firmware %s done with errors\" % profile.name)\n else:\n logger._log_to_console_and_log_file(\"Update Server Profile Firmware %s done with warnings\" % profile.name)\n else:\n logger._log_to_console_and_log_file(\"Selected Bay: '\" + profile.name + \"' has already been updated with the firmware baseline : '\" + profile.spp + \"'\")\n continue\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_UPDATE_FIRMWARE)\n logger._log_to_console_and_log_file(\"Firmware Update canceled\")",
"def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })",
"def update_profile(self, channels=None): # pragma: no cover\n pass",
"def setprofile(variable, value, account, pair):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n keys = []\n values = []\n if pair:\n for p in pair:\n key, value = p.split(\"=\")\n keys.append(key)\n values.append(value)\n if variable and value:\n keys.append(variable)\n values.append(value)\n\n profile = Profile(keys, values)\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n\n json_metadata = Profile(acc[\"json_metadata\"] if acc[\"json_metadata\"] else {})\n json_metadata.update(profile)\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)",
"def update_profile(username):\n\n description = request.json.get('description')\n token = request.headers.get('token')\n\n if description is None:\n return jsonify({'message': 'New description not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"You may not edit others profiles\"}), 404\n\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n Profiles[username]['description'] = description\n return Profiles[username]",
"def update_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def _edit_server_hardware(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n if not selenium2lib._is_element_present(FusionServerHardwarePage.ID_PAGE_LABEL):\n base_page.navigate_base(FusionServerHardwarePage.ID_PAGE_LABEL,\n FusionUIBaseElements.ID_MENU_LINK_SERVER_HARDWARE, \"css=span.hp-page-item-count\")\n if not serverhardware.power_off_server_by_name(profile.server):\n logger._warn(\"Failed to powerOff the server %s\" % profile.server)\n logger._warn(\"Can't proceed with server profile creation on server %s\" % profile.server)\n continue\n # Navigating to Server profile page\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n ui_lib.wait_for_element(FusionUIBaseElements.ID_MAIN_MENU_CONTROL, PerfConstants.DEFAULT_SYNC_TIME)\n navigate()\n\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.profilename not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.profilename)\n continue\n if profile.server == \"\":\n logger._warn(\"Mandatory fields to edit server hardware can't be empty\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._log_to_console_and_log_file(\"Server is not powered off, and switching off now\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWER_PRESS_AND_HOLD)\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_SERVER_POWER_OFF_VALIDATE, PerfConstants.SERVER_POWER_OFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._warn(\"Failed to power off the server %s\" % profile.server)\n else:\n logger._log_to_console_and_log_file(\"Successfully server %s 
is powered off\" % profile.server)\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION)\n # New Code\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION):\n errMsg = selenium2lib._get_text(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION_CONTENT)\n logger._warn(errMsg)\n logger._warn(\"Unable to edit profile server hardware %s\" % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_SERVER_PROFILE)\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n strTimeStamp = selenium2lib._get_text(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n logger._log_to_console_and_log_file(strTimeStamp)\n\n # Verify profile server hardware updation status in server profile page (Under Activity tab)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp), PerfConstants.CREATE_SERVER_PROFILE_TIME)\n\n if selenium2lib._is_element_present(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp)):\n logger._log_to_console_and_log_file(\"Server profile '%s' is edited successfully\" % profile.profilename)\n else:\n logger._warn(\"Failed to edit server profile '%s' hardware\" % profile.profilename)",
"def copy_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=8)\n\n total = len(profile_obj)\n not_exists = 0\n copied = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile with name '%s' ...\" % profile.source)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.source, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.source)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.source)\n CopyServerProfile.select_action_copy()\n CopyServerProfile.wait_copy_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfile.input_name(profile.name)\n CopyServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n\n if not CopyServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for copying server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip copying profile '%s' and continue to edit other server profiles\" % (profile.server, profile.source))\n continue\n msg = CopyServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be copied successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CopyServerProfile.get_selected_server_hardware_type(profile.server)\n if hasattr(profile, 'hardwareType'):\n hardware_type = profile.hardwareType\n else:\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n else:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.server)\n\n if str(hardware_type) not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CopyServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfile.Advanced.set(profile)\n\n CopyServerProfile.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfile.wait_copy_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=1800, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile.source, profile.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to copy! 
all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, %s profile(s) left is failed being copied \" % (not_exists, total - copied - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def update_apero_profile(params: Dict[str, Any], profile: int) -> Any:\n # deal with profile 1 or profile 2\n if profile == 1:\n profile_path = params['profile1']\n install_path = params.get('apero install 1', None)\n elif profile == 2:\n profile_path = params['profile2']\n install_path = params.get('apero install 2', None)\n else:\n emsg = 'profile must be 1 or 2'\n raise AperoCopyError(emsg)\n # use os to add DRS_UCONFIG to the path\n os.environ['DRS_UCONFIG'] = profile_path\n # allow getting apero\n if install_path is not None:\n sys.path.append(install_path)\n # load apero modules\n from apero.base import base\n from apero.core import constants\n from apero.core.constants import param_functions\n from apero.core.utils import drs_startup\n # reload DPARAMS and IPARAMS\n base.DPARAMS = base.load_database_yaml()\n base.IPARAMS = base.load_install_yaml()\n # ------------------------------------------------------------------\n apero_params = constants.load(cache=False)\n # invalidate cache\n param_functions.CONFIG_CACHE = dict()\n # set apero pid\n apero_params['PID'], apero_params['DATE_NOW'] = drs_startup.assign_pid()\n # no inputs\n apero_params['INPUTS'] = dict()\n apero_params['OBS_DIR'] = None\n # make sure parameters is reloaded (and not cached)\n return apero_params",
"def set_profile_version(context, profile_id, version):\n\n check_profile_id(profile_id)\n ps = getToolByName(context, 'portal_setup')\n\n ps.setLastVersionForProfile(profile_id, unicode(version))\n assert(ps.getLastVersionForProfile(profile_id) == (version, ))\n print \"Set version for '%s' to '%s'.\" % (profile_id, version)",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass",
"def edit_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n edited = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n continue\n\n # get new server hardware type for edit\n enclosure_group = profile_template.enclgroup if getattr(profile_template, 'enclgroup', None) is not None else None\n sht_new = None\n if getattr(profile_template, 'new_sht_ref_server', None) is not None:\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.new_sht_ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_new = get_type_of_server_hardware(profile_template.new_sht_ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n elif getattr(profile_template, 'hardwareType', None) is not None:\n sht_new = profile_template.hardwareType\n\n # open Edit SPT dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.name)\n\n EditServerProfileTemplate.select_action_edit()\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfileTemplate.input_name(profile_template.newName) if getattr(profile_template, 'newName', None) is not None else None\n EditServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if sht_new is not None and sht_new not in sht_selected:\n logger.info(\"server hardware type '%s' is NOT consistent with current value '%s'\" % (sht_new, sht_selected))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(sht_new, enclosure_group, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfileTemplate.get_selected_enclosure_group()\n if enclosure_group is not None and enclosure_group not in eg_selected:\n logger.warn(\"enclosure group '%s' is NOT consistent with test data '%s'\" % (eg_selected, enclosure_group))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(enclosure_group, timeout=5, fail_if_false=False)\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n EditServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if 
getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection().set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfileTemplate.Advanced.set(profile_template)\n\n EditServerProfileTemplate.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile_template.newName if getattr(profile_template, 'newName', None) is not None else profile_template.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to edit! all %s server profile template(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile template(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, %s profile template(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"async def test_update(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps({'name': 'new name'}).encode('utf-8')"
] | [
"0.6803491",
"0.67460173",
"0.66761166",
"0.66674477",
"0.65307075",
"0.64592654",
"0.6429802",
"0.6338764",
"0.63306314",
"0.63199794",
"0.6299867",
"0.6223881",
"0.6159896",
"0.6155287",
"0.6135783",
"0.6112417",
"0.6095837",
"0.60589975",
"0.60271895",
"0.6017078",
"0.6003341",
"0.5944146",
"0.5924049",
"0.5906811",
"0.5870628",
"0.58649594",
"0.58610964",
"0.5844467",
"0.58306974",
"0.58253765"
] | 0.72424406 | 0 |
Deletes server profiles in bulk based on name OR uri. If name AND uri are omitted, ALL profiles are deleted. [Arguments] | def fusion_api_delete_server_profile(self, name=None, uri=None, param='', api=None, headers=None):
return self.profile.delete(name=name, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bak_delete_all_appliance_server_profiles():\n selenium2lib = ui_lib.get_s2l()\n \"\"\" Navigate to Network Page \"\"\"\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n # get the list of networks\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_PROFILE_LIST)\n delete_server_profile([el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)])",
"def fusion_api_delete_server_profile_template(self, name=None, uri=None, api=None, headers=None):\n return self.profile_template.delete(name, uri, api, headers)",
"def people_delete(self, profiles=None, query_params=None, timezone_offset=None, ignore_alias=True, backup=True,\n backup_file=None):\n return self.people_operation('$delete', '', profiles=profiles, query_params=query_params,\n timezone_offset=timezone_offset, ignore_alias=ignore_alias, backup=backup,\n backup_file=backup_file)",
"def delete_all_appliance_server_profiles(wait_ongoing_task_complete=False):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n profile_name_list = CommonOperationServerProfile.get_server_profile_list()\n if wait_ongoing_task_complete is True:\n CommonOperationServerProfile.wait_server_profile_task_complete()\n\n total = len(profile_name_list)\n not_exists = 0\n deleted = 0\n\n for n, profile_name in enumerate(profile_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile named '%s'\" % profile_name)\n if not VerifyServerProfile.verify_server_profile_exist(profile_name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile_name)\n not_exists += 1\n else:\n if not delete_server_profile_by_name(profile_name, force_delete=True):\n logger.warn(\"server profile '%s' is NOT deleted successfully.\" % profile_name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to delete! all %s server profile(s) is NOT existing, test is considered PASS\" % not_exists)\n return True\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s non-existing server profile(s) is skipped being deleted, test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile(s) is skipped being deleted, %s profile(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def UnshareProfiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n batch_size = max(self.batch_size, 100)\n index = 0\n result = BatchResult()\n while index < len(self._profiles):\n request_feed = gdata.contacts.data.ProfilesFeed()\n for entry in self._profiles[index:index + batch_size]:\n entry.status = gdata.contacts.data.Status(indexed='false')\n request_feed.AddUpdate(entry=entry)\n result_feed = self._gd_client.ExecuteBatchProfiles(request_feed)\n for entry in result_feed.entry:\n if entry.batch_status.code == '200':\n self._profiles[index] = entry\n result.success_count += 1\n else:\n result.error_entries.append(entry)\n result.error_count += 1\n index += 1\n return result",
"def RemoveBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n # Expand bucket name wildcards, if any.\n for uri_str in args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if uri.object_name:\n raise CommandException('\"rb\" command requires a URI with no object '\n 'name')\n print 'Removing %s...' % uri\n uri.delete_bucket(headers)",
"def delete_network_profile(arn=None):\n pass",
"def test_delete_driver_profile_in_list():\n tester = TestClass()\n driver_profiles = tester.populate_driver_profile_list()\n\n assert len(driver_profiles) == 2\n assert driver_profiles[1].get_driver_profile_name() == 'testDriverName'\n\n tester.delete_driver_profile()\n\n assert len(driver_profiles) == 1\n assert driver_profiles[0].get_driver_profile_name() == 'Default'",
"def test_delete_spawning_profile_in_list():\n tester = TestClass()\n spawning_profiles = tester.populate_spawning_profile_list()\n\n assert len(spawning_profiles) == 2\n assert spawning_profiles[1].get_spawning_profile_name() == 'testSpawnName'\n\n tester.delete_spawning_profile()\n\n assert len(spawning_profiles) == 1\n assert spawning_profiles[0].get_spawning_profile_name() == 'Default'",
"def test_delete_driver_profile_not_in_list():\n tester = TestClass()\n driver_profiles = tester.delete_driver_profile()\n\n assert driver_profiles\n assert len(driver_profiles) == 1\n assert driver_profiles[0].get_driver_profile_name() == 'Default'",
"def unset(cls, client, resource, args) :\n try :\n if type(resource) is not list :\n unsetresource = nshttpprofile()\n if type(resource) != type(unsetresource):\n unsetresource.name = resource\n else :\n unsetresource.name = resource.name\n return unsetresource.unset_resource(client, args)\n else :\n if type(resource[0]) != cls :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i]\n else :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i].name\n result = cls.unset_bulk_request(client, unsetresources, args)\n return result\n except Exception as e :\n raise e",
"def delete_network_bulk(self, tenant_id, network_id_list, sync=False):",
"def people_remove(self, value, profiles=None, query_params=None, timezone_offset=None, ignore_alias=False,\n backup=True, backup_file=None):\n return self.people_operation('$remove', value=value, profiles=profiles, query_params=query_params,\n timezone_offset=timezone_offset, ignore_alias=ignore_alias, backup=backup,\n backup_file=backup_file)",
"def test_delete_spawning_profile_not_in_list():\n tester = TestClass()\n spawning_profiles = tester.delete_spawning_profile()\n\n assert spawning_profiles\n\n assert len(spawning_profiles) == 1\n\n assert spawning_profiles[0].get_spawning_profile_name() == 'Default'",
"def delete(name: str):\n profiles = prefect.settings.load_profiles()\n if name not in profiles:\n exit_with_error(f\"Profile {name!r} not found.\")\n\n current_profile = prefect.context.get_settings_context().profile\n if current_profile.name == name:\n exit_with_error(\n f\"Profile {name!r} is the active profile. You must switch profiles before\"\n \" it can be deleted.\"\n )\n\n profiles.remove_profile(name)\n\n verb = \"Removed\"\n if name == \"default\":\n verb = \"Reset\"\n\n prefect.settings.save_profiles(profiles)\n exit_with_success(f\"{verb} profile {name!r}.\")",
"def RemoveObjsCommand(self, args, sub_opts=None, headers=None,\n debug=0):\n continue_on_error = False\n if sub_opts:\n for o, unused_a in sub_opts:\n if o == '-f':\n continue_on_error = True\n # Expand object name wildcards, if any.\n for uri_str in args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if uri.names_container():\n if uri.is_cloud_uri():\n # Before offering advice about how to do rm + rb, ensure those\n # commands won't fail because of bucket naming problems.\n boto.s3.connection.check_lowercase_bucketname(uri.bucket_name)\n uri_str = uri_str.rstrip('/\\\\')\n raise CommandException('\"rm\" command will not remove buckets. To '\n 'delete this/these bucket(s) do:\\n\\tgsutil rm '\n '%s/*\\n\\tgsutil rb %s' % (uri_str, uri_str))\n print 'Removing %s...' % uri\n try:\n uri.delete_key(validate=False, headers=headers)\n except Exception, e:\n if not continue_on_error:\n raise",
"def delete_all_appliance_server_profile_templates():\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n profile_template_name_list = CommonOperationServerProfileTemplate.get_server_profile_template_list()\n\n total = len(profile_template_name_list)\n not_exists = 0\n deleted = 0\n\n for n, profile_template_name in enumerate(profile_template_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template_name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template_name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template_name)\n not_exists += 1\n else:\n if not delete_server_profile_template_by_name(profile_template_name):\n logger.warn(\"server profile template '%s' is NOT deleted successfully.\" % profile_template_name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, test is considered PASS\" % not_exists)\n return True\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def fusion_api_delete_hypervisor_cluster_profile(self, name=None, uri=None, api=None, headers=None):\n return self.cluster_profile.delete(name, uri, api, headers)",
"def delete_network_profile(self, profile):\r\n return self.delete(self.network_profile_path % profile)",
"def delete_tenant_bulk(self, tenant_list, sync=False):",
"def deduplicate_people(self, profiles=None, prop_to_match='$email', merge_props=False, case_sensitive=False,\n backup=True, backup_file=None):\n main_reference = {}\n update_profiles = []\n delete_profiles = []\n\n if profiles is not None:\n profiles_list = Mixpanel._list_from_argument(profiles)\n else:\n # Unless the user provides a list of profiles we only look at profiles which have the prop_to_match set\n selector = '(boolean(properties[\"' + prop_to_match + '\"]) == true)'\n profiles_list = self.query_engage({'where': selector})\n\n if backup:\n if backup_file is None:\n backup_file = \"backup_\" + str(int(time.time())) + \".json\"\n self.export_data(profiles_list, backup_file, append_mode=True)\n\n for profile in profiles_list:\n try:\n match_prop = str(profile[\"$properties\"][prop_to_match])\n except UnicodeError:\n match_prop = profile[\"$properties\"][prop_to_match].encode('utf-8')\n except KeyError:\n continue\n finally:\n try:\n if not case_sensitive:\n match_prop = match_prop.lower()\n except NameError:\n pass\n\n # Ensure each value for the prop we are matching on has a key pointing to an array in the main_reference\n if not main_reference.get(match_prop):\n main_reference[match_prop] = []\n\n # Append each profile to the array under the key corresponding to the value it has for prop we are matching\n main_reference[match_prop].append(profile)\n\n for matching_prop, matching_profiles in main_reference.items():\n if len(matching_profiles) > 1:\n matching_profiles.sort(key=lambda dupe: Mixpanel._dt_from_iso(dupe))\n # We create a $delete update for each duplicate profile and at the same time create a\n # $set_once update for the keeper profile by working through duplicates oldest to newest\n if merge_props:\n prop_update = {\"$distinct_id\": matching_profiles[-1][\"$distinct_id\"], \"$properties\": {}}\n for x in range(len(matching_profiles) - 1):\n delete_profiles.append({'$distinct_id': matching_profiles[x]['$distinct_id']})\n if merge_props:\n prop_update[\"$properties\"].update(matching_profiles[x][\"$properties\"])\n # Remove $last_seen from any updates to avoid weirdness\n if merge_props and \"$last_seen\" in prop_update[\"$properties\"]:\n del prop_update[\"$properties\"][\"$last_seen\"]\n if merge_props:\n update_profiles.append(prop_update)\n\n # The \"merge\" is really just a $set_once call with all of the properties from the deleted profiles\n if merge_props:\n self.people_operation('$set_once', lambda p: p['$properties'], profiles=update_profiles, ignore_alias=True,\n backup=False)\n\n return self.people_operation('$delete', '', profiles=delete_profiles, ignore_alias=True, backup=False)",
"def del_dups(exproot, **kwargs):\n seen_args = []\n seen_names = []\n for jobname, args, results in load_all(exproot):\n if args in seen_args:\n if os.listdir(os.path.join(exproot, jobname)) == [args_filename]:\n print jobname, 'is empty dup of', seen_names[seen_args.index(args)],\n print '... deleting'\n os.remove(os.path.join(exproot, jobname, args_filename))\n os.rmdir(os.path.join(exproot, jobname))\n else:\n print jobname, 'is dup with files of', seen_names[seen_args.index(args)]\n elif args != None:\n seen_args.append(args)\n seen_names.append(jobname)",
"def Delete(url):\n\n prefix = ''.join([url, config_encoder.NAMESPACE_SEPARATOR])\n\n # Remove Test Suites\n test_keys = _GetEntityKeysByPrefix(ndb_models.Test, prefix)\n ndb.delete_multi(test_keys)\n\n # Remove Device Actions\n device_action_keys = _GetEntityKeysByPrefix(ndb_models.DeviceAction, prefix)\n ndb.delete_multi(device_action_keys)\n\n # Remove Test Run Actions\n test_run_action_keys = _GetEntityKeysByPrefix(\n ndb_models.TestRunAction, prefix)\n ndb.delete_multi(test_run_action_keys)\n\n # Remove Config Set Info\n config_set_info_key = mtt_messages.ConvertToKey(ndb_models.ConfigSetInfo, url)\n config_set_info_key.delete()",
"def delete_empty_profile(face_profile_directory):\n for face_profile in os.listdir(face_profile_directory):\n if \".\" not in str(face_profile):\n face_profile = os.path.join(face_profile_directory, face_profile)\n index = 0\n for the_file in os.listdir(face_profile):\n file_path = os.path.join(face_profile, the_file)\n if file_path.endswith(\".png\") or file_path.endswith(\".jpg\") or file_path.endswith(\".jpeg\") or file_path.endswith(\".pgm\"):\n index += 1\n if index == 0 : \n shutil.rmtree(face_profile)\n print(\"\\nDeleted \", face_profile, \" because it contains no images\")\n if index < 2 : \n logging.error(\"\\nFace profile \" + str(face_profile) + \" contains too little images (At least 2 images are needed)\")",
"def DeleteWiredNetworkProfile(self, profilename):\n profilename = misc.to_unicode(profilename)\n print \"Deleting wired profile for \" + str(profilename)\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n if config.has_section(profilename):\n config.remove_section(profilename)\n else:\n return \"500: Profile does not exist\"\n config.write(open(self.wired_conf, \"w\"))\n return \"100: Profile Deleted\"",
"def delete(cls, client, resource) :\n try :\n if type(resource) is not list :\n deleteresource = nshttpprofile()\n if type(resource) != type(deleteresource):\n deleteresource.name = resource\n else :\n deleteresource.name = resource.name\n return deleteresource.delete_resource(client)\n else :\n if type(resource[0]) != cls :\n if (resource and len(resource) > 0) :\n deleteresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n deleteresources[i].name = resource[i]\n else :\n if (resource and len(resource) > 0) :\n deleteresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n deleteresources[i].name = resource[i].name\n result = cls.delete_bulk_request(client, deleteresources)\n return result\n except Exception as e :\n raise e",
"async def test_delete(self):\n rsps = respx.delete(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.delete_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'",
"def delete(self, *names):\n\n return [shard.delete(*keys) for shard, keys\n in self.gather_keys_by_shard(names)]",
"def remove_many_descriptors(self, uuids):",
"def unset(cls, client, resource, args) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tunsetresource = lbprofile()\n\t\t\t\tif type(resource) != type(unsetresource):\n\t\t\t\t\tunsetresource.lbprofilename = resource\n\t\t\t\telse :\n\t\t\t\t\tunsetresource.lbprofilename = resource.lbprofilename\n\t\t\t\treturn unsetresource.unset_resource(client, args)\n\t\t\telse :\n\t\t\t\tif type(resource[0]) != cls :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ lbprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].lbprofilename = resource[i]\n\t\t\t\telse :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ lbprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].lbprofilename = resource[i].lbprofilename\n\t\t\t\tresult = cls.unset_bulk_request(client, unsetresources, args)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e"
] | [
"0.6577274",
"0.65147924",
"0.61541474",
"0.59987676",
"0.566046",
"0.5585588",
"0.5585084",
"0.55501384",
"0.5521699",
"0.5495129",
"0.54838026",
"0.54756314",
"0.5430477",
"0.5426814",
"0.54195154",
"0.54032445",
"0.54020315",
"0.5356326",
"0.53332573",
"0.5325975",
"0.5302437",
"0.52800155",
"0.52756214",
"0.5244322",
"0.5199649",
"0.51797897",
"0.51764774",
"0.51617134",
"0.5160034",
"0.5140883"
] | 0.6795457 | 0 |
Patches a Server Profile. [Arguments] | def fusion_api_patch_server_profile(self, body, uri, api=None, headers=None):
return self.profile.patch(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)",
"def patch(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('patch',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })",
"def server_profile(self, server_profile):\n\n self._server_profile = server_profile",
"def patch(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('patch',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })",
"def patch(self,\n ipfix_l2_profile_id,\n i_pfix_l2_profile,\n ):\n return self._invoke('patch',\n {\n 'ipfix_l2_profile_id': ipfix_l2_profile_id,\n 'i_pfix_l2_profile': i_pfix_l2_profile,\n })",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_profile_from_template(profile):\n selenium2lib = ui_lib.get_s2l()\n if not select_server_profile(profile):\n ui_lib.fail_test(\"Failed to select profile %s\" % profile)\n\n logger._log_to_console_and_log_file(\"power off server before updating profile from template\")\n profile_attributes = get_server_profile_attributes(profile, None)\n if profile_attributes[\"server hardware\"] == \"unassigned\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Cannot power off Server Profile '%s' due to unassigned server hardware\" % profile)\n elif profile_attributes[\"server power\"].lower() == \"on\":\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF):\n logger._log_to_console_and_log_file(\"Powering off selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWEROFF_PRESS_HOLD)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"Off\", PerfConstants.PROFILE_POWER_VALIDATION)\n logger._log_to_console_and_log_file(\"Successfully powered off Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n ui_lib.fail_test(\"Power off option is not available in the Actions menu\")\n\n # Select update from template option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_UPDATE_FROM_TEMPLATE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MSG_TO_POWER_OFF_SERVER):\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n else:\n ui_lib.fail_test(\"Server should be powered off to update profile\")\n logger.debug(\"waiting for progress bar indicates to 'ok'\")\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_NOTIFICATION_OK, 300):\n logger._log_to_console_and_log_file(\"Server profile '%s' updated successfully from template\" % profile)\n return True\n else:\n ui_lib.fail_test(\"Failed to update server profile '%s' from template\" % profile)",
"def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):\n return self.profile_template.update(body, uri, api, headers)",
"def edit_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n # { below 3 lines were to avoid a failure caused by 2 CR that had been fixed. leave the 3 lines here as commented in case regression issue in future\n # will remove below once 2 CRs fixed\n # EditServerProfile.select_action_edit()\n # EditServerProfile.wait_edit_server_profile_dialog_shown()\n # EditServerProfile.click_cancel_button()\n # } here is a workaround for 1st time editing server profile (sp template as well) has defect that,\n # can't close dialog by OK/Cancel button, and SAN Storage's OS Type can't be read correctly,\n # so open dialog and use Cancel button to close, then everything goes well when 2nd time open Edit dialog\n\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfile.input_name(profile.newName) if getattr(profile, 'newName', None) is not None else None\n EditServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n # 20151021 Alex Ma - discussed with Tony/Alex C and get below agreed:\n # - if 'hardwareType' is defined in test data, then will firstly select/change 'Server hardware type' from UI,\n # then select/change 'Server hardware' if 'server' is defined in test data\n # - if 'hardwareType' is not defined in test data, then will only check 'server' attribute to decide if select/change 'Server hardware' from UI\n if getattr(profile, 'hardwareType', None) is not None:\n if profile.hardwareType not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(profile.hardwareType, timeout=5, fail_if_false=False)\n elif getattr(profile, 'ref_sht_server', None) is not None:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.ref_sht_server)\n if hardware_type not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(hardware_type, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfile.get_selected_enclosure_group(profile.server)\n if getattr(profile, 'enclgroup', None) is not None:\n if 
profile.enclgroup not in eg_selected:\n logger.warn(\"enclosure group '%s' of server '%s' is NOT consistent with test data '%s'\" % (eg_selected, profile.server, profile.enclgroup))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(profile.enclgroup, timeout=5, fail_if_false=False)\n\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n EditServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfile.Advanced.set(profile)\n\n EditServerProfile.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=300) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n profile_name = profile.newName if getattr(profile, 'newName', None) is not None else profile.name\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=timeout, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n fail_if_not_ok = not getattr(profile, 'IgnoreWaitForStatusOK', '').lower() == 'true'\n # control whether to stop the case when server profile status is not ok.\n CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, timeout=500, fail_if_false=fail_if_not_ok)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n else:\n logger.info(\"edit server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n edited += 1\n else:\n logger.warn(\"'wait_edit_server_profile_dialog_disappear' = FALSE, skip to next profile ... 
\")\n EditServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n ui_lib.fail_test(\"%s not-existing server profile(s) is skipped being edited, %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def edit_server_profile_for_dl(profile_obj):\n # This keyword is deprecated, please do not use.\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n\n EditServerProfile.input_name(profile.newName)\n EditServerProfile.input_description(profile.desc)\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - verify the server hardware is refreshed to the type name displayed in the drop-down list for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully\")\n ui_lib.fail_test(msg)\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if profile.hardwaretype not in sht_selected:\n logger.warn(\"the server hardware type of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.hardwaretype))\n # set boot mode if attribute 'manageBootMode' is true - only for Gen 9 (or later) server:\n FusionUIBase.select_view_by_name('Boot Settings')\n if 'gen9' in sht_selected.lower():\n logger.info(\"setting 'Boot mode' for Gen 9 specially ...\")\n if getattr(profile, 'manageBootMode', '').lower() == 'true':\n CommonOperationServerProfile.BootSettings.tick_manage_boot_mode()\n CommonOperationServerProfile.BootSettings.select_boot_mode_by_text(profile.bootMode) if hasattr(profile, 'bootMode') else None\n if getattr(profile, 'bootMode', '').lower() == 'legacy bios':\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n else:\n CommonOperationServerProfile.BootSettings.set_non_legacy_bios_mode_boot_order(profile, hardware_type=sht_selected)\n else:\n CommonOperationServerProfile.BootSettings.untick_manage_boot_mode()\n else:\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n\n EditServerProfile.click_ok_button()\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that 
blocks profile being edited. \"\n \"Test will skip this profile '%s' and continue to edit other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.newName, 'Update', timeout=300, fail_if_false=False)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.newName, timeout=180, fail_if_false=False)\n logger.info(\"edited server profile '%s' successfully\" % profile.newName)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, hence test is considered PASS\" % not_exists)\n return True\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, hence test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, but %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def set_profile_version(context, profile_id, version):\n\n check_profile_id(profile_id)\n ps = getToolByName(context, 'portal_setup')\n\n ps.setLastVersionForProfile(profile_id, unicode(version))\n assert(ps.getLastVersionForProfile(profile_id) == (version, ))\n print \"Set version for '%s' to '%s'.\" % (profile_id, version)",
"def copy_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=8)\n\n total = len(profile_obj)\n not_exists = 0\n copied = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile with name '%s' ...\" % profile.source)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.source, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.source)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.source)\n CopyServerProfile.select_action_copy()\n CopyServerProfile.wait_copy_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfile.input_name(profile.name)\n CopyServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n\n if not CopyServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for copying server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip copying profile '%s' and continue to edit other server profiles\" % (profile.server, profile.source))\n continue\n msg = CopyServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be copied successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CopyServerProfile.get_selected_server_hardware_type(profile.server)\n if hasattr(profile, 'hardwareType'):\n hardware_type = profile.hardwareType\n else:\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n else:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.server)\n\n if str(hardware_type) not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CopyServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfile.Advanced.set(profile)\n\n CopyServerProfile.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfile.wait_copy_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=1800, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile.source, profile.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to copy! 
all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, %s profile(s) left is failed being copied \" % (not_exists, total - copied - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def update_server_profile_firmware(*profile_obj):\n logger._log_to_console_and_log_file(\"Update firmware for Server Profiles\")\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n selenium2lib = ui_lib.get_s2l()\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.name not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.name)\n continue\n # Select & Edit Server Profile\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n # Adding firmware baseline\n if profile.has_property(\"manageFirmware\") and profile.manageFirmware == \"true\":\n logger._log_to_console_and_log_file(\"Selecting firmware baseline..\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_DROPDOWN_BTN_FIRMWARE_BASELINE)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_COMBO_FIRMWARE_BASELINE_LIST % profile.spp)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_COMBO_FIRMWARE_BASELINE_LIST % profile.spp)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DROPDOWN_FIRMWARE_BASELINE)\n selectedFW = selenium2lib.get_text(FusionServerProfilesPage.ID_DROPDOWN_FIRMWARE_BASELINE)\n logger._log_to_console_and_log_file(\"Selected firmware is %s \" % selectedFW)\n if not selectedFW == profile.spp:\n logger._warn(\"Failed to select preferred firmware bundle..'\" + profile.spp + \"' at the edit page\")\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_BTN_CONFIRM_UPDATE_FIRMWARE, PerfConstants.PROFILE_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CONFIRM_UPDATE_FIRMWARE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MAIN_PAGE, PerfConstants.PROFILE_ACTIVITY):\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_POPUP, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n error_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_MSG)\n logger._warn(\"Selected Bay: '\" + profile.name + \"' has encountered an error with the message : '\" + error_msg + \"' , may be the hardware is being managed by another system\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_UPDATE_FIRMWARE)\n logger._log_to_console_and_log_file(\"Firmware Update canceled\")\n continue\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_CHANGING, PerfConstants.PROFILE_ACTIVITY):\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MAIN_PAGE)\n ui_lib.wait_for_element_visible(FusionDashboardPage.ID_LINK_ACTIVITY, PerfConstants.ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionDashboardPage.ID_LINK_ACTIVITY)\n if ui_lib.wait_for_element(FusionServerProfilesPage.ID_NEW_ACTIVITY_PROGRESS % profile.name, PerfConstants.FIRMWARE_VALIDATION):\n start_time = 
selenium2lib.get_text(FusionServerProfilesPage.ID_NEW_ACTIVITY_TIMESTAMP % profile.name)\n logger._log_to_console_and_log_file(start_time)\n logger._log_to_console_and_log_file(\"Update Server Profile Firmware %s started......... \" % profile.name)\n if ui_lib.wait_for_element(FusionServerProfilesPage.ID_NEW_ACTIVITY_SUCCESS % (profile.name, start_time), PerfConstants.FIRMWARE_FAIL_PASS_VALIDATION):\n logger._log_to_console_and_log_file(\"Updating Server Profile Firmware %s done successfully\" % profile.name)\n elif ui_lib.wait_for_element(FusionServerProfilesPage.ID_NEW_ACTIVITY_ERROR % (profile.name, start_time), PerfConstants.FIRMWARE_ERROR_VALIDATION):\n logger._log_to_console_and_log_file(\"Update Server Profile Firmware %s done with errors\" % profile.name)\n else:\n logger._log_to_console_and_log_file(\"Update Server Profile Firmware %s done with warnings\" % profile.name)\n else:\n logger._log_to_console_and_log_file(\"Selected Bay: '\" + profile.name + \"' has already been updated with the firmware baseline : '\" + profile.spp + \"'\")\n continue\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_UPDATE_FIRMWARE)\n logger._log_to_console_and_log_file(\"Firmware Update canceled\")",
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def patch(self,\n ipfix_l2_collector_profile_id,\n i_pfix_l2_collector_profile,\n ):\n return self._invoke('patch',\n {\n 'ipfix_l2_collector_profile_id': ipfix_l2_collector_profile_id,\n 'i_pfix_l2_collector_profile': i_pfix_l2_collector_profile,\n })",
"def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)",
"def patch(self,\n port_mirroring_profile_id,\n port_mirroring_profile,\n ):\n return self._invoke('patch',\n {\n 'port_mirroring_profile_id': port_mirroring_profile_id,\n 'port_mirroring_profile': port_mirroring_profile,\n })",
"def verify_can_edit_server_profile_general_info_when_server_power_on(profile_obj):\n\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile.name)\n\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfile.input_name(profile.newName) if getattr(profile, 'newName', None) is not None else None\n EditServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n\n # Server hardware must be \"power\" on status\n if not VerifyServerProfile.is_power_on_error_visible_when_edit_server_profile(profile.server, 10):\n logger.warn(\"Server hardware '%s' is not 'Powered on, please power on it\" % profile.server)\n continue\n\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if getattr(profile, 'hardwareType', None) is not None:\n if profile.hardwareType not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(profile.hardwareType, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfile.get_selected_enclosure_group(profile.server)\n if profile.enclgroup not in eg_selected:\n logger.warn(\"enclosure group '%s' of server '%s' is NOT consistent with test data '%s'\" % (eg_selected, profile.server, profile.enclgroup))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(profile.enclgroup, timeout=5, fail_if_false=False)\n\n # EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=False)\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n EditServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n logger.warn(\"Only connection name is allowed to modification\")\n # add connections\n CommonOperationServerProfile.Connection().set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 
'SANStorage', None) is not None:\n logger.warn(\"Modify the 'SAN Storage' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.warn(\"Modify the 'BootSettings' section will return error when server power on, so ignore this setting\")\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.warn(\"Modify the 'Advanced' section will return error when server power on, so ignore this setting\")\n\n EditServerProfile.click_ok_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile.newName if getattr(profile, 'newName', None) is not None else profile.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200",
"def patch(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('patch',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })",
"def patch(self,\n dpd_profile_id,\n ip_sec_vpn_dpd_profile,\n ):\n return self._invoke('patch',\n {\n 'dpd_profile_id': dpd_profile_id,\n 'ip_sec_vpn_dpd_profile': ip_sec_vpn_dpd_profile,\n })",
"def patch(self,\n ipfix_dfw_profile_id,\n i_pfixdfw_profile,\n ):\n return self._invoke('patch',\n {\n 'ipfix_dfw_profile_id': ipfix_dfw_profile_id,\n 'i_pfixdfw_profile': i_pfixdfw_profile,\n })",
"def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)",
"def update_apero_profile(params: Dict[str, Any], profile: int) -> Any:\n # deal with profile 1 or profile 2\n if profile == 1:\n profile_path = params['profile1']\n install_path = params.get('apero install 1', None)\n elif profile == 2:\n profile_path = params['profile2']\n install_path = params.get('apero install 2', None)\n else:\n emsg = 'profile must be 1 or 2'\n raise AperoCopyError(emsg)\n # use os to add DRS_UCONFIG to the path\n os.environ['DRS_UCONFIG'] = profile_path\n # allow getting apero\n if install_path is not None:\n sys.path.append(install_path)\n # load apero modules\n from apero.base import base\n from apero.core import constants\n from apero.core.constants import param_functions\n from apero.core.utils import drs_startup\n # reload DPARAMS and IPARAMS\n base.DPARAMS = base.load_database_yaml()\n base.IPARAMS = base.load_install_yaml()\n # ------------------------------------------------------------------\n apero_params = constants.load(cache=False)\n # invalidate cache\n param_functions.CONFIG_CACHE = dict()\n # set apero pid\n apero_params['PID'], apero_params['DATE_NOW'] = drs_startup.assign_pid()\n # no inputs\n apero_params['INPUTS'] = dict()\n apero_params['OBS_DIR'] = None\n # make sure parameters is reloaded (and not cached)\n return apero_params",
"def bak_power_off_server_profile(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n logger._log_to_console_and_log_file(\"\")\n error = 0\n valid_profile_no = 0\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Powering off server profile '%s'\" % profile.name)\n\n # Validate server profiles\n logger._log_to_console_and_log_file(\"Validating Server Profile\")\n profile_name = profile.name\n\n # for profile_name in profile_names:\n profile_attributes = get_server_profile_attributes(profile_name, None)\n if profile_attributes is None:\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Server Profile '%s' does not exist\" % profile_name)\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n elif profile_attributes[\"server hardware\"] == \"unassigned\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Cannot power off Server Profile '%s' due to unassigned server hardware\" % profile_name)\n continue\n\n elif profile_attributes[\"server power\"] == \"Off\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Server Profile '%s' is already powered off\" % profile_name)\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n else:\n valid_profile_no += 1\n\n # Select the profile from the left side table\n\n logger._log_to_console_and_log_file(\"Powering off Server Profile\")\n if not select_server_profile(profile.name):\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Failed to select server profiles\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select Power off option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if selenium2lib._is_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF):\n logger._log_to_console_and_log_file(\"Powering off selected server profile\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWEROFF_PRESS_HOLD)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"Off\", PerfConstants.PROFILE_POWER_VALIDATION):\n logger._log_to_console_and_log_file(\"Successfully powered off Server Profile\")\n else:\n selenium2lib.capture_page_screenshot()\n logger._warn('Timeout for wait server profile is powered off')\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n else:\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Power off option is not available in the Actions menu\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Build Activity Message\n args = {}\n args[\"activity\"] = \"Power Off\"\n args[\"entity\"] = get_server_profile_attributes(profile_name, \"server hardware\")\n # logger._log_to_console_and_log_file(args[\"entity\"])\n args[\"multiple\"] = 0\n\n # Verify Activity\n if not _verify_activity(**args):\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Failed to verify Power Off Activity\")\n selenium2lib.capture_page_screenshot()\n error += 1\n else:\n logger._log_to_console_and_log_file(\"Successfully verified Power Off Activity for Powering Off Profile(s): '%s'\" % profile.name)\n\n if error > 0:\n return False\n return True",
"def patch(self,\n ipfix_collector_profile_id,\n i_pfix_collector_profile,\n ):\n return self._invoke('patch',\n {\n 'ipfix_collector_profile_id': ipfix_collector_profile_id,\n 'i_pfix_collector_profile': i_pfix_collector_profile,\n })",
"def bak_power_on_server_profile(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n logger._log_to_console_and_log_file(\"\")\n error = 0\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n valid_profiles = []\n excluded_profiles = []\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Powering on server profiles '%s'\" % profile.name)\n\n # Validate server profiles\n logger._log_to_console_and_log_file(\"Validating Server Profiles\")\n profile_names = _split_profile_names(profile.name)\n for profile_name in profile_names:\n profile_attributes = get_server_profile_attributes(profile_name, None)\n if profile_attributes is None:\n logger._warn(\"Server Profile '%s' does not exist\" % profile_name)\n selenium2lib.capture_page_screenshot()\n return False\n elif profile_attributes[\"server hardware\"] == \"unassigned\":\n logger._warn(\"Cannot power on Server Profile '%s' due to unassigned server hardware\" % profile_name)\n excluded_profiles.append(profile_name)\n elif profile_attributes[\"server power\"] == \"On\":\n logger._warn(\"Server Profile '%s' is already powered on\" % profile_name)\n excluded_profiles.append(profile_name)\n else:\n valid_profiles.append(profile_name)\n\n if len(valid_profiles) == 0:\n logger._warn(\"All specified Server Profiles are already powered on.\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select the profile from the left side table\n logger._log_to_console_and_log_file(\"Powering on Server Profiles\")\n if not select_server_profile(profile.name):\n logger._warn(\"Failed to select server profiles\")\n selenium2lib.capture_page_screenshot()\n error += 1\n continue\n\n # Select Power off option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if selenium2lib._is_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWERON):\n logger._log_to_console_and_log_file(\"Powering on selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWERON)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"On\", PerfConstants.PROFILE_POWER_VALIDATION)\n BuiltIn().sleep(10)\n logger._log_to_console_and_log_file(\"Successfully powered on Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n logger._log_to_console_and_log_file(\"Power on option is not available in the Actions menu\")\n selenium2lib.capture_page_screenshot()\n error += 1\n\n # Build Activity Message\n args = {}\n args[\"activity\"] = \"Power On\"\n args[\"entity\"] = get_server_profile_attributes(profile_names[0], \"server hardware\") if len(profile_names) == 1 else \"%d server hardware\" % len(profile_names)\n args[\"multiple\"] = len(profile_names) - 1\n if args[\"multiple\"]:\n args[\"completed\"] = valid_profiles if len(valid_profiles) > 1 else [valid_profiles[0]]\n if len(excluded_profiles) > 0:\n args[\"excluded\"] = excluded_profiles if len(excluded_profiles) > 1 else [excluded_profiles[0]]\n\n # Verify Activity\n if not _verify_activity(**args):\n logger._warn(\"Failed to verify Power On Activity\")\n selenium2lib.capture_page_screenshot()\n error += 1\n else:\n logger._log_to_console_and_log_file(\"Successfully verified Power On Activity for Powering On Profile(s): '%s'\" % 
profile.name)\n\n if error > 0:\n return False\n return True",
"def update_server(DisableAutomatedBackup=None, BackupRetentionCount=None, ServerName=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None):\n pass",
"def reset_server_profiles(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n off_or_unsupported = 0\n not_exists = 0\n done_reset = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), len(profile_obj), '-' * 14))\n logger.info(\"reset a server profile named '%s'\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n CommonOperationServerProfile.click_server_profile(profile_name=profile.name, time_for_loading=2)\n if VerifyServerProfile.verify_general_server_power(expect_value='On', timeout=5, fail_if_false=False) is False:\n logger.warn(\"Power state of server profile '%s' is not 'On', 'RESET' action is unavailable.\" % profile.name)\n off_or_unsupported += 1\n else:\n if reset_server_profile_by_name(profile.name) is False:\n logger.warn(\"server profile '%s' is NOT done reset successfully\" % profile.name)\n continue\n else:\n done_reset += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - off_or_unsupported - not_exists == 0:\n logger.warn(\"no server profile to reset! all %s server profile(s) is NOT applicable to reset (already powered off/unsupported/not existing), test is considered PASS\" % off_or_unsupported)\n return True\n else:\n if done_reset < total:\n logger.warn(\"not all of these server profile is successfully reset - %s out of %s reset \" % (done_reset, total))\n if done_reset + off_or_unsupported + not_exists == total:\n logger.warn(\"%s off-or-unsupported server profile(s) is skipped, %s not-existing server profile(s) is skipped, test is considered PASS\" % (off_or_unsupported, not_exists))\n return True\n else:\n logger.warn(\"%s off-or-unsupported server profile(s) is skipped, %s not-existing server profile(s) is skipped, \"\n \"%s left is failed being reset \" % (off_or_unsupported, not_exists, total - done_reset - off_or_unsupported - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully done reset - %s out of %s \" % (done_reset, total))\n return True",
"def _edit_server_hardware(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n if not selenium2lib._is_element_present(FusionServerHardwarePage.ID_PAGE_LABEL):\n base_page.navigate_base(FusionServerHardwarePage.ID_PAGE_LABEL,\n FusionUIBaseElements.ID_MENU_LINK_SERVER_HARDWARE, \"css=span.hp-page-item-count\")\n if not serverhardware.power_off_server_by_name(profile.server):\n logger._warn(\"Failed to powerOff the server %s\" % profile.server)\n logger._warn(\"Can't proceed with server profile creation on server %s\" % profile.server)\n continue\n # Navigating to Server profile page\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n ui_lib.wait_for_element(FusionUIBaseElements.ID_MAIN_MENU_CONTROL, PerfConstants.DEFAULT_SYNC_TIME)\n navigate()\n\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.profilename not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.profilename)\n continue\n if profile.server == \"\":\n logger._warn(\"Mandatory fields to edit server hardware can't be empty\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._log_to_console_and_log_file(\"Server is not powered off, and switching off now\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWER_PRESS_AND_HOLD)\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_SERVER_POWER_OFF_VALIDATE, PerfConstants.SERVER_POWER_OFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._warn(\"Failed to power off the server %s\" % profile.server)\n else:\n logger._log_to_console_and_log_file(\"Successfully server %s 
is powered off\" % profile.server)\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION)\n # New Code\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION):\n errMsg = selenium2lib._get_text(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION_CONTENT)\n logger._warn(errMsg)\n logger._warn(\"Unable to edit profile server hardware %s\" % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_SERVER_PROFILE)\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n strTimeStamp = selenium2lib._get_text(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n logger._log_to_console_and_log_file(strTimeStamp)\n\n # Verify profile server hardware updation status in server profile page (Under Activity tab)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp), PerfConstants.CREATE_SERVER_PROFILE_TIME)\n\n if selenium2lib._is_element_present(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp)):\n logger._log_to_console_and_log_file(\"Server profile '%s' is edited successfully\" % profile.profilename)\n else:\n logger._warn(\"Failed to edit server profile '%s' hardware\" % profile.profilename)"
] | [
"0.63377327",
"0.6052345",
"0.5898319",
"0.58592176",
"0.576985",
"0.57568383",
"0.57484424",
"0.56993264",
"0.56706333",
"0.56687814",
"0.5656452",
"0.55615526",
"0.5515507",
"0.55121225",
"0.53895104",
"0.537306",
"0.5353549",
"0.5347112",
"0.529797",
"0.52950746",
"0.5281274",
"0.52777153",
"0.5266162",
"0.5259422",
"0.52508366",
"0.5231649",
"0.5228997",
"0.52139246",
"0.520332",
"0.5195596"
] | 0.6953699 | 0 |
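The edit/power keywords in the negatives above all share one iterate-and-tally shape: walk the profile list, skip entries that fail a precondition, count successes, and decide pass/fail in a summary block. A minimal sketch of that pattern follows; every name in it is illustrative and not part of the dataset's API.

```python
# Minimal sketch of the iterate-and-tally pattern shared by the profile
# keywords above. `exists` and `edit` stand in for the UI operations.
def run_over_profiles(profiles, exists, edit):
    total, not_exists, edited = len(profiles), 0, 0
    for profile in profiles:
        if not exists(profile):   # skip profiles that are not present
            not_exists += 1
            continue
        if edit(profile):         # attempt the operation, count successes
            edited += 1
    # Pass only when every profile existed and every edit succeeded.
    return not_exists == 0 and edited == total

# Example: both profiles exist and both edits succeed -> True
print(run_over_profiles(['p1', 'p2'], lambda p: True, lambda p: True))
```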
Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available to a server profile along with their respective ports. [Arguments] | def fusion_api_get_server_profiles_available_networks(self, uri=None, param='', api=None, headers=None):
param = '/available-networks%s' % param
return self.profile.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))",
"def show_networks():\n return get_networks()",
"def enumerate_network(arg):\n\n network = ip_network(arg, strict=False)\n data = list(map(str, network.hosts()))\n data.insert(0, str(network.network_address))\n if network.prefixlen != network.max_prefixlen:\n data.append(str(network.broadcast_address))\n return data",
"def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"",
"def fusion_api_get_ethernet_networks(self, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.get(uri=uri, api=api, headers=headers, param=param)",
"def list():\n\n\treturn netifaces.interfaces()",
"def collectNet(self):\n network = self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)",
"def list_network_profiles(self, **params):\r\n return self.get(self.network_profiles_path, params=params)",
"def get_net_interfaces():\n import netifaces\n return netifaces.interfaces()",
"def getNets(self):\n\t\treturn NetLoader.listNetworks()",
"def _ifList(self):\n bNetworks = False\n for cmd in self.lstCmd[1:]:\n if cmd == 'networks' or cmd == 'n':\n bNetworks = True\n\n print 'enum interfaces ...'\n with self.wlan.enumInterfaces() as wlanIfData:\n # find each available network for each interface\n # for n,iface in enumerate(wlanIfData.ifaces):\n for n,iface in enumerate(wlanIfData):\n print \"%d : %-40s state:%s\" % (n,iface.strInterfaceDescription, iface.getState())\n if bNetworks:\n with self.wlan.getAvailableNetworks(iface) as wlanNetData:\n print ' %-15s %-30s %-15s %s' % ('Profile', 'SSID','Qual (dbm)','C:Connectable S:Secure P:Profile')\n print ' %-15s %-30s %-15s' % ('=======', '====','==========')\n for nw in wlanNetData:\n sConn = ' '\n sDesc = ''\n if nw.isConnectable():\n sDesc += 'C'\n if nw.isSecure():\n sDesc += 'S'\n if nw.isConnected():\n sConn = '*'\n if nw.hasProfile():\n sDesc += 'P'\n print ' %-15s %-30s %3d%% %.1f %s %s' % (nw.getProfileName(), nw.getSSID(), nw.getSignalQuality(), nw.getSignalQualityInDBM(), sConn, sDesc)",
"def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()",
"def linux():\n command = \"cat /etc/NetworkManager/system-connections/*\"\n networks = subprocess.check_output(command, shell=True).decode(\"utf-8\")\n return networks",
"def list_network_profiles(arn=None, type=None, nextToken=None):\n pass",
"def list_networks(session):\n # type: (Session) -> List[Dict[str, Any]]\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n return _get_list(session, url_tail)",
"def list_networks():\n return __sets.keys()",
"def list_networks(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('networks', self.networks_path, retrieve_all,\r\n **_params)",
"def fetch_router_list(args):\n nd = NetDevices(production_only=opts.nonprod)\n ret = []\n blocked_groups = []\n if args:\n for arg in args:\n # Try to find the device, but fail gracefully if it can't be found\n device = device_match(arg)\n if not pass_filters(device) or device is None:\n continue\n ret.append(device)\n\n else:\n for entry in nd.itervalues():\n if entry.owningTeam in blocked_groups:\n continue\n if not pass_filters(entry):\n continue\n ret.append(entry)\n\n return sorted(ret, reverse=True)",
"def do_network_list(cs, args):\n opts = {}\n opts['container'] = args.container\n opts = zun_utils.remove_null_parms(**opts)\n networks = cs.containers.network_list(**opts)\n zun_utils.list_container_networks(networks)",
"def GetNetworks(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n networks = self._SendRequest(HTTP_GET, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return networks\n else:\n return [n[\"name\"] for n in networks]",
"def get_network_interfaces(self):\n return self.mycam.devicemgmt.GetNetworkInterfaces()",
"def get_network_config2():\n interfaces = get_interfaces()\n ips = [get_ip_address2(ip) for ip in interfaces]\n return dict(zip(interfaces,ips))",
"def netstat(self):\n \n command = 'netstat -utn'\n lines = subprocess.check_output(command, shell=True).split('\\n')[2:]\n \n\tports = {'tcp':[], 'udp':[]}\n\tfor line in lines:\n\t if len(line) < 4:\n\t continue\n\t\t\n\t words = line.split()\n\t port = int(words[3].split(':')[-1])\n\t lst = ports[words[0]]\n\t if port in lst:\n\t continue\n\t lst.append(port)\n\t \n\tports['tcp'].sort()\n\tports['udp'].sort()\n\t\n\treturn ports",
"def do_nic_list(cc, args):\n nics = cc.nic.list()\n names = ['%s (uuid) %s (mac)' % (nic.get('uuid'), nic.get('mac')) for nic in\n nics['nics']]\n cliutils.print_list(names, args.json)",
"def getSDDCnetworks(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n json_response = get_cgw_segments_json(proxy, sessiontoken)\n if json_response != False:\n sddc_networks = json_response['results']\n table = PrettyTable(['Name', 'id', 'Type', 'Network', 'Default Gateway'])\n table_extended = PrettyTable(['Name', 'id','Tunnel ID'])\n for i in sddc_networks:\n if ( i['type'] == \"EXTENDED\"):\n table_extended.add_row([i['display_name'], i['id'], i['l2_extension']['tunnel_id']])\n elif ( i['type'] == \"DISCONNECTED\"):\n table.add_row([i['display_name'], i['id'], i['type'],\"-\", \"-\"])\n else:\n table.add_row([i['display_name'], i['id'], i['type'], i['subnets'][0]['network'], i['subnets'][0]['gateway_address']])\n print(\"Routed Networks:\")\n print(table)\n print(\"Extended Networks:\")\n print(table_extended)\n else:\n print(\"Something went wrong, please try again.\")\n sys.exit(1)",
"def windows10():\n command = \"netsh wlan show profile\"\n # requires .decode(\"utf-8\") if using python3\n networks = subprocess.check_output(command, shell=True).decode(\"utf-8\")\n\n # find all network profiles and display it as a list\n network_names = re.findall(\"(?:Profile\\s*:\\s)(.*)\", networks)\n\n result = \"\"\n for profile_name in network_names:\n command = \"netsh wlan show profile \" + profile_name + \" key=clear\"\n # requires .decode(\"utf-8\") if using python3\n current_result = subprocess.check_output(command,\n shell=True).decode(\"utf-8\")\n result += current_result\n return result",
"def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")"
] | [
"0.6963224",
"0.66442364",
"0.66117615",
"0.65916544",
"0.6482594",
"0.6468238",
"0.6336594",
"0.6286763",
"0.62354195",
"0.61999875",
"0.6190109",
"0.61853945",
"0.617893",
"0.6166727",
"0.611604",
"0.60293716",
"0.5998868",
"0.59686875",
"0.5967716",
"0.5966022",
"0.5935942",
"0.59201944",
"0.59120095",
"0.5902676",
"0.5892864",
"0.5858844",
"0.5854889",
"0.585283",
"0.58483756",
"0.58302003"
] | 0.7096709 | 0 |
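The positive document above wraps a GET on the profile resource with an '/available-networks' suffix. A rough, self-contained sketch of that call is below; the base URL, auth header, and query string are assumptions for illustration, not values confirmed by the dataset.

```python
import requests

# Rough sketch of what the keyword above resolves to: a GET on
# '/rest/server-profiles/available-networks' plus an optional query.
# Endpoint path, headers, and query names are assumed for illustration.
def get_available_networks(base_url, session_token, query=''):
    url = base_url + '/rest/server-profiles/available-networks' + query
    headers = {'Auth': session_token, 'X-API-Version': '200'}
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    return resp.json()
```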
Retrieves the list of the storage systems and their associated volumes that are available to the server profile based on the given server hardware type and enclosure group. [Arguments] | def fusion_api_get_server_profiles_available_storage_systems(self, uri=None, param='', api=None, headers=None):
param = '/available-storage-systems%s' % param
return self.profile.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_storage_providers_2(self, **kw):\n return (200, {}, {\"storage_provider\":\n {\n \"backend_type\": \"svc\",\n \"volume_count\": \"null\",\n \"service\": {\n \"host_display_name\": \"shared_v7000_1\",\n \"host\": \"shared_v7000_1\",\n \"id\": 4\n },\n \"backend_id\": \"00000200A0204C30\",\n \"health_status\": {\n \"health_value\": \"OK\"\n },\n \"free_capacity_gb\": 873.5,\n \"total_capacity_gb\": 1115.5,\n \"storage_hostname\": \"shared_v7000_1\",\n \"id\": 2,\n \"backend_state\": \"running\"\n }})",
"def get_storage_providers_detail(self, **kw):\n return (200, {}, {\"storage_providers\": [\n {\n \"backend_type\": \"svc\",\n \"volume_count\": \"null\",\n \"service\": {\n \"host_display_name\": \"shared_v7000_1\",\n \"host\": \"shared_v7000_1\",\n \"id\": 4\n },\n \"backend_id\": \"00000200A0204C30\",\n \"health_status\": {\n \"health_value\": \"OK\"\n },\n \"free_capacity_gb\": 873.5,\n \"total_capacity_gb\": 1115.5,\n \"storage_hostname\": \"shared_v7000_1\",\n \"id\": 2,\n \"backend_state\": \"running\"\n },\n {\n \"backend_type\": \"fc\",\n \"volume_count\": \"null\",\n \"service\": {\n \"host_display_name\": \"shared_v7000_1\",\n \"host\": \"shared_v7000_1\",\n \"id\": 4\n },\n \"backend_id\": \"00000200A0204C31\",\n \"health_status\": {\n \"health_value\": \"OK\"\n },\n \"free_capacity_gb\": 73.5,\n \"total_capacity_gb\": 115.5,\n \"storage_hostname\": \"shared_v7000_2\",\n \"id\": 3,\n \"backend_state\": \"running\"\n }\n ]})",
"def fusion_api_get_server_profiles_available_storage_system(self, uri=None, param='', api=None, headers=None):\n param = '/available-storage-system%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))",
"def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)",
"def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)",
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def get_storage_devices(vm_name, filter='vd[a-z]'):\n vm_executor = get_vm_executor(vm_name)\n\n command = 'ls /sys/block | egrep \\\"%s\\\"' % filter\n rc, output, error = vm_executor.run_cmd(cmd=shlex.split(command))\n if rc:\n logger.error(\n \"Error while retrieving storage devices from VM '%s, output is \"\n \"'%s', error is '%s'\", output, error\n )\n return False\n return output.split()",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def get_devices_lsscsi(self):\n\n try:\n message = \"Find SCSI Devices\"\n if self._include_enclosures:\n command = \"lsscsi --generic --transport | egrep 'disk|0x14|enclo'\"\n else:\n command = \"lsscsi --generic --transport | fgrep 'disk|0x14'\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n #\n # Format:\n # $ lsscsi --generic --transport\n # [0] [1] [2] [3] [4]\n # [0:0:0:0] disk sas:0x5000cca25103b471 /dev/sda /dev/sg0 \n # [0:0:1:0] disk sas:0x5000cca251029301 /dev/sdb /dev/sg1 \n # ...\n # [0:0:14:0] enclosu sas:0x5001636001caa0bd - /dev/sg14\n # [7:0:0:0] cd/dvd usb: 1-1.3:1.2 /dev/sr0 /dev/sg15\n #\n # Special Case:\n # Handle lines without a transport (spaces only). (screen scrapping danger)\n # [0:0:10:0] enclosu sas:0x50030480091d71fd - /dev/sg10\n # [1:0:0:0] disk <spaces> /dev/sdk /dev/sg11 <- INTEL disk!\n #\n # Another SNAFU! (and why I hate screen scrapping!!!)\n # [15:0:53597:0]disk sas:0x5000cca23b359649 /dev/sdg /dev/sg6 \n # [15:0:53598:0]disk sas:0x5000cca23b0c0a99 /dev/sdh /dev/sg7 \n # [15:0:53599:0]disk sas:0x5000cca23b0b7531 /dev/sdi /dev/sg8 \n # ...\n # [15:0:53686:0]enclosu sas:0x5000ccab040001bc - /dev/sg165\n # [15:0:53766:0]enclosu sas:0x5000ccab040001fc - /dev/sg144\n #\n # Evidently, the author of lsscsi did not think of consistent output! ;(\n #\n for line in pdata['stdout'].splitlines():\n dinfo = line.split()\n device = dict()\n if len(dinfo) < 5:\n m = re.search('(?P<device>disk|\\(0x14\\)|enclosu)', dinfo[0])\n if m:\n device['Device Type'] = m.group('device')\n sas_index = 1\n dev_index = 2\n sg_index = 3\n else:\n continue\n else:\n device['Device Type'] = dinfo[1]\n sas_index = 2\n dev_index = 3\n sg_index = 4\n\n # lsscsi does not understand 'Host Managed' device type.\n if '0x14' in device['Device Type']:\n device['Device Type'] = 'disk'\n\n # Parse remaining information.\n if 'sas:' in dinfo[sas_index]:\n device['SAS Address'] = dinfo[sas_index][4:]\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: Enclosure has no driver, so reports '-' for name.\n if '/dev/' in dinfo[dev_index]:\n if self._drives and not dinfo[dev_index] in self._drives:\n continue\n if self._exclude and dinfo[dev_index] in self._exclude:\n continue\n device['Linux Device Name'] = dinfo[dev_index]\n else:\n device['Linux Device Name'] = \"\"\n if '/dev/sg' in dinfo[sg_index]:\n device['SCSI Device Name'] = dinfo[sg_index]\n else:\n device['SCSI Device Name'] = \"\"\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def subcmd_getstorage_main(args, parameter_info):\n \n from get_storage_inventory import get_storage_inventory\n result = get_storage_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])",
"def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols",
"def find_all():\n return ItopapiPrototype.find_all(ItopapiStorageSystem)",
"def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage",
"def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage",
"def get_storage_domains(cohesity_client):\n storage_domain_list = cohesity_client.view_boxes.get_view_boxes()\n for domain in storage_domain_list:\n exported_res_dict[\"Storage Domains\"].append(domain.name)\n return storage_domain_list",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;",
"def ListVdisks(self, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks\"\n return self.client.get(uri, None, headers, query_params, content_type)",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def getStorageTypes(self, show_all=False):\n types = getStorageTypes()\n if not show_all:\n types = [x for x in types if x['interface'].providedBy(self)]\n return types",
"def installStorage():\n for name,data in Online.SetupParams.detectors.items():\n s = data['System']\n c = Online.PVSSSystems.controlsMgr(s)\n inst = Installer(c)\n nf = data['SubFarms']\n streams = data['StorageStreams']\n inst.createStorage(name,streams,nf)\n return c",
"def _get_manageable_vols(self, cinder_resources, resource_type,\n marker, limit, offset, sort_keys,\n sort_dirs):\n\n # We can't translate a backend volume name into a Cinder id\n # directly, so we create a map to do it.\n volume_name_to_id = {}\n for resource in cinder_resources:\n key = self._get_backend_volume_name(resource['id'], resource_type)\n value = resource['id']\n volume_name_to_id[key] = value\n\n self.client_login()\n try:\n vols = self.client.get_volumes(filter_type=resource_type)\n except stx_exception.RequestError as ex:\n LOG.exception(\"Error getting manageable volumes.\")\n raise exception.Invalid(ex)\n finally:\n self.client_logout()\n\n entries = []\n for vol in vols.values():\n vol_info = {'reference': {'source-name': vol['name']},\n 'size': vol['size'],\n 'cinder_id': None,\n 'extra_info': None}\n\n potential_id = volume_name_to_id.get(vol['name'])\n if potential_id:\n vol_info['safe_to_manage'] = False\n vol_info['reason_not_safe'] = 'already managed'\n vol_info['cinder_id'] = potential_id\n elif vol['mapped']:\n vol_info['safe_to_manage'] = False\n vol_info['reason_not_safe'] = '%s in use' % resource_type\n else:\n vol_info['safe_to_manage'] = True\n vol_info['reason_not_safe'] = None\n\n if resource_type == 'snapshot':\n origin = vol['parent']\n vol_info['source_reference'] = {'source-name': origin}\n\n entries.append(vol_info)\n\n return volume_utils.paginate_entries_list(entries, marker, limit,\n offset, sort_keys, sort_dirs)",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def get_all_storage(life):\n\treturn [items.get_item_from_uid(item) for item in life['inventory'] if 'max_capacity' in items.get_item_from_uid(item)]",
"def fusion_api_get_storage_pools(self, uri=None, param='', api=None, headers=None):\n return self.pool.get(uri=uri, api=api, headers=headers, param=param)",
"def list_filesystem(self, headers=None, **kwargs):\n logger.debug('Listing filesystem ...')\n resource = 'account'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._get(params=params, headers=headers)\n return response.json() if response.content else {}",
"def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs",
"def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()"
] | [
"0.65530187",
"0.6485663",
"0.63755476",
"0.58408225",
"0.5750117",
"0.57142246",
"0.56885105",
"0.5686779",
"0.5672562",
"0.56290096",
"0.56214625",
"0.561331",
"0.5579664",
"0.5546426",
"0.5521108",
"0.5521108",
"0.5520313",
"0.55181736",
"0.55035865",
"0.5490816",
"0.5487372",
"0.5464381",
"0.5437918",
"0.540851",
"0.5405306",
"0.5393352",
"0.53861594",
"0.5369101",
"0.5366722",
"0.53573126"
] | 0.6621092 | 0 |
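As with the networks lookup, the storage-systems keyword above is a thin suffix wrapper around a GET. The sketch below assumes the same REST conventions; the enclosure-group and hardware-type filter names are guesses, flagged as such in the comments.

```python
import requests

# Companion sketch for the '/available-storage-systems' lookup above.
# The enclosureGroupUri / serverHardwareTypeUri filter names are
# assumptions, not confirmed by the dataset.
def get_available_storage_systems(base_url, session_token,
                                  enclosure_group_uri, sht_uri):
    query = ('?enclosureGroupUri=%s&serverHardwareTypeUri=%s'
             % (enclosure_group_uri, sht_uri))
    url = (base_url +
           '/rest/server-profiles/available-storage-systems' + query)
    resp = requests.get(url, headers={'Auth': session_token})
    resp.raise_for_status()
    return resp.json()
```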
Retrieves a list of the target servers and empty device bays that are available for assignment to the server profile. This replaces the /rest/serverprofiles/availableservers API. [Arguments] | def fusion_api_get_server_profiles_available_targets(self, uri=None, param='', api=None, headers=None):
param = '/available-targets%s' % param
return self.profile.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_servers():\n (code, message) = rest_api.list_servers(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def fusion_api_get_server_profiles_available_networks(self, uri=None, param='', api=None, headers=None):\n param = '/available-networks%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)",
"def get_reachable_servers(self) -> List[Server]:\n pass",
"def Servers(self, server=None):\n if server:\n self.current = server\n return \"successful\"\n\n servers = []\n for x in XbmcServers.select():\n servers.append({'name': x.name, 'id': x.id})\n if len(servers) < 1:\n return\n return {'current': self.current, 'servers': servers}",
"def list_servers(active=True):\n params = {'active': 1} if active else {}\n servers_response = requests.get('https://bootstrap.fetch.ai/networks/', params=params)\n if servers_response.status_code != 200:\n raise requests.ConnectionError('Failed to get network status from bootstrap')\n\n return servers_response.json()",
"def get_servers(self):\n url = '%s/servers/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['servers']\n else:\n LOG.error('Get servers failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def _get_assigned_server_for_profile():\n selenium2lib = ui_lib.get_s2l()\n serverprofiledict = {}\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n\n for profobj in profile_list:\n if not select_server_profile(profobj):\n ui_lib.fail_test(\"Exiting function get assigned server, Not selected profile %s\" % profobj)\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_SELECTOR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_DROPDOWN_SELECT % 'Overview')\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_HARDWARE)\n strhardware = selenium2lib._get_text(FusionServerProfilesPage.ID_SERVER_HARDWARE)\n if strhardware != 'unassigned' and ('empty' not in strhardware):\n serverprofiledict[profobj] = strhardware\n return serverprofiledict",
"def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()",
"def list_availability_server(self, feed_id, server_id, **kwargs):\n metric_id = self._metric_id_availability_server(feed_id=feed_id, server_id=server_id)\n return self.list_availability(metric_id=metric_id, **kwargs)",
"def servers(self):\n response = self._request(\"GET\", [ROUTE_SERVERS])\n\n return CBWParser().parse_response(CBWServer, response)",
"def list_servers(self, request, tenant_id):\n server_name = ''\n if 'name' in request.args:\n server_name = request.args['name'][0]\n response_data = list_server(tenant_id, server_name, details=False)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])",
"def list_servers(self, all_tenants=False):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.project_info[\"project_id\"] + \"/servers/detail\"\n if all_tenants:\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + self.project_info[\n \"project_id\"] + \"/servers/detail?all_tenants=1\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while listing servers.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List servers Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Servers List :%s \" % output)\n return output[\"servers\"]",
"def get_servers_info(self):\n return self.mrr_obj.get('/info/servers')",
"def get_server_list():\n\n if file_downloaded(output_file):\n server_list = load_server_list_json()\n printer('Server list loaded from JSON')\n\n #server_list = load_server_list_json()\n #printer('Server list loaded from JSON')\n\n else:\n # Connect to RS\n rsconn = object\n rsconn = connect()\n\n # Store the JSON response from list_servers\n printer(\"Fetching server list from Rackspace...\")\n\n server_list = rsconn.list_servers(detailed = DETAILED)\n save_server_list_json(server_list)\n\n printer('Server list loaded via API call')\n\n return server_list",
"def get_servers(self):\n\t\treturn self.__servers",
"def describe_servers(ServerName=None, NextToken=None, MaxResults=None):\n pass",
"def list_servers(self, request, paginate):\n raise NotImplementedError",
"def servers(self, details=True, **query):\n srv = _server.ServerDetail if details else _server.Server\n return list(self._list(srv, paginated=True, **query))",
"def servers(self):\n return self._servers",
"def get_servers():\n all_servers = []\n start = 0\n size = 100\n\n while True:\n params = {\n 'start': start,\n 'size': size,\n 'names': 1,\n 'cdata': 1\n }\n\n xml_content = _call(\n servers_base_url + 'get_server_list.php',\n parser='xml',\n params=params\n )\n\n servers = [Server.load(server_node) for server_node in xml_content.xpath('/result/server')]\n\n if not servers:\n break\n\n all_servers.extend(servers)\n\n if servers[-1].is_last:\n break\n\n start += size\n\n _set_servers_location(all_servers)\n _set_server_event(all_servers)\n\n all_servers.sort(\n key=lambda s: s.players.current,\n reverse=True\n )\n\n return all_servers",
"def bak_delete_all_appliance_server_profiles():\n selenium2lib = ui_lib.get_s2l()\n \"\"\" Navigate to Network Page \"\"\"\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n # get the list of networks\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_PROFILE_LIST)\n delete_server_profile([el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)])",
"def get_all_servers(self) -> List[Server]:\n pass",
"def list_servers(self, request):\n token = request.form.get('token')\n if token is None:\n token = request.args.get('token')\n\n rest_client = RestClient.instance()\n if (not rest_client.validate_token(token)):\n return (401, 'Unauthorized')\n\n game_servers = GameServers.instance().get_servers()\n out = []\n for game_server in game_servers.values():\n out.append({\n 'name': game_server.get_name(),\n 'host': game_server.get_host(),\n 'port': game_server.get_port(),\n 'owner': game_server.get_owner()\n })\n return (200, json.dumps(out))",
"async def servers(self, ctx):\n # [p]servers\n\n owner = ctx.message.author\n servers = list(self.bot.servers)\n server_list = {}\n msg = \"\"\n for i in range(0, len(servers)):\n server_list[str(i)] = servers[i]\n msg += \"{}: {}\\n\".format(str(i), servers[i].name)\n msg += \"\\nTo leave a server just type its number.\"\n for page in pagify(msg, ['\\n']):\n await self.bot.say(page)\n while msg is not None:\n msg = await self.bot.wait_for_message(author=owner, timeout=15)\n if msg is not None:\n msg = msg.content.strip()\n if msg in server_list.keys():\n await self.leave_confirmation(server_list[msg], owner, ctx)\n else:\n break\n else:\n break",
"def partner_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]]:\n return pulumi.get(self, \"partner_servers\")",
"def fusion_api_get_server_profiles_available_storage_systems(self, uri=None, param='', api=None, headers=None):\n param = '/available-storage-systems%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)",
"def get_available_dedicated_server_packages(self):\r\n\r\n package_obj = self.client['Product_Package']\r\n packages = []\r\n\r\n # Pull back only server packages\r\n mask = 'id,name,description,type'\r\n _filter = {\r\n 'type': {\r\n 'keyName': {\r\n 'operation': 'in',\r\n 'options': [\r\n {'name': 'data',\r\n 'value': ['BARE_METAL_CPU', 'BARE_METAL_CORE']}\r\n ],\r\n },\r\n },\r\n }\r\n\r\n for package in package_obj.getAllObjects(mask=mask, filter=_filter):\r\n # Filter out packages without a name or that are designated as\r\n # 'OUTLET.' The outlet packages are missing some necessary data\r\n # and their orders will fail.\r\n if package.get('name') and 'OUTLET' not in package['description']:\r\n packages.append((package['id'], package['name'],\r\n package['description']))\r\n\r\n return packages",
"def partner_servers(self) -> pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]:\n return pulumi.get(self, \"partner_servers\")",
"def _scoped_servers(self):\n\n # If project scoped explicitly set the project list\n projects = None if utils.all_projects() else [pecan.request.token.project_id]\n\n # Must do a detailed search here as it returns the tenant_id field\n servers = self.compute.servers.list(search_opts={'all_tenants': 'True'})\n\n servers = Scope.filter(servers, projects=projects)\n return utils.paginate(servers, pecan.request.GET.get('marker'),\n pecan.request.GET.get('limit'))",
"def api_get(self):\n sdc = DataCenter(location=self.joyent_uri, key_id=self.joyent_key_id, secret=self.joyent_secret,\n allow_agent=False, verbose=self.debug)\n servers = sdc.machines()\n return servers"
] | [
"0.63462883",
"0.6167778",
"0.61399084",
"0.59553576",
"0.59076846",
"0.58945465",
"0.5873588",
"0.5869044",
"0.5823339",
"0.5819195",
"0.57711154",
"0.57705265",
"0.57296467",
"0.5689249",
"0.56749696",
"0.56244224",
"0.559568",
"0.55720776",
"0.5557791",
"0.5500594",
"0.5459381",
"0.5454904",
"0.5390291",
"0.5373276",
"0.5351767",
"0.5339174",
"0.5338843",
"0.5298209",
"0.52979946",
"0.5290423"
] | 0.6263745 | 1 |
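The query above notes that '/available-targets' replaced the older available-servers endpoint and returns both target servers and empty device bays. A hedged sketch of filtering the empty bays out of such a response follows; the 'targets' and 'serverHardwareUri' field names are assumptions made for illustration only.

```python
import requests

# Sketch of the '/available-targets' lookup above, keeping only empty
# device bays. The 'targets' / 'serverHardwareUri' field names are
# guesses, not confirmed by the dataset.
def list_empty_bays(base_url, session_token):
    url = base_url + '/rest/server-profiles/available-targets'
    resp = requests.get(url, headers={'Auth': session_token})
    resp.raise_for_status()
    data = resp.json()
    return [t for t in data.get('targets', [])
            if t.get('serverHardwareUri') is None]
```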
A server profile template object will be returned with the configuration based on this profile. [Arguments] | def fusion_api_get_server_profile_new_template(self, uri, api=None, headers=None):
return self.profile.get(uri=uri, api=api, headers=headers, param="/new-profile-template") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_create_server_profile_template(self, body, api=None, headers=None):\n return self.profile_template.create(body, api, headers)",
"def fusion_api_get_server_profile_template_new_profile(self, uri, api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=\"/new-profile\")",
"def fusion_api_get_server_profile_templates(self, uri=None, param='', api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=param)",
"def create_profile_from_template(*template_profile_obj):\n\n logger._log_to_console_and_log_file(\"Navigating to server profile template page...\")\n if not navigate():\n return False\n\n if isinstance(template_profile_obj, test_data.DataObj):\n template_profile_obj = [template_profile_obj]\n elif isinstance(template_profile_obj, tuple):\n template_profile_obj = list(template_profile_obj[0])\n\n for prof in template_profile_obj:\n\n \"\"\" Selecting profile template \"\"\"\n if not select_profile_template(prof.templ_name):\n ui_lib.fail_test(\"profile template is not present in template list\")\n\n logger._log_to_console_and_log_file(\"verifying for profile existence before proceeding to create\")\n if prof.has_property(\"prof_name\") and prof.prof_name.strip() != \"\":\n if serverprofiles.select_server_profile(prof.prof_name):\n ui_lib.fail_test(\"FAIL: Server profile '{0}' is already present\".format(prof.prof_name))\n else:\n ui_lib.fail_test(\"'prof_name' is a mandatory field and should not be empty\")\n\n logger._log_to_console_and_log_file(\"Powering of server '{0}\".format(prof.server))\n if prof.server.strip() != \"unassigned\" and not (serverhardware.power_off_server(prof.server)):\n ui_lib.fail_test(\"Can't proceed with server profile creation on server %s\" % prof.server)\n\n if not ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_PAGE_LABEL):\n if not navigate():\n ui_lib.fail_test(\"FAIL: failed to navigate profile template page\")\n\n logger._log_to_console_and_log_file(\"Selecting Create server profile option from Actions menu\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE)\n\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME)\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME, prof.prof_name)\n\n if prof.has_property(\"prof_description\") and prof.prof_description.strip() != \"\":\n logger._log_to_console_and_log_file(\"Entering profile description: '{0}'\".format(prof.prof_description))\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_DESCRIPTION, prof.prof_description)\n\n if prof.has_property(\"server\") and prof.server.strip() != \"\":\n logger._log_to_console_and_log_file(\"Selecting sever '{0}' to create profile\".format(prof.server))\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server):\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server)\n logger._log_to_console_and_log_file(\"Selected valid server hardware\")\n else:\n ui_lib.fail_test(\"Provided server '{0}' is not a valid\".format(prof.server))\n else:\n ui_lib.fail_test(\"'server' name is a mandatory field and should not be empty\")\n\n if prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'false':\n logger._log_to_console_and_log_file(\"Creating server profile from template without overriding template\")\n elif prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'true':\n logger._log_to_console_and_log_file(\"Creating 
server profile from template with overriding template\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_CHECKBOX_OVERRIDE_TEMPALTE)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE, PerfConstants.SELECT_ENCLOSURE * 3)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.SELECT_ENCLOSURE)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.WAIT_UNTIL_CONSTANT):\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n else:\n ui_lib.fail_test(ui_lib.get_text(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR))\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % prof.prof_name, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.ignore_staleElementRefException(\"_is_visible\", FusionServerProfilesPage.ID_PROFILE_CHANGING)\n logger._log_to_console_and_log_file(\"Waiting for profile creation to complete..\")\n\n logger._log_to_console_and_log_file(\"Validating profile %s\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ELEMENT_ACTIVITY % prof.prof_name):\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_OK, PerfConstants.CREATE_SERVER_PROFILE_TIME):\n logger._log_to_console_and_log_file(\"Profile template %s created\" % prof.prof_name)\n elif ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_WARNING):\n logger._warn(\"Profile %s created with warning\" % prof.prof_name)\n else:\n logger._warn(\"Failed to create server profile %s\" % prof.prof_name)\n return False\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n\n return True",
"def create_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n created = 0\n already_exists = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile template is already existing\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile_template.name)\n already_exists += 1\n continue\n\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_selected = get_type_of_server_hardware(profile_template.ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n # open Create SP template dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile_template.name)\n CreateServerProfileTemplate.input_description(getattr(profile_template, 'desc', ''))\n CreateServerProfileTemplate.input_server_profile_description(getattr(profile_template, 'sp_desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n # input 'Enclosure group'\n CreateServerProfileTemplate.input_select_server_hardware_type(sht_selected)\n CreateServerProfileTemplate.input_select_enclosure_group(profile_template.enclgroup) if getattr(profile_template, 'enclgroup', None) is not None else None\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfileTemplate.Advanced.set(profile_template)\n\n CreateServerProfileTemplate.click_create_button()\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile_template.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=720, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=180, fail_if_false=True)\n logger.info(\"created server profile '%s' successfully\" % profile_template.name)\n created += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n logger.warn(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def make_ServerProfileTemplateV1(name=None,\n description=None,\n serverProfileDescription=None,\n serverHardwareTypeUri=None,\n enclosureGroupUri=None,\n affinity=None,\n hideUnusedFlexNics=None,\n profileConnectionV4=None,\n firmwareSettingsV3=None,\n bootSettings=None,\n bootModeSetting=None,\n sanStorageV3=None):\n return {\n 'type': 'ServerProfileTemplateV1',\n 'name': name,\n 'description': description,\n 'serverProfileDescription': serverProfileDescription,\n 'serverHardwareTypeUri': serverHardwareTypeUri,\n 'enclosureGroupUri': enclosureGroupUri,\n 'affinity': affinity,\n 'hideUnusedFlexNics': hideUnusedFlexNics,\n 'connections': profileConnectionV4,\n 'firmware': firmwareSettingsV3,\n 'boot': bootSettings,\n 'bootMode': bootModeSetting,\n 'sanStorage': sanStorageV3\n }",
"def profile():\n \n return render_template(\"profile.html\")",
"def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):\n return self.profile_template.update(body, uri, api, headers)",
"def copy_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=8)\n\n total = len(profile_template_obj)\n source_not_exists = 0\n target_already_exists = 0\n copied = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile template with name '%s' ...\" % profile_template.source)\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.source, fail_if_false=False) is False:\n logger.warn(\"source server profile template '%s' does not exist\" % profile_template.source)\n source_not_exists += 1\n continue\n\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False) is False:\n logger.warn(\"target server profile template '%s' already exists!\" % profile_template.name)\n target_already_exists += 1\n continue\n\n # open Copy SP dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.source)\n\n CopyServerProfileTemplate.select_action_copy()\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfileTemplate.input_name(profile_template.name)\n CopyServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = CopyServerProfileTemplate.get_selected_server_hardware_type(profile_template.name)\n # if profile_template.hardwareType not in sht_selected:\n # logger.warn(\"server hardware type '%s' of server profile template '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile_template.name, profile_template.hardwareType))\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n CopyServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfileTemplate.Advanced.set(profile_template)\n\n CopyServerProfileTemplate.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile_template.source, profile_template.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - source_not_exists - target_already_exists == 0:\n logger.warn(\"no server profile template to copy! 
all %s server profile template(s) is either source-NOT-existing or target-ALREADY-existing, test is considered FAILED\" % (source_not_exists + target_already_exists))\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile template(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + source_not_exists + target_already_exists == total:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, test is considered FAILED\" % (source_not_exists, target_already_exists))\n return False\n else:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, %s template(s) left is failed being copied \" % (source_not_exists, target_already_exists, total - copied - source_not_exists - target_already_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def get_config_template(self) -> cconfig.Config:",
"def get_profile(self, profiles, settings=None, options=None, conf=None, cwd=None):\n assert isinstance(profiles, list), \"Please provide a list of profiles\"\n cache = ClientCache(self._conan_api.cache_folder)\n loader = ProfileLoader(cache)\n profile = loader.from_cli_args(profiles, settings, options, conf, cwd)\n profile.conf.validate()\n cache.new_config.validate()\n # Apply the new_config to the profiles the global one, so recipes get it too\n profile.conf.rebase_conf_definition(cache.new_config)\n return profile",
"def template(self):\n return self.conf.get(\"template\", None)",
"def update_profile_from_template(profile):\n selenium2lib = ui_lib.get_s2l()\n if not select_server_profile(profile):\n ui_lib.fail_test(\"Failed to select profile %s\" % profile)\n\n logger._log_to_console_and_log_file(\"power off server before updating profile from template\")\n profile_attributes = get_server_profile_attributes(profile, None)\n if profile_attributes[\"server hardware\"] == \"unassigned\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Cannot power off Server Profile '%s' due to unassigned server hardware\" % profile)\n elif profile_attributes[\"server power\"].lower() == \"on\":\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF):\n logger._log_to_console_and_log_file(\"Powering off selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWEROFF_PRESS_HOLD)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"Off\", PerfConstants.PROFILE_POWER_VALIDATION)\n logger._log_to_console_and_log_file(\"Successfully powered off Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n ui_lib.fail_test(\"Power off option is not available in the Actions menu\")\n\n # Select update from template option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_UPDATE_FROM_TEMPLATE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MSG_TO_POWER_OFF_SERVER):\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n else:\n ui_lib.fail_test(\"Server should be powered off to update profile\")\n logger.debug(\"waiting for progress bar indicates to 'ok'\")\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_NOTIFICATION_OK, 300):\n logger._log_to_console_and_log_file(\"Server profile '%s' updated successfully from template\" % profile)\n return True\n else:\n ui_lib.fail_test(\"Failed to update server profile '%s' from template\" % profile)",
"def edit_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n edited = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n continue\n\n # get new server hardware type for edit\n enclosure_group = profile_template.enclgroup if getattr(profile_template, 'enclgroup', None) is not None else None\n sht_new = None\n if getattr(profile_template, 'new_sht_ref_server', None) is not None:\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.new_sht_ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_new = get_type_of_server_hardware(profile_template.new_sht_ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n elif getattr(profile_template, 'hardwareType', None) is not None:\n sht_new = profile_template.hardwareType\n\n # open Edit SPT dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.name)\n\n EditServerProfileTemplate.select_action_edit()\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfileTemplate.input_name(profile_template.newName) if getattr(profile_template, 'newName', None) is not None else None\n EditServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if sht_new is not None and sht_new not in sht_selected:\n logger.info(\"server hardware type '%s' is NOT consistent with current value '%s'\" % (sht_new, sht_selected))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(sht_new, enclosure_group, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfileTemplate.get_selected_enclosure_group()\n if enclosure_group is not None and enclosure_group not in eg_selected:\n logger.warn(\"enclosure group '%s' is NOT consistent with test data '%s'\" % (eg_selected, enclosure_group))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(enclosure_group, timeout=5, fail_if_false=False)\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n EditServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if 
getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection().set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfileTemplate.Advanced.set(profile_template)\n\n EditServerProfileTemplate.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile_template.newName if getattr(profile_template, 'newName', None) is not None else profile_template.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to edit! all %s server profile template(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile template(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, %s profile template(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def create_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n created = 0\n already_exists = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is already existing\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n already_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_select_server_profile_template(profile.prof_temp)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if getattr(profile, 'hardwareType', None) is not None:\n hardware_type = profile.hardwareType\n\n if str(hardware_type)[:2:] == 'BL' or profile.server == 'unassigned':\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CreateServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfile.Advanced.set(profile)\n\n CreateServerProfile.click_create_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % profile.name)\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180, fail_if_false=False) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n if FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=timeout, fail_if_false=False) is True:\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok_or_warn(profile.name, timeout=180, fail_if_false=False) is True:\n logger.info(\"created server profile '%s' successfully\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_server_profile_status_ok_or_warn' = FALSE, skip to next profile ... \")\n continue\n else:\n logger.warn(\"'wait_activity_action_ok' = FALSE, skip to next profile ... 
\")\n FusionUIBase.show_activity_sidebar()\n continue\n else:\n logger.info(\"created server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_create_server_profile_dialog_disappear' = FALSE, skip to next profile ... \")\n CreateServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n ui_lib.fail_test(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def globalProfile():\n return context.profiles",
"def profile():\r\n user_data = load_user(current_user.id, current_user)\r\n if user_data is not None:\r\n user, followers, following = user_data\r\n\r\n return render_base_template(\"profile.html\", profile=user, followers=followers,\r\n following=following,\r\n os_projects=[])\r\n\r\n return abort(404)",
"def _validate_node_server_profile_template(oneview_client, oneview_info):\n server_profile_template = oneview_client.server_profile_templates.get(\n oneview_info['server_profile_template_uri'])\n server_hardware = oneview_client.server_hardware.get(\n oneview_info['server_hardware_uri'])\n\n _validate_server_profile_template_server_hardware_type(\n server_profile_template, server_hardware)\n _validate_spt_enclosure_group(server_profile_template, server_hardware)\n _validate_server_profile_template_manage_boot(server_profile_template)",
"def server_profile(self, server_profile):\n\n self._server_profile = server_profile",
"async def provision(\n self, context: InjectionContext, config: Mapping[str, Any] = None\n ) -> Profile:",
"def get_configuration_template(self):\n return CONFIG_TEMPLATE",
"def fusion_api_create_server_profile(self, body, api=None, headers=None, param=''):\n return self.profile.create(body, api, headers, param=param)",
"def copy_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=8)\n\n total = len(profile_obj)\n not_exists = 0\n copied = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile with name '%s' ...\" % profile.source)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.source, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.source)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.source)\n CopyServerProfile.select_action_copy()\n CopyServerProfile.wait_copy_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfile.input_name(profile.name)\n CopyServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n\n if not CopyServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for copying server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip copying profile '%s' and continue to edit other server profiles\" % (profile.server, profile.source))\n continue\n msg = CopyServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be copied successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n sht_selected = CopyServerProfile.get_selected_server_hardware_type(profile.server)\n if hasattr(profile, 'hardwareType'):\n hardware_type = profile.hardwareType\n else:\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n else:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.server)\n\n if str(hardware_type) not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CopyServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfile.Advanced.set(profile)\n\n CopyServerProfile.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfile.wait_copy_server_profile_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=1800, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile.source, profile.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to copy! 
all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile(s) is skipped being copied, %s profile(s) left is failed being copied \" % (not_exists, total - copied - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def configuration_profile(self) -> Optional[str]:\n return pulumi.get(self, \"configuration_profile\")",
"def generate_haproxy_config(template=None, instances=None):\n\n return Template(filename=template).render(instances=instances)",
"def load_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n if len(profiles) == 0:\r\n #Just in case\r\n profiles[\"Default\"] = Profile()\r\n profiles[\"Default\"].Name = \"Default\"\r\n #Some default templates\r\n profiles[\"Default\"].FileTemplate = \"{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}\"\r\n profiles[\"Default\"].FolderTemplate = \"{<publisher>}\\{<imprint>}\\{<series>}{ (<startyear>{ <format>})}\"\r\n \r\n if not lastused:\r\n lastused = [profiles.keys()[0]]\r\n \r\n return profiles, lastused",
"def GenerateConfig(context):\r\n \r\n module = \"frontend\"\r\n cc = config_merger.ConfigContext(context.properties, module)\r\n \r\n return {\r\n 'resources': [{\r\n 'name': 'simple_frontend',\r\n 'type': 'simple_frontend.py',\r\n 'properties': context.properties\r\n }], \r\n 'outputs': [{\r\n 'name': 'env_name',\r\n 'value': context.properties[\"envName\"]\r\n },{\r\n 'name': 'context',\r\n 'value': cc.configs['CONTEXT']\r\n },{\r\n 'name': 'HQ_Address',\r\n 'value': cc.configs['HQ_Address']\r\n },{\r\n 'name': 'ServiceName',\r\n 'value': cc.configs['ServiceName']\r\n },{\r\n 'name': 'versionNR',\r\n 'value': cc.configs['versionNR']\r\n },{\r\n 'name': 'outp_3',\r\n 'value':str(cc.configs)\r\n }]\r\n \r\n }",
"def _get_profile(self):\n return self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"profile\")\n )",
"def profiles():\n images = get_uploaded_images()\n records = db.session.query(UserProfile).all()\n return render_template('profiles.html', images=images, records =records)",
"def from_cli_args(self, profiles, settings, options, conf, cwd):\n if conf and any(CORE_CONF_PATTERN.match(c) for c in conf):\n raise ConanException(\"[conf] 'core.*' configurations are not allowed in profiles.\")\n\n result = Profile()\n for p in profiles:\n tmp = self.load_profile(p, cwd)\n result.compose_profile(tmp)\n\n args_profile = _profile_parse_args(settings, options, conf)\n result.compose_profile(args_profile)\n # Only after everything has been aggregated, try to complete missing settings\n profile_plugin = self._load_profile_plugin()\n if profile_plugin is not None:\n try:\n profile_plugin(result)\n except Exception as e:\n msg = f\"Error while processing 'profile.py' plugin\"\n msg = scoped_traceback(msg, e, scope=\"/extensions/plugins\")\n raise ConanException(msg)\n result.process_settings(self._cache)\n return result"
] | [
"0.66830575",
"0.66017467",
"0.65351486",
"0.64352024",
"0.62832457",
"0.6248971",
"0.6214098",
"0.6076305",
"0.58839446",
"0.5786617",
"0.564346",
"0.56402904",
"0.5597847",
"0.5524104",
"0.5458989",
"0.5417375",
"0.5369805",
"0.5351991",
"0.5331617",
"0.53293836",
"0.53194445",
"0.530088",
"0.52806497",
"0.5278363",
"0.5231747",
"0.52174824",
"0.5215825",
"0.51329887",
"0.51222694",
"0.510821"
] | 0.6721189 | 0 |
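Between records, a quick consistency check is handy when skimming rows like the one above. The sketch below is a hypothetical validation helper written for illustration, not part of the dataset tooling; the field names (query, document, negatives, negative_scores, document_score, document_rank) are taken from the column header at the top of this file, and the rank-0 invariant simply mirrors what the row above exhibits (positive score 0.6721189 above the best negative 0.66830575). However the rows are materialized (e.g., loaded into dicts), each record is assumed to be a mapping with those fields.

    def validate_record(record: dict) -> None:
        # Field names follow the column header at the top of this file.
        assert isinstance(record["query"], str) and record["query"].strip()
        assert isinstance(record["document"], str) and record["document"].strip()
        negatives = record["negatives"]
        scores = [float(s) for s in record["negative_scores"]]  # scores are serialized as strings
        # Every negative passage must carry exactly one similarity score.
        assert len(negatives) == len(scores), "negatives/negative_scores misaligned"
        # Similarity scores should land in [0, 1].
        assert all(0.0 <= s <= 1.0 for s in scores)
        # A rank-0 positive should outscore its hardest negative,
        # e.g. 0.6721189 > 0.66830575 in the record above.
        if int(record["document_rank"]) == 0:
            assert float(record["document_score"]) >= max(scores)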
Creates a Server Profile Template. [Arguments] | def fusion_api_create_server_profile_template(self, body, api=None, headers=None):
return self.profile_template.create(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n created = 0\n already_exists = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile template is already existing\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile_template.name)\n already_exists += 1\n continue\n\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_selected = get_type_of_server_hardware(profile_template.ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n # open Create SP template dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile_template.name)\n CreateServerProfileTemplate.input_description(getattr(profile_template, 'desc', ''))\n CreateServerProfileTemplate.input_server_profile_description(getattr(profile_template, 'sp_desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n # input 'Enclosure group'\n CreateServerProfileTemplate.input_select_server_hardware_type(sht_selected)\n CreateServerProfileTemplate.input_select_enclosure_group(profile_template.enclgroup) if getattr(profile_template, 'enclgroup', None) is not None else None\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfileTemplate.Advanced.set(profile_template)\n\n CreateServerProfileTemplate.click_create_button()\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile_template.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=720, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=180, fail_if_false=True)\n logger.info(\"created server profile '%s' successfully\" % profile_template.name)\n created += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n logger.warn(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def make_ServerProfileTemplateV1(name=None,\n description=None,\n serverProfileDescription=None,\n serverHardwareTypeUri=None,\n enclosureGroupUri=None,\n affinity=None,\n hideUnusedFlexNics=None,\n profileConnectionV4=None,\n firmwareSettingsV3=None,\n bootSettings=None,\n bootModeSetting=None,\n sanStorageV3=None):\n return {\n 'type': 'ServerProfileTemplateV1',\n 'name': name,\n 'description': description,\n 'serverProfileDescription': serverProfileDescription,\n 'serverHardwareTypeUri': serverHardwareTypeUri,\n 'enclosureGroupUri': enclosureGroupUri,\n 'affinity': affinity,\n 'hideUnusedFlexNics': hideUnusedFlexNics,\n 'connections': profileConnectionV4,\n 'firmware': firmwareSettingsV3,\n 'boot': bootSettings,\n 'bootMode': bootModeSetting,\n 'sanStorage': sanStorageV3\n }",
"def create_profile_from_template(*template_profile_obj):\n\n logger._log_to_console_and_log_file(\"Navigating to server profile template page...\")\n if not navigate():\n return False\n\n if isinstance(template_profile_obj, test_data.DataObj):\n template_profile_obj = [template_profile_obj]\n elif isinstance(template_profile_obj, tuple):\n template_profile_obj = list(template_profile_obj[0])\n\n for prof in template_profile_obj:\n\n \"\"\" Selecting profile template \"\"\"\n if not select_profile_template(prof.templ_name):\n ui_lib.fail_test(\"profile template is not present in template list\")\n\n logger._log_to_console_and_log_file(\"verifying for profile existence before proceeding to create\")\n if prof.has_property(\"prof_name\") and prof.prof_name.strip() != \"\":\n if serverprofiles.select_server_profile(prof.prof_name):\n ui_lib.fail_test(\"FAIL: Server profile '{0}' is already present\".format(prof.prof_name))\n else:\n ui_lib.fail_test(\"'prof_name' is a mandatory field and should not be empty\")\n\n logger._log_to_console_and_log_file(\"Powering of server '{0}\".format(prof.server))\n if prof.server.strip() != \"unassigned\" and not (serverhardware.power_off_server(prof.server)):\n ui_lib.fail_test(\"Can't proceed with server profile creation on server %s\" % prof.server)\n\n if not ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_PAGE_LABEL):\n if not navigate():\n ui_lib.fail_test(\"FAIL: failed to navigate profile template page\")\n\n logger._log_to_console_and_log_file(\"Selecting Create server profile option from Actions menu\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE)\n\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME)\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME, prof.prof_name)\n\n if prof.has_property(\"prof_description\") and prof.prof_description.strip() != \"\":\n logger._log_to_console_and_log_file(\"Entering profile description: '{0}'\".format(prof.prof_description))\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_DESCRIPTION, prof.prof_description)\n\n if prof.has_property(\"server\") and prof.server.strip() != \"\":\n logger._log_to_console_and_log_file(\"Selecting sever '{0}' to create profile\".format(prof.server))\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server):\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server)\n logger._log_to_console_and_log_file(\"Selected valid server hardware\")\n else:\n ui_lib.fail_test(\"Provided server '{0}' is not a valid\".format(prof.server))\n else:\n ui_lib.fail_test(\"'server' name is a mandatory field and should not be empty\")\n\n if prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'false':\n logger._log_to_console_and_log_file(\"Creating server profile from template without overriding template\")\n elif prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'true':\n logger._log_to_console_and_log_file(\"Creating 
server profile from template with overriding template\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_CHECKBOX_OVERRIDE_TEMPALTE)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE, PerfConstants.SELECT_ENCLOSURE * 3)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.SELECT_ENCLOSURE)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.WAIT_UNTIL_CONSTANT):\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n else:\n ui_lib.fail_test(ui_lib.get_text(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR))\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % prof.prof_name, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.ignore_staleElementRefException(\"_is_visible\", FusionServerProfilesPage.ID_PROFILE_CHANGING)\n logger._log_to_console_and_log_file(\"Waiting for profile creation to complete..\")\n\n logger._log_to_console_and_log_file(\"Validating profile %s\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ELEMENT_ACTIVITY % prof.prof_name):\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_OK, PerfConstants.CREATE_SERVER_PROFILE_TIME):\n logger._log_to_console_and_log_file(\"Profile template %s created\" % prof.prof_name)\n elif ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_WARNING):\n logger._warn(\"Profile %s created with warning\" % prof.prof_name)\n else:\n logger._warn(\"Failed to create server profile %s\" % prof.prof_name)\n return False\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n\n return True",
"def fusion_api_get_server_profile_new_template(self, uri, api=None, headers=None):\n return self.profile.get(uri=uri, api=api, headers=headers, param=\"/new-profile-template\")",
"def create_template(self):\n return '{}/{}.html'.format(self.object_name, self.create_endpoint)",
"def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n 'answers_file': self.answers_file,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/%(answers_file)s %(dir)s/bobtemplates/%(template)s'\n % options)",
"def create(*args, **kwargs):\n\n factory = V2ProfileFactory()\n output = factory.create(export_json=True)\n click.echo(output)",
"def fusion_api_get_server_profile_template_new_profile(self, uri, api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=\"/new-profile\")",
"def copy_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=8)\n\n total = len(profile_template_obj)\n source_not_exists = 0\n target_already_exists = 0\n copied = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile template with name '%s' ...\" % profile_template.source)\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.source, fail_if_false=False) is False:\n logger.warn(\"source server profile template '%s' does not exist\" % profile_template.source)\n source_not_exists += 1\n continue\n\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False) is False:\n logger.warn(\"target server profile template '%s' already exists!\" % profile_template.name)\n target_already_exists += 1\n continue\n\n # open Copy SP dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.source)\n\n CopyServerProfileTemplate.select_action_copy()\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfileTemplate.input_name(profile_template.name)\n CopyServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = CopyServerProfileTemplate.get_selected_server_hardware_type(profile_template.name)\n # if profile_template.hardwareType not in sht_selected:\n # logger.warn(\"server hardware type '%s' of server profile template '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile_template.name, profile_template.hardwareType))\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n CopyServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfileTemplate.Advanced.set(profile_template)\n\n CopyServerProfileTemplate.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile_template.source, profile_template.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - source_not_exists - target_already_exists == 0:\n logger.warn(\"no server profile template to copy! 
all %s server profile template(s) is either source-NOT-existing or target-ALREADY-existing, test is considered FAILED\" % (source_not_exists + target_already_exists))\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile template(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + source_not_exists + target_already_exists == total:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, test is considered FAILED\" % (source_not_exists, target_already_exists))\n return False\n else:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, %s template(s) left is failed being copied \" % (source_not_exists, target_already_exists, total - copied - source_not_exists - target_already_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def create_launch_template(ec2_client: BaseClient,\n template_name: str,\n image_id: str,\n key_name: str,\n instance_type: str,\n security_group_ids: Optional[List] = None,\n user_data: Optional[Union[str, bytes]] = None,\n block_device_map: Optional[List[Dict]] = None,\n instance_profile_arn: Optional[str] = None,\n placement_az: Optional[str] = None,\n subnet_id: Optional[str] = None,\n tags: Optional[Dict[str, str]] = None) -> str:\n logger.info('Creating launch template for %s instances ... ', instance_type)\n\n if isinstance(user_data, str):\n # Make sure we have bytes\n user_data = user_data.encode('utf-8')\n\n # Then base64 and decode back to str.\n user_data = b64encode(user_data).decode('utf-8')\n\n template = {'ImageId': image_id,\n 'KeyName': key_name,\n 'SecurityGroupIds': security_group_ids,\n 'InstanceType': instance_type,\n 'UserData': user_data,\n 'BlockDeviceMappings': block_device_map,\n 'SubnetId': subnet_id}\n\n if instance_profile_arn:\n # We could just retry when we get an error because the ARN doesn't\n # exist, but we might as well wait for it.\n wait_until_instance_profile_arn_exists(instance_profile_arn)\n\n # Add it to the request\n template['IamInstanceProfile'] = {'Arn': instance_profile_arn}\n\n if placement_az:\n template['Placement'] = {'AvailabilityZone': placement_az}\n\n if tags:\n # Tag everything when we make it.\n flat_tags = flatten_tags(tags)\n template['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': flat_tags},\n {'ResourceType': 'volume', 'Tags': flat_tags}]\n\n request = {'LaunchTemplateData': prune(template),\n 'LaunchTemplateName': template_name}\n\n if tags:\n request['TagSpecifications'] = [{'ResourceType': 'launch-template', 'Tags': flat_tags}]\n\n return ec2_client.create_launch_template(**request)['LaunchTemplate']['LaunchTemplateId']",
"def create_template(self, **kwargs):\n _template = self.get_template(name=kwargs[\"name\"])\n if _template:\n raise ValueError(f\"Template already used: {kwargs['name']}\")\n\n if \"compute_id\" not in kwargs:\n kwargs[\"compute_id\"] = \"local\"\n\n response = self.http_call(\n \"post\", url=f\"{self.base_url}/templates\", json_data=kwargs\n )\n\n return response.json()",
"def create_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n created = 0\n already_exists = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is already existing\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n already_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_select_server_profile_template(profile.prof_temp)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if getattr(profile, 'hardwareType', None) is not None:\n hardware_type = profile.hardwareType\n\n if str(hardware_type)[:2:] == 'BL' or profile.server == 'unassigned':\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CreateServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfile.Advanced.set(profile)\n\n CreateServerProfile.click_create_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % profile.name)\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180, fail_if_false=False) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n if FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=timeout, fail_if_false=False) is True:\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok_or_warn(profile.name, timeout=180, fail_if_false=False) is True:\n logger.info(\"created server profile '%s' successfully\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_server_profile_status_ok_or_warn' = FALSE, skip to next profile ... \")\n continue\n else:\n logger.warn(\"'wait_activity_action_ok' = FALSE, skip to next profile ... 
\")\n FusionUIBase.show_activity_sidebar()\n continue\n else:\n logger.info(\"created server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_create_server_profile_dialog_disappear' = FALSE, skip to next profile ... \")\n CreateServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n ui_lib.fail_test(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def create_template(tmpl_id, service):\n # copy template and set context data struct for merging template values\n destination_id = _create_template( service)\n\n create_label_table(destination_id, service)\n\n return destination_id",
"def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)",
"def Create( profile_name,\r\n host,\r\n username=None,\r\n password=None,\r\n port=26,\r\n from_name=None,\r\n from_email=None,\r\n ssl=False,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n if not from_name and not from_email:\r\n raise CommandLine.UsageException(\"'from_name' or 'from_email' must be provided\")\r\n\r\n mailer = SmtpMailer( host,\r\n username=username,\r\n password=password,\r\n port=port,\r\n from_name=from_name,\r\n from_email=from_email,\r\n ssl=ssl,\r\n )\r\n mailer.Save(profile_name)\r\n\r\n output_stream.write(\"The profile '{}' has been created.\\n\".format(profile_name))",
"def fusion_api_create_server_profile(self, body, api=None, headers=None, param=''):\n return self.profile.create(body, api, headers, param=param)",
"def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)",
"def test_create_template_subsciption(self):\n pass",
"def create_new_profile():\n client_nickname = input('Enter client profile name: ')\n client_username = input('Enter client username: ')\n client_hostname = input('Enter client hostname: ')\n client_port = '-p' + input('Enter client port: ')\n new_profile = SshUsers(client_nickname, client_username, client_hostname, client_port)\n return add_user_to_db(new_profile)",
"def edit_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n edited = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n continue\n\n # get new server hardware type for edit\n enclosure_group = profile_template.enclgroup if getattr(profile_template, 'enclgroup', None) is not None else None\n sht_new = None\n if getattr(profile_template, 'new_sht_ref_server', None) is not None:\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.new_sht_ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_new = get_type_of_server_hardware(profile_template.new_sht_ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n elif getattr(profile_template, 'hardwareType', None) is not None:\n sht_new = profile_template.hardwareType\n\n # open Edit SPT dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.name)\n\n EditServerProfileTemplate.select_action_edit()\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfileTemplate.input_name(profile_template.newName) if getattr(profile_template, 'newName', None) is not None else None\n EditServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if sht_new is not None and sht_new not in sht_selected:\n logger.info(\"server hardware type '%s' is NOT consistent with current value '%s'\" % (sht_new, sht_selected))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(sht_new, enclosure_group, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfileTemplate.get_selected_enclosure_group()\n if enclosure_group is not None and enclosure_group not in eg_selected:\n logger.warn(\"enclosure group '%s' is NOT consistent with test data '%s'\" % (eg_selected, enclosure_group))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(enclosure_group, timeout=5, fail_if_false=False)\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n EditServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if 
getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection().set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfileTemplate.Advanced.set(profile_template)\n\n EditServerProfileTemplate.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile_template.newName if getattr(profile_template, 'newName', None) is not None else profile_template.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to edit! all %s server profile template(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile template(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, %s profile template(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def create_test_deploy_template(**kw):\n template = get_test_deploy_template(**kw)\n dbapi = db_api.get_instance()\n # Let DB generate an ID if one isn't specified explicitly.\n if 'id' not in kw:\n del template['id']\n if 'steps' not in kw:\n for step in template['steps']:\n del step['id']\n del step['deploy_template_id']\n else:\n for kw_step, template_step in zip(kw['steps'], template['steps']):\n if 'id' not in kw_step:\n del template_step['id']\n return dbapi.create_deploy_template(template)",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack00.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(self.boxsize)+\",\"+str(self.boxsize)\n\t\t\t\t+\" spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def action(self, args):\n create_new_scratch_file(args.file, self.settings, py_template_func)",
"def _make_cloudformation_template(\n project_dir,\n user_data,\n s3_bucket_name,\n sam_template_name,\n elb_name,\n ami_id,\n instance_type,\n autoscaling_min_size,\n autoscaling_desired_capacity,\n autoscaling_max_size,\n):\n\n template_file_path = os.path.join(project_dir, sam_template_name)\n with open(template_file_path, \"a\") as f:\n f.write(\n \"\"\"\\\nAWSTemplateFormatVersion: 2010-09-09\nTransform: AWS::Serverless-2016-10-31\nDescription: BentoML load balanced template\nParameters:\n AmazonLinux2LatestAmiId:\n Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>\n Default: {ami_id}\nResources:\n SecurityGroupResource:\n Type: AWS::EC2::SecurityGroup\n Properties:\n GroupDescription: \"security group for bentoservice\"\n SecurityGroupIngress:\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 5000\n ToPort: 5000\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 22\n ToPort: 22\n VpcId: !Ref Vpc1\n\n Ec2InstanceECRProfile:\n Type: AWS::IAM::InstanceProfile\n Properties:\n Path: /\n Roles: [!Ref EC2Role]\n\n EC2Role:\n Type: AWS::IAM::Role\n Properties:\n AssumeRolePolicyDocument:\n Statement:\n - Effect: Allow\n Principal:\n Service: [ec2.amazonaws.com]\n Action: ['sts:AssumeRole']\n Path: /\n Policies:\n - PolicyName: ecs-service\n PolicyDocument:\n Statement:\n - Effect: Allow\n Action:\n - 'ecr:GetAuthorizationToken'\n - 'ecr:BatchGetImage'\n - 'ecr:GetDownloadUrlForLayer'\n Resource: '*'\n\n LaunchTemplateResource:\n Type: AWS::EC2::LaunchTemplate\n Properties:\n LaunchTemplateName: {template_name}\n LaunchTemplateData:\n IamInstanceProfile:\n Arn: !GetAtt Ec2InstanceECRProfile.Arn\n ImageId: !Ref AmazonLinux2LatestAmiId\n InstanceType: {instance_type}\n UserData: \"{user_data}\"\n SecurityGroupIds:\n - !GetAtt SecurityGroupResource.GroupId\n\n TargetGroup:\n Type: AWS::ElasticLoadBalancingV2::TargetGroup\n Properties:\n VpcId: !Ref Vpc1\n Protocol: HTTP\n Port: 5000\n TargetType: instance\n HealthCheckEnabled: true\n HealthCheckIntervalSeconds: {target_health_check_interval_seconds}\n HealthCheckPath: {target_health_check_path}\n HealthCheckPort: {target_health_check_port}\n HealthCheckProtocol: HTTP\n HealthCheckTimeoutSeconds: {target_health_check_timeout_seconds}\n HealthyThresholdCount: {target_health_check_threshold_count}\n\n LoadBalancerSecurityGroup:\n Type: AWS::EC2::SecurityGroup\n Properties:\n GroupDescription: \"security group for loadbalancing\"\n VpcId: !Ref Vpc1\n SecurityGroupIngress:\n -\n IpProtocol: tcp\n CidrIp: 0.0.0.0/0\n FromPort: 80\n ToPort: 80\n\n InternetGateway:\n Type: AWS::EC2::InternetGateway\n\n Gateway:\n Type: AWS::EC2::VPCGatewayAttachment\n Properties:\n InternetGatewayId: !Ref InternetGateway\n VpcId: !Ref Vpc1\n\n PublicRouteTable:\n Type: AWS::EC2::RouteTable\n Properties:\n VpcId: !Ref Vpc1\n\n PublicRoute:\n Type: AWS::EC2::Route\n DependsOn: Gateway\n Properties:\n DestinationCidrBlock: 0.0.0.0/0\n GatewayId: !Ref InternetGateway\n RouteTableId: !Ref PublicRouteTable\n\n RouteTableSubnetTwoAssociationOne:\n Type: AWS::EC2::SubnetRouteTableAssociation\n Properties:\n RouteTableId: !Ref PublicRouteTable\n SubnetId: !Ref Subnet1\n RouteTableSubnetTwoAssociationTwo:\n Type: AWS::EC2::SubnetRouteTableAssociation\n Properties:\n RouteTableId: !Ref PublicRouteTable\n SubnetId: !Ref Subnet2\n\n Vpc1:\n Type: AWS::EC2::VPC\n Properties:\n CidrBlock: 172.31.0.0/16\n EnableDnsHostnames: true\n EnableDnsSupport: true\n InstanceTenancy: default\n\n Subnet1:\n Type: AWS::EC2::Subnet\n Properties:\n VpcId: !Ref Vpc1\n AvailabilityZone:\n 
Fn::Select:\n - 0\n - Fn::GetAZs: \"\"\n CidrBlock: 172.31.16.0/20\n MapPublicIpOnLaunch: true\n\n Subnet2:\n Type: AWS::EC2::Subnet\n Properties:\n VpcId: !Ref Vpc1\n AvailabilityZone:\n Fn::Select:\n - 1\n - Fn::GetAZs: \"\"\n CidrBlock: 172.31.0.0/20\n MapPublicIpOnLaunch: true\n\n LoadBalancer:\n Type: AWS::ElasticLoadBalancingV2::LoadBalancer\n Properties:\n IpAddressType: ipv4\n Name: {elb_name}\n Scheme: internet-facing\n SecurityGroups:\n - !Ref LoadBalancerSecurityGroup\n Subnets:\n - !Ref Subnet1\n - !Ref Subnet2\n Type: application\n\n Listener:\n Type: AWS::ElasticLoadBalancingV2::Listener\n Properties:\n DefaultActions:\n - Type: forward\n TargetGroupArn: !Ref TargetGroup\n LoadBalancerArn: !Ref LoadBalancer\n Port: 80\n Protocol: HTTP\n\n AutoScalingGroup:\n Type: AWS::AutoScaling::AutoScalingGroup\n DependsOn: Gateway\n Properties:\n MinSize: {autoscaling_min_size}\n MaxSize: {autoscaling_max_size}\n DesiredCapacity: {autoscaling_desired_capacity}\n AvailabilityZones:\n - Fn::Select:\n - 0\n - Fn::GetAZs: \"\"\n - Fn::Select:\n - 1\n - Fn::GetAZs: \"\"\n LaunchTemplate:\n LaunchTemplateId: !Ref LaunchTemplateResource\n Version: !GetAtt LaunchTemplateResource.LatestVersionNumber\n TargetGroupARNs:\n - !Ref TargetGroup\n VPCZoneIdentifier:\n - !Ref Subnet1\n - !Ref Subnet2\n UpdatePolicy:\n AutoScalingReplacingUpdate:\n WillReplace: true\n\nOutputs:\n S3Bucket:\n Value: {s3_bucket_name}\n Description: Bucket to store sam artifacts\n AutoScalingGroup:\n Value: !Ref AutoScalingGroup\n Description: Autoscaling group name\n TargetGroup:\n Value: !Ref TargetGroup\n Description: Target group for load balancer\n Url:\n Value: !Join ['', ['http://', !GetAtt [LoadBalancer, DNSName]]]\n Description: URL of the bento service\n\n\"\"\".format(\n ami_id=ami_id,\n template_name=sam_template_name,\n instance_type=instance_type,\n user_data=user_data,\n elb_name=elb_name,\n autoscaling_min_size=autoscaling_min_size,\n autoscaling_desired_capacity=autoscaling_desired_capacity,\n autoscaling_max_size=autoscaling_max_size,\n s3_bucket_name=s3_bucket_name,\n target_health_check_interval_seconds=TARGET_HEALTH_CHECK_INTERVAL,\n target_health_check_path=TARGET_HEALTH_CHECK_PATH,\n target_health_check_port=TARGET_HEALTH_CHECK_PORT,\n target_health_check_timeout_seconds=TARGET_HEALTH_CHECK_TIMEOUT_SECONDS,\n target_health_check_threshold_count=TARGET_HEALTH_CHECK_THRESHOLD_COUNT,\n )\n )\n return template_file_path",
"def create(profile, name):\n # Make sure it doesn't exist already.\n if exists(profile, name):\n msg = \"Instance profile '\" + str(name) + \"' already exists.\"\n raise ResourceAlreadyExists(msg)\n\n # Now we can create it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"create\", params)\n\n # Check that it exists.\n instance_profile_data = polling_fetch(profile, name)\n if not instance_profile_data:\n msg = \"Instance profile '\" + str(name) + \"' not created.\"\n raise ResourceNotCreated(msg)\n\n # Send back the instance profile's info.\n return instance_profile_data",
"def create(\n name: str,\n from_name: str = typer.Option(None, \"--from\", help=\"Copy an existing profile.\"),\n):\n\n profiles = prefect.settings.load_profiles()\n if name in profiles:\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n [red]Profile {name!r} already exists.[/red]\n To create a new profile, remove the existing profile first:\n\n prefect profile delete {name!r}\n \"\"\"\n ).strip()\n )\n raise typer.Exit(1)\n\n if from_name:\n if from_name not in profiles:\n exit_with_error(f\"Profile {from_name!r} not found.\")\n\n # Create a copy of the profile with a new name and add to the collection\n profiles.add_profile(profiles[from_name].copy(update={\"name\": name}))\n else:\n profiles.add_profile(prefect.settings.Profile(name=name, settings={}))\n\n prefect.settings.save_profiles(profiles)\n\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n Created profile with properties:\n name - {name!r}\n from name - {from_name or None}\n\n Use created profile for future, subsequent commands:\n prefect profile use {name!r}\n\n Use created profile temporarily for a single command:\n prefect -p {name!r} config view\n \"\"\"\n )\n )",
"def __setup_template(self):\n template = Template()\n template.add_description(\"Service VPC - used for services\")\n\n template.add_metadata({\n \"Build\": \"development\",\n \"DependsOn\": [],\n \"Environment\": \"ApiDev\",\n \"Revision\": \"develop\",\n \"StackName\": \"ApiDev-Dev-VPC\",\n \"StackType\": \"InfrastructureResource\",\n \"TemplateBucket\": \"cfn-apidev\",\n \"TemplateName\": \"VPC\",\n \"TemplatePath\": \"ApiDev/Dev/VPC\"\n })\n\n vpc = template.add_resource(\n ec2.VPC(\n \"VPC\",\n CidrBlock=\"10.0.0.0/16\",\n EnableDnsHostnames=\"true\",\n EnableDnsSupport=\"true\",\n InstanceTenancy=\"default\",\n Tags=self.__get_tags(\"ServiceVPC\"),\n )\n )\n\n instance_sg = template.add_resource(\n ec2.SecurityGroup(\n \"BastionSG\",\n GroupDescription=\"Used for source/dest rules\",\n Tags=self.__get_tags(\"VPC-Bastion-SG\"),\n VpcId=Ref(\n vpc\n )\n ),\n )\n\n cw_alarm_topic = template.add_resource(\n Topic(\n \"CloudWatchAlarmTopic\",\n TopicName=\"ApiDev-Dev-CloudWatchAlarms\",\n )\n )\n\n dhcp_options = template.add_resource(\n ec2.DHCPOptions(\n \"DhcpOptions\",\n DomainName=Join(\n \"\",\n [\n Ref(\"AWS::Region\"),\n \".compute.internal\"\n ]\n ),\n DomainNameServers=[\"AmazonProvidedDNS\"],\n Tags=self.__get_tags(\"DhcpOptions\"),\n )\n )\n\n gateway = template.add_resource(\n ec2.InternetGateway(\n \"InternetGateway\",\n Tags=self.__get_tags(\"InternetGateway\")\n )\n )\n\n nat_emergency_topic = template.add_resource(\n Topic(\n \"NatEmergencyTopic\",\n TopicName=\"ApiDev-Dev-NatEmergencyTopic\",\n )\n )\n\n vpc_dhcp_options_assoc = template.add_resource(\n ec2.VPCDHCPOptionsAssociation(\n \"VpcDhcpOptionsAssociation\",\n DhcpOptionsId=Ref(\n dhcp_options\n ),\n VpcId=Ref(\n vpc\n )\n )\n )\n\n vpc_gw_attachment = template.add_resource(\n ec2.VPCGatewayAttachment(\n \"VpcGatewayAttachment\",\n InternetGatewayId=Ref(\n gateway\n ),\n VpcId=Ref(\n vpc\n )\n )\n )\n\n vpc_network_acl = template.add_resource(\n ec2.NetworkAcl(\n \"VpcNetworkAcl\",\n Tags=self.__get_tags(\"NetworkAcl\"),\n VpcId=Ref(\n vpc\n )\n )\n )\n\n vpc_network_acl_rules = template.add_resource([\n ec2.NetworkAclEntry(\n \"VpcNetworkAclInboundRulePublic443\",\n CidrBlock=\"0.0.0.0/0\",\n Egress=\"false\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n PortRange=ec2.PortRange(\n From=\"443\",\n To=\"443\",\n ),\n Protocol=\"6\",\n RuleAction=\"allow\",\n RuleNumber=20001\n ),\n ec2.NetworkAclEntry(\n \"VpcNetworkAclInboundRulePublic80\",\n CidrBlock=\"0.0.0.0/0\",\n Egress=\"false\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n PortRange=ec2.PortRange(\n From=\"80\",\n To=\"80\",\n ),\n Protocol=\"6\",\n RuleAction=\"allow\",\n RuleNumber=20000\n ),\n ec2.NetworkAclEntry(\n \"VpcNetworkAclOutboundRule\",\n CidrBlock=\"0.0.0.0/0\",\n Egress=\"true\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n Protocol=\"-1\",\n RuleAction=\"allow\",\n RuleNumber=30000\n ),\n ec2.NetworkAclEntry(\n \"VpcNetworkAclSsh\",\n CidrBlock=\"127.0.0.1/32\",\n Egress=\"false\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n PortRange=ec2.PortRange(\n From=\"22\",\n To=\"22\",\n ),\n Protocol=\"6\",\n RuleAction=\"allow\",\n RuleNumber=10000\n )\n ])\n\n template.add_output([\n Output(\n \"BastionSG\",\n Value=Ref(instance_sg)\n ),\n Output(\n \"CloudWatchAlarmTopic\",\n Value=Ref(cw_alarm_topic)\n ),\n Output(\n \"InternetGateway\",\n Value=Ref(gateway)\n ),\n Output(\n \"NatEmergencyTopicARN\",\n Value=Ref(nat_emergency_topic)\n ),\n Output(\n \"VPCID\",\n Value=Ref(vpc)\n ),\n Output(\n \"VPCName\",\n Value=Ref(\"AWS::StackName\")\n 
),\n Output(\n \"VpcNetworkAcl\",\n Value=Ref(vpc_network_acl)\n )\n\n ])\n\n return template",
"def create(ctx):\n pass",
"def create(self, template, print_cmd=False):\n brand_mapping = {'solaris11' : 'SYSsolaris', 'solaris' : 'SYSsolaris', 'solaris10' : 'SYSsolaris10'}\n if brand_mapping.has_key(template):\n template = brand_mapping[template]\n\n return self._create_minimal(template, print_cmd)\n\n #self._write_sysidcfg()",
"def fusion_api_get_server_profile_templates(self, uri=None, param='', api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=param)"
] | [
"0.7527531",
"0.7168965",
"0.6981659",
"0.67174834",
"0.6471081",
"0.6322875",
"0.631694",
"0.62575513",
"0.6226413",
"0.6182713",
"0.61035097",
"0.6093237",
"0.5918352",
"0.5867897",
"0.5746021",
"0.5742268",
"0.57117236",
"0.5659141",
"0.5619936",
"0.56015855",
"0.55628175",
"0.55518067",
"0.55432606",
"0.5471104",
"0.5462952",
"0.54537493",
"0.5449965",
"0.5425805",
"0.54087174",
"0.53952134"
] | 0.7511335 | 1 |
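The positive documents on either side of this boundary follow the same thin-wrapper pattern: a `fusion_api_*` keyword that delegates straight to a resource client's `get` or `update` method (see `fusion_api_get_server_profile_templates` among the negatives above and `fusion_api_edit_server_profile_template` in the record below). A minimal sketch of that pattern follows; `RestResource`, `FusionClient`, the base URL, and the endpoint path are illustrative assumptions for this example, not the actual FusionLibrary/OneView client.

```python
# Sketch only: illustrates the delegation pattern behind the fusion_api_* keywords.
# RestResource/FusionClient and the endpoint path are assumptions, not the real library.
import json
from urllib import request


class RestResource:
    """GET a collection or instance, and PUT an update, against one REST endpoint."""

    def __init__(self, base_url, resource_path):
        self.base = base_url.rstrip('/') + resource_path

    def get(self, uri=None, param=''):
        # `param` carries query strings such as '?start=0&count=25' for pagination
        url = (uri or self.base) + param
        with request.urlopen(request.Request(url)) as resp:
            return json.loads(resp.read())

    def update(self, body, uri):
        # PUT the new representation of an existing resource
        req = request.Request(uri,
                              data=json.dumps(body).encode('utf-8'),
                              headers={'Content-Type': 'application/json'},
                              method='PUT')
        with request.urlopen(req) as resp:
            return json.loads(resp.read())


class FusionClient:
    def __init__(self, base_url):
        self.profile_template = RestResource(base_url, '/rest/server-profile-templates')

    def fusion_api_get_server_profile_templates(self, uri=None, param=''):
        return self.profile_template.get(uri=uri, param=param)

    def fusion_api_edit_server_profile_template(self, body, uri):
        return self.profile_template.update(body, uri)
```

Keeping each keyword to a one-line delegation is what makes these positive documents so much shorter than the UI-driven negatives, which script the same operations through the browser.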
Updates a Server Profile Template. [Arguments] | def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):
return self.profile_template.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n edited = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n continue\n\n # get new server hardware type for edit\n enclosure_group = profile_template.enclgroup if getattr(profile_template, 'enclgroup', None) is not None else None\n sht_new = None\n if getattr(profile_template, 'new_sht_ref_server', None) is not None:\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.new_sht_ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_new = get_type_of_server_hardware(profile_template.new_sht_ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n elif getattr(profile_template, 'hardwareType', None) is not None:\n sht_new = profile_template.hardwareType\n\n # open Edit SPT dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.name)\n\n EditServerProfileTemplate.select_action_edit()\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfileTemplate.input_name(profile_template.newName) if getattr(profile_template, 'newName', None) is not None else None\n EditServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if sht_new is not None and sht_new not in sht_selected:\n logger.info(\"server hardware type '%s' is NOT consistent with current value '%s'\" % (sht_new, sht_selected))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(sht_new, enclosure_group, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfileTemplate.get_selected_enclosure_group()\n if enclosure_group is not None and enclosure_group not in eg_selected:\n logger.warn(\"enclosure group '%s' is NOT consistent with test data '%s'\" % (eg_selected, enclosure_group))\n EditServerProfileTemplate.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(enclosure_group, timeout=5, fail_if_false=False)\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n EditServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if 
getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection().set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n sht_selected = EditServerProfileTemplate.get_selected_server_hardware_type()\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfileTemplate.Advanced.set(profile_template)\n\n EditServerProfileTemplate.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfileTemplate.wait_edit_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n profile_name = profile_template.newName if getattr(profile_template, 'newName', None) is not None else profile_template.name\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_name, timeout=300, fail_if_false=True)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to edit! all %s server profile template(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile template(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped being edited, %s profile template(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def update_profile_from_template(profile):\n selenium2lib = ui_lib.get_s2l()\n if not select_server_profile(profile):\n ui_lib.fail_test(\"Failed to select profile %s\" % profile)\n\n logger._log_to_console_and_log_file(\"power off server before updating profile from template\")\n profile_attributes = get_server_profile_attributes(profile, None)\n if profile_attributes[\"server hardware\"] == \"unassigned\":\n selenium2lib.capture_page_screenshot()\n logger._warn(\"Cannot power off Server Profile '%s' due to unassigned server hardware\" % profile)\n elif profile_attributes[\"server power\"].lower() == \"on\":\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF):\n logger._log_to_console_and_log_file(\"Powering off selected server profiles\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_POWEROFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWEROFF_PRESS_HOLD)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_POWER_STATUS % \"Off\", PerfConstants.PROFILE_POWER_VALIDATION)\n logger._log_to_console_and_log_file(\"Successfully powered off Server Profiles\")\n else:\n selenium2lib.capture_page_screenshot()\n ui_lib.fail_test(\"Power off option is not available in the Actions menu\")\n\n # Select update from template option from Action menu\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_UPDATE_FROM_TEMPLATE)\n if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_MSG_TO_POWER_OFF_SERVER):\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BUTTON_PROFILE_UPDATE_CONFIRM)\n else:\n ui_lib.fail_test(\"Server should be powered off to update profile\")\n logger.debug(\"waiting for progress bar indicates to 'ok'\")\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_STATUS_NOTIFICATION_OK, 300):\n logger._log_to_console_and_log_file(\"Server profile '%s' updated successfully from template\" % profile)\n return True\n else:\n ui_lib.fail_test(\"Failed to update server profile '%s' from template\" % profile)",
"def create_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n created = 0\n already_exists = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile template with name '%s' ...\" % profile_template.name)\n # checking if the profile template is already existing\n if not VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile_template.name)\n already_exists += 1\n continue\n\n logger.info(\"getting server hardware type of server hardware '%s'\" % profile_template.ref_server)\n from FusionLibrary.ui.servers.serverhardware import get_type_of_server_hardware\n sht_selected = get_type_of_server_hardware(profile_template.ref_server)\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n # open Create SP template dialog and enter data ...\n CreateServerProfileTemplate.click_create_server_profile_template_button()\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_shown()\n\n CreateServerProfileTemplate.input_name(profile_template.name)\n CreateServerProfileTemplate.input_description(getattr(profile_template, 'desc', ''))\n CreateServerProfileTemplate.input_server_profile_description(getattr(profile_template, 'sp_desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n # input 'Enclosure group'\n CreateServerProfileTemplate.input_select_server_hardware_type(sht_selected)\n CreateServerProfileTemplate.input_select_enclosure_group(profile_template.enclgroup) if getattr(profile_template, 'enclgroup', None) is not None else None\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfileTemplate.Advanced.set(profile_template)\n\n CreateServerProfileTemplate.click_create_button()\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile_template.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CreateServerProfileTemplate.wait_create_server_profile_template_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=720, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=180, fail_if_false=True)\n logger.info(\"created server profile '%s' successfully\" % profile_template.name)\n created += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n logger.warn(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def copy_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=8)\n\n total = len(profile_template_obj)\n source_not_exists = 0\n target_already_exists = 0\n copied = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"copying a server profile template with name '%s' ...\" % profile_template.source)\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.source, fail_if_false=False) is False:\n logger.warn(\"source server profile template '%s' does not exist\" % profile_template.source)\n source_not_exists += 1\n continue\n\n # checking if the profile is not existing for editing\n if VerifyServerProfileTemplate.verify_server_profile_template_not_exist(profile_template.name, fail_if_false=False) is False:\n logger.warn(\"target server profile template '%s' already exists!\" % profile_template.name)\n target_already_exists += 1\n continue\n\n # open Copy SP dialog and enter data ...\n CommonOperationServerProfileTemplate.click_server_profile_template(profile_template.source)\n\n CopyServerProfileTemplate.select_action_copy()\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_shown()\n BuiltIn().sleep(2)\n CopyServerProfileTemplate.input_name(profile_template.name)\n CopyServerProfileTemplate.input_description(profile_template.desc) if getattr(profile_template, 'desc', None) is not None else None\n\n sht_selected = CopyServerProfileTemplate.get_selected_server_hardware_type(profile_template.name)\n # if profile_template.hardwareType not in sht_selected:\n # logger.warn(\"server hardware type '%s' of server profile template '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile_template.name, profile_template.hardwareType))\n\n if getattr(profile_template, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile_template.Affinity)\n CopyServerProfileTemplate.select_affinity_by_text(profile_template.Affinity)\n\n if getattr(profile_template, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile_template.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile_template.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfileTemplate.Firmware.set(profile_template.Firmware)\n\n if getattr(profile_template, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile_template.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfileTemplate.Connection.set(profile_template.Connections)\n\n if getattr(profile_template, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile_template.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfileTemplate.LocalStorage.set(profile_template.LocalStorage)\n\n if getattr(profile_template, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile_template.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfileTemplate.SANStorage.set(profile_template.SANStorage)\n\n if getattr(profile_template, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile_template.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfileTemplate.BootSettings.set(profile_template, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n\n if getattr(profile_template, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile_template.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CopyServerProfileTemplate.Advanced.set(profile_template)\n\n CopyServerProfileTemplate.click_create_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfileTemplate.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfileTemplate.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile_template.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n CopyServerProfileTemplate.wait_copy_server_profile_template_dialog_disappear(timeout=300)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile_template.name, 'Create', timeout=300, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfileTemplate.wait_server_profile_template_status_ok(profile_template.name, timeout=300, fail_if_false=True)\n logger.info(\"successfully copied server profile '%s' to '%s'\" % (profile_template.source, profile_template.name))\n copied += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - source_not_exists - target_already_exists == 0:\n logger.warn(\"no server profile template to copy! 
all %s server profile template(s) is either source-NOT-existing or target-ALREADY-existing, test is considered FAILED\" % (source_not_exists + target_already_exists))\n return False\n else:\n if copied < total:\n logger.warn(\"not all of the server profile template(s) is successfully copied - %s out of %s copied \" % (copied, total))\n if copied + source_not_exists + target_already_exists == total:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, test is considered FAILED\" % (source_not_exists, target_already_exists))\n return False\n else:\n logger.warn(\"%s source-not-existing template(s) and %s target-already-existing template(s) is skipped being copied, %s template(s) left is failed being copied \" % (source_not_exists, target_already_exists, total - copied - source_not_exists - target_already_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully copied - %s out of %s \" % (copied, total))\n return True",
"def create_profile_from_template(*template_profile_obj):\n\n logger._log_to_console_and_log_file(\"Navigating to server profile template page...\")\n if not navigate():\n return False\n\n if isinstance(template_profile_obj, test_data.DataObj):\n template_profile_obj = [template_profile_obj]\n elif isinstance(template_profile_obj, tuple):\n template_profile_obj = list(template_profile_obj[0])\n\n for prof in template_profile_obj:\n\n \"\"\" Selecting profile template \"\"\"\n if not select_profile_template(prof.templ_name):\n ui_lib.fail_test(\"profile template is not present in template list\")\n\n logger._log_to_console_and_log_file(\"verifying for profile existence before proceeding to create\")\n if prof.has_property(\"prof_name\") and prof.prof_name.strip() != \"\":\n if serverprofiles.select_server_profile(prof.prof_name):\n ui_lib.fail_test(\"FAIL: Server profile '{0}' is already present\".format(prof.prof_name))\n else:\n ui_lib.fail_test(\"'prof_name' is a mandatory field and should not be empty\")\n\n logger._log_to_console_and_log_file(\"Powering of server '{0}\".format(prof.server))\n if prof.server.strip() != \"unassigned\" and not (serverhardware.power_off_server(prof.server)):\n ui_lib.fail_test(\"Can't proceed with server profile creation on server %s\" % prof.server)\n\n if not ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_PAGE_LABEL):\n if not navigate():\n ui_lib.fail_test(\"FAIL: failed to navigate profile template page\")\n\n logger._log_to_console_and_log_file(\"Selecting Create server profile option from Actions menu\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_MENU_ACTION_CREATE_SERVER_PROFILE)\n\n ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME)\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_NAME, prof.prof_name)\n\n if prof.has_property(\"prof_description\") and prof.prof_description.strip() != \"\":\n logger._log_to_console_and_log_file(\"Entering profile description: '{0}'\".format(prof.prof_description))\n ui_lib.wait_for_element_and_input_text(FusionServerProfileTemplatesPage.ID_INPUT_PROFILE_DESCRIPTION, prof.prof_description)\n\n if prof.has_property(\"server\") and prof.server.strip() != \"\":\n logger._log_to_console_and_log_file(\"Selecting sever '{0}' to create profile\".format(prof.server))\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server):\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_ELEMENT_SERVER_NAME % prof.server)\n logger._log_to_console_and_log_file(\"Selected valid server hardware\")\n else:\n ui_lib.fail_test(\"Provided server '{0}' is not a valid\".format(prof.server))\n else:\n ui_lib.fail_test(\"'server' name is a mandatory field and should not be empty\")\n\n if prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'false':\n logger._log_to_console_and_log_file(\"Creating server profile from template without overriding template\")\n elif prof.has_property(\"override_temp\") and prof.override_temp.lower().strip() == 'true':\n logger._log_to_console_and_log_file(\"Creating 
server profile from template with overriding template\")\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_CHECKBOX_OVERRIDE_TEMPALTE)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE, PerfConstants.SELECT_ENCLOSURE * 3)\n ui_lib.wait_for_element_notvisible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.SELECT_ENCLOSURE)\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR, PerfConstants.WAIT_UNTIL_CONSTANT):\n if ui_lib.wait_for_element_visible(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfileTemplatesPage.ID_BTN_CREATE_PROFILE)\n else:\n ui_lib.fail_test(ui_lib.get_text(FusionServerProfileTemplatesPage.ID_DIALOG_CREATE_PROFILE_ERROR))\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % prof.prof_name, PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.ignore_staleElementRefException(\"_is_visible\", FusionServerProfilesPage.ID_PROFILE_CHANGING)\n logger._log_to_console_and_log_file(\"Waiting for profile creation to complete..\")\n\n logger._log_to_console_and_log_file(\"Validating profile %s\" % prof.prof_name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ELEMENT_ACTIVITY % prof.prof_name):\n if ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_OK, PerfConstants.CREATE_SERVER_PROFILE_TIME):\n logger._log_to_console_and_log_file(\"Profile template %s created\" % prof.prof_name)\n elif ui_lib.wait_for_element(FusionServerProfileTemplatesPage.ID_ACTIVITY_STATUS_WARNING):\n logger._warn(\"Profile %s created with warning\" % prof.prof_name)\n else:\n logger._warn(\"Failed to create server profile %s\" % prof.prof_name)\n return False\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ACTVITY_PROFILE)\n\n return True",
"def fusion_api_create_server_profile_template(self, body, api=None, headers=None):\n return self.profile_template.create(body, api, headers)",
"def update_with_template_args(args, list_args=None):\r\n if not args.get('--template'):\r\n return\r\n\r\n list_args = list_args or []\r\n\r\n template_path = args.pop('--template')\r\n if not os.path.exists(template_path):\r\n raise ArgumentError(\r\n 'File does not exist [-t | --template] = %s'\r\n % template_path)\r\n\r\n config = configparser.ConfigParser()\r\n ini_str = '[settings]\\n' + open(\r\n os.path.expanduser(template_path), 'r').read()\r\n ini_fp = StringIO(ini_str)\r\n config.readfp(ini_fp)\r\n\r\n # Merge template options with the options passed in\r\n for key, value in config.items('settings'):\r\n option_key = '--%s' % key\r\n if option_key in list_args:\r\n value = value.split(',')\r\n if not args.get(option_key):\r\n args[option_key] = value",
"def fusion_api_get_server_profile_new_template(self, uri, api=None, headers=None):\n return self.profile.get(uri=uri, api=api, headers=headers, param=\"/new-profile-template\")",
"def update_template(template, trial):\n assert isinstance(template, dict) or isinstance(template, list)\n items = template.items() if isinstance(template, dict) else enumerate(template)\n\n for key, value in items:\n if isinstance(value, str):\n if value in trial:\n template[key] = trial[value]\n elif isinstance(value, dict) or isinstance(value, list):\n template[key] = ConfigGenerator.update_template(template[key], trial)\n\n return template",
"def edit_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is not existing for editing\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n # { below 3 lines were to avoid a failure caused by 2 CR that had been fixed. leave the 3 lines here as commented in case regression issue in future\n # will remove below once 2 CRs fixed\n # EditServerProfile.select_action_edit()\n # EditServerProfile.wait_edit_server_profile_dialog_shown()\n # EditServerProfile.click_cancel_button()\n # } here is a workaround for 1st time editing server profile (sp template as well) has defect that,\n # can't close dialog by OK/Cancel button, and SAN Storage's OS Type can't be read correctly,\n # so open dialog and use Cancel button to close, then everything goes well when 2nd time open Edit dialog\n\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n BuiltIn().sleep(2)\n EditServerProfile.input_name(profile.newName) if getattr(profile, 'newName', None) is not None else None\n EditServerProfile.input_description(profile.desc) if getattr(profile, 'desc', None) is not None else None\n\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n # 20151021 Alex Ma - discussed with Tony/Alex C and get below agreed:\n # - if 'hardwareType' is defined in test data, then will firstly select/change 'Server hardware type' from UI,\n # then select/change 'Server hardware' if 'server' is defined in test data\n # - if 'hardwareType' is not defined in test data, then will only check 'server' attribute to decide if select/change 'Server hardware' from UI\n if getattr(profile, 'hardwareType', None) is not None:\n if profile.hardwareType not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(profile.hardwareType, timeout=5, fail_if_false=False)\n elif getattr(profile, 'ref_sht_server', None) is not None:\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.ref_sht_server)\n if hardware_type not in sht_selected:\n logger.warn(\"server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, hardware_type))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_server_hardware_type(hardware_type, timeout=5, fail_if_false=False)\n\n eg_selected = EditServerProfile.get_selected_enclosure_group(profile.server)\n if getattr(profile, 'enclgroup', None) is not None:\n if 
profile.enclgroup not in eg_selected:\n logger.warn(\"enclosure group '%s' of server '%s' is NOT consistent with test data '%s'\" % (eg_selected, profile.server, profile.enclgroup))\n EditServerProfile.ChangeServerHardwareTypeAndEnclosureGroup.change_enclosure_group(profile.enclgroup, timeout=5, fail_if_false=False)\n\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n # if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n # logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n # \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n # continue\n # msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n EditServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n BuiltIn().sleep(3)\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n EditServerProfile.Advanced.set(profile)\n\n EditServerProfile.click_ok_button()\n # logger.debug(\"sleeping for 8 seconds ...\")\n # BuiltIn().sleep(8)\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % profile.name)\n continue\n\n BuiltIn().sleep(2)\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=300) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n profile_name = profile.newName if getattr(profile, 'newName', None) is not None else profile.name\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n FusionUIBase.wait_activity_action_ok(profile_name, 'Update', timeout=timeout, fail_if_false=True)\n FusionUIBase.show_activity_sidebar()\n fail_if_not_ok = not getattr(profile, 'IgnoreWaitForStatusOK', '').lower() == 'true'\n # control whether to stop the case when server profile status is not ok.\n CommonOperationServerProfile.wait_server_profile_status_ok(profile_name, timeout=500, fail_if_false=fail_if_not_ok)\n logger.info(\"edited server profile '%s' successfully\" % profile_name)\n edited += 1\n else:\n logger.info(\"edit server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n edited += 1\n else:\n logger.warn(\"'wait_edit_server_profile_dialog_disappear' = FALSE, skip to next profile ... 
\")\n EditServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, test is considered FAILED\" % not_exists)\n return False\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s not-existing server profile(s) is skipped being edited, test is considered FAILED\" % not_exists)\n return False\n else:\n ui_lib.fail_test(\"%s not-existing server profile(s) is skipped being edited, %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def update_service_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], service: Optional[str], color: Optional[str], purpose: str,\n) -> None:\n service_templates = templates['service-templates']\n assert isinstance(service_templates, list)\n for service_template in service_templates:\n if (\n service_template.get('namespace') == namespace\n and service_template.get('service') == service\n and service_template.get('color') == color\n and service_template.get('purpose') == purpose\n ):\n service_template['template'] = source_data\n return\n service_templates.append({\n 'namespace': namespace,\n 'service': service,\n 'color': color,\n 'purpose': purpose,\n 'template': source_data,\n })",
"def test_update_template_profile_for_system_module(self):\n pass",
"def fusion_api_get_server_profile_template_new_profile(self, uri, api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=\"/new-profile\")",
"def update_gateway_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], purpose: str,\n) -> None:\n gateway_templates = templates['gateway-templates']\n assert isinstance(gateway_templates, list)\n for gateway_template in gateway_templates:\n if (\n gateway_template.get('namespace') == namespace\n and gateway_template.get('purpose') == purpose\n ):\n gateway_template['template'] = source_data\n return\n gateway_templates.append({\n 'namespace': namespace,\n 'purpose': purpose,\n 'template': source_data,\n })",
"def fusion_api_delete_server_profile_template(self, name=None, uri=None, api=None, headers=None):\n return self.profile_template.delete(name, uri, api, headers)",
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)",
"def _main(args):\n if args.files:\n _update_files()\n\n if args.templates:\n _update_template(args.template_definition)",
"def make_ServerProfileTemplateV1(name=None,\n description=None,\n serverProfileDescription=None,\n serverHardwareTypeUri=None,\n enclosureGroupUri=None,\n affinity=None,\n hideUnusedFlexNics=None,\n profileConnectionV4=None,\n firmwareSettingsV3=None,\n bootSettings=None,\n bootModeSetting=None,\n sanStorageV3=None):\n return {\n 'type': 'ServerProfileTemplateV1',\n 'name': name,\n 'description': description,\n 'serverProfileDescription': serverProfileDescription,\n 'serverHardwareTypeUri': serverHardwareTypeUri,\n 'enclosureGroupUri': enclosureGroupUri,\n 'affinity': affinity,\n 'hideUnusedFlexNics': hideUnusedFlexNics,\n 'connections': profileConnectionV4,\n 'firmware': firmwareSettingsV3,\n 'boot': bootSettings,\n 'bootMode': bootModeSetting,\n 'sanStorage': sanStorageV3\n }",
"def delete_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n deleted = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template.name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n else:\n if delete_server_profile_template_by_name(profile_template.name) is False:\n logger.warn(\"server profile template '%s' is NOT deleted successfully, or 'Delete' action is not found in right-side-bar list.\" % profile_template.name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })",
"def profile():\n \n return render_template(\"profile.html\")",
"def update_client(c, stack_name, subdomain, profile, cert_arn=None, create=False):\n action = 'create' if create else 'update'\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-client',\n '--template-body', f'file://static-site.yaml',\n '--parameters',\n f'ParameterKey=Subdomain,ParameterValue={subdomain}',\n f'ParameterKey=CertificateArn,ParameterValue={cert_arn if cert_arn else \"\"}',\n f'--profile', f'{profile}')",
"def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass",
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def _update_template(template_path):\n template_definition = template_path\n\n # template output directory is output/templates, so need to create that location before pulling out the templates\n template_location = template_utilities.get_template_directory()\n\n # Install the template and get the path to the template directory for updating the configuration file.\n templates_path = template_utilities.install_template(template_location, template_definition)\n\n if templates_path:\n # Now need to find the templates definition of that zip file and locate it in the file system so that it can be\n settings = get_configuration()\n\n # Override the configuration details with the new template path. This should probably be handled by the\n # publishing plugin, but for now this will work\n settings.publishing.templates = str(templates_path.relative_to(get_configuration_root()))\n configuration_file_path = get_configuration_root() / 'config.yaml'\n\n dump_configuration(configuration_file_path, settings)",
"def edit_server_profile_for_dl(profile_obj):\n # This keyword is deprecated, please do not use.\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n\n EditServerProfile.input_name(profile.newName)\n EditServerProfile.input_description(profile.desc)\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - verify the server hardware is refreshed to the type name displayed in the drop-down list for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully\")\n ui_lib.fail_test(msg)\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if profile.hardwaretype not in sht_selected:\n logger.warn(\"the server hardware type of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.hardwaretype))\n # set boot mode if attribute 'manageBootMode' is true - only for Gen 9 (or later) server:\n FusionUIBase.select_view_by_name('Boot Settings')\n if 'gen9' in sht_selected.lower():\n logger.info(\"setting 'Boot mode' for Gen 9 specially ...\")\n if getattr(profile, 'manageBootMode', '').lower() == 'true':\n CommonOperationServerProfile.BootSettings.tick_manage_boot_mode()\n CommonOperationServerProfile.BootSettings.select_boot_mode_by_text(profile.bootMode) if hasattr(profile, 'bootMode') else None\n if getattr(profile, 'bootMode', '').lower() == 'legacy bios':\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n else:\n CommonOperationServerProfile.BootSettings.set_non_legacy_bios_mode_boot_order(profile, hardware_type=sht_selected)\n else:\n CommonOperationServerProfile.BootSettings.untick_manage_boot_mode()\n else:\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n\n EditServerProfile.click_ok_button()\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that 
blocks profile being edited. \"\n \"Test will skip this profile '%s' and continue to edit other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.newName, 'Update', timeout=300, fail_if_false=False)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.newName, timeout=180, fail_if_false=False)\n logger.info(\"edited server profile '%s' successfully\" % profile.newName)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, hence test is considered PASS\" % not_exists)\n return True\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, hence test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, but %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)",
"async def _cmdf_pmtemplate(self, substr, msg, privilege_level):\n if len(substr) == 0:\n await self._client.send_msg(msg, \"Error: No content.\")\n return\n elif len(substr) > 1800: # This value is arbitrary.\n await self._client.send_msg(msg, \"Error: Message is too long.\")\n return\n\n self._pm_msg_template = substr\n self._save_settings()\n\n await self._client.send_msg(msg, \"Successfully set the new PM greeting template. Please double-check.\")\n return",
"def set_template(self, template, templateType, blogid=1):\n return self.execute(\"metaWeblog.setTemplate\", self.appkey, blogid, self.username, self.password, template, templateType)",
"def update(self, filename, template_dir, cache_dir):\n\t\tself.cache_dir = cache_dir\n\t\tif filename.startswith('/'): self.template_file = filename\n\t\telse: self.template_file = os.path.join(template_dir,filename)\n\t\tself.cache_module = re.sub(\"[\\/\\\\\\.: ]\",'_',os.path.realpath(self.template_file))\n\t\tself.cache_file = os.path.join(cache_dir,self.cache_module) + '.py'"
] | [
"0.74402344",
"0.7077035",
"0.6430932",
"0.632963",
"0.63004065",
"0.626157",
"0.62599885",
"0.604261",
"0.5994675",
"0.582607",
"0.57136095",
"0.56366116",
"0.56342983",
"0.55626786",
"0.55037796",
"0.547783",
"0.54008967",
"0.5393341",
"0.5388835",
"0.5370036",
"0.536683",
"0.5365047",
"0.5361711",
"0.53299713",
"0.53169906",
"0.5287274",
"0.52606857",
"0.5255833",
"0.52504396",
"0.52403194"
] | 0.73386496 | 1 |
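The negative entries in the row above all share one bulk-operation shape: iterate over the test-data objects, skip entries whose precondition check fails, drive the UI action, then log a dashed summary block and return an overall pass/fail. A minimal sketch of that shape, with hypothetical `verify_exists` and `perform_action` callables standing in for the FusionLibrary helpers:

    import logging

    logger = logging.getLogger(__name__)

    def run_bulk_keyword(items, verify_exists, perform_action):
        # Generic shape of the create/edit/copy/delete keywords above.
        total, skipped, done = len(items), 0, 0
        for n, item in enumerate(items, start=1):
            logger.info("%s No: %s --- Total: %s %s", '-' * 14, n, total, '-' * 14)
            if not verify_exists(item):    # precondition, e.g. the template exists
                skipped += 1
                continue
            if perform_action(item):       # the actual UI operation
                done += 1
        logger.info("%s == Summary == %s", '-' * 14, '-' * 14)
        # PASS only if every item that passed the precondition was processed.
        return total > skipped and done == total - skipped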
Deletes server profile templates in bulk, based on name OR uri. If name AND uri are omitted, ALL templates are deleted. [Arguments] | def fusion_api_delete_server_profile_template(self, name=None, uri=None, api=None, headers=None):
return self.profile_template.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_all_appliance_server_profile_templates():\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n profile_template_name_list = CommonOperationServerProfileTemplate.get_server_profile_template_list()\n\n total = len(profile_template_name_list)\n not_exists = 0\n deleted = 0\n\n for n, profile_template_name in enumerate(profile_template_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template_name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template_name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template_name)\n not_exists += 1\n else:\n if not delete_server_profile_template_by_name(profile_template_name):\n logger.warn(\"server profile template '%s' is NOT deleted successfully.\" % profile_template_name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, test is considered PASS\" % not_exists)\n return True\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile template(s) is skipped being deleted, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def test_delete_multiple_templates_success(self):\n template_id_1 = util.MOCK_UUID_1\n template_id_2 = util.MOCK_UUID_2\n\n rv = TEST_CLIENT.post(\n \"/templates/deletetemplates\", json=[template_id_1, template_id_2]\n )\n result = rv.json()\n\n expected = {\"message\": \"Successfully removed templates\"}\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)",
"def delete_server_profile_template(profile_template_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILE_TEMPLATES, time_for_loading=5)\n\n total = len(profile_template_obj)\n not_exists = 0\n deleted = 0\n\n for n, profile_template in enumerate(profile_template_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile template named '%s'\" % profile_template.name)\n if not VerifyServerProfileTemplate.verify_server_profile_template_exist(profile_template.name, fail_if_false=False):\n logger.warn(\"server profile template '%s' does not exist\" % profile_template.name)\n not_exists += 1\n else:\n if delete_server_profile_template_by_name(profile_template.name) is False:\n logger.warn(\"server profile template '%s' is NOT deleted successfully, or 'Delete' action is not found in right-side-bar list.\" % profile_template.name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile template to delete! all %s server profile template(s) is NOT existing, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile template(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s not-existing server profile template(s) is skipped, keyword '%s' returns a 'False'\" % (not_exists, sys._getframe().f_code.co_name))\n return False\n else:\n logger.warn(\"%s not-existing server profile template(s) is skipped, %s profile template(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile template(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def fusion_api_delete_server_profile(self, name=None, uri=None, param='', api=None, headers=None):\n return self.profile.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def bulk_delete(self, **kwargs: Any) -> Response:\n item_ids = kwargs[\"rison\"]\n try:\n BulkDeleteCssTemplateCommand(item_ids).run()\n return self.response(\n 200,\n message=ngettext(\n \"Deleted %(num)d css template\",\n \"Deleted %(num)d css templates\",\n num=len(item_ids),\n ),\n )\n except CssTemplateNotFoundError:\n return self.response_404()\n except CssTemplateBulkDeleteFailedError as ex:\n return self.response_422(message=str(ex))",
"def delete_gating_templates(self, template_name: str) -> None:\n for g in self.gating_templates:\n if template_name == 'all' or g.template_name == template_name:\n g.delete()\n if template_name == 'all':\n self.gating_templates = []\n else:\n self.gating_templates = [g for g in self.gating_templates if g.template_name != template_name]\n self.save()",
"def people_delete(self, profiles=None, query_params=None, timezone_offset=None, ignore_alias=True, backup=True,\n backup_file=None):\n return self.people_operation('$delete', '', profiles=profiles, query_params=query_params,\n timezone_offset=timezone_offset, ignore_alias=ignore_alias, backup=backup,\n backup_file=backup_file)",
"def delete_tenant_bulk(self, tenant_list, sync=False):",
"def delete_custom_template(self, name, filename, context):\n pass",
"def bak_delete_all_appliance_server_profiles():\n selenium2lib = ui_lib.get_s2l()\n \"\"\" Navigate to Network Page \"\"\"\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n # get the list of networks\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_PROFILE_LIST)\n delete_server_profile([el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)])",
"def destroy_template(name=None, call=None, kwargs=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The destroy_template function must be called with -f.\"\n )\n if kwargs is None:\n kwargs = {}\n name = kwargs.get(\"name\", None)\n session = _get_session()\n vms = session.xenapi.VM.get_all_records()\n ret = {}\n found = False\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n if record[\"name_label\"] == name:\n found = True\n # log.debug(record['name_label'])\n session.xenapi.VM.destroy(vm)\n ret[name] = {\"status\": \"destroyed\"}\n if not found:\n ret[name] = {\"status\": \"not found\"}\n return ret",
"def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False):",
"def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):\n return self.template.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete_network_bulk(self, tenant_id, network_id_list, sync=False):",
"def delete_template():\n posted_json = request.get_json(force=True)\n try:\n name = posted_json['template_name']\n except KeyError:\n print(\"Not all required keys are present!\")\n r = jsonify(message=\"Not all required keys for add template are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n if bootstrapper_utils.delete_template(name):\n return jsonify(success=True, message='Deleted Template Successfully', status_code=200)\n else:\n r = jsonify(success=False, message='Could not delete template', status_code=500)\n r.status_code = 500\n return r",
"def delete_namespaced_template(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_template\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_template`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `delete_namespaced_template`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_template`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/templates/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)",
"def fusion_api_get_server_profile_templates(self, uri=None, param='', api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=param)",
"def delete_instance_bulk(self, tenant_id, instance_id_list,\n instance_type, sync=False):",
"def delete_cloudformation_template(self, name, filename, context):\n stack_name = utils.generate_stack_name(context['Stage'], self.name, name)\n utils.delete_cf_stack(\n name=stack_name,\n dry_run=self.dry_run\n )",
"def test_delete_multiple_templates_at_least_one_template_error(self):\n rv = TEST_CLIENT.post(\"/templates/deletetemplates\", json=[])\n result = rv.json()\n\n expected = {\n \"message\": \"inform at least one template\",\n \"code\": \"MissingRequiredTemplateId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)",
"def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))",
"def delete_all_appliance_server_profiles(wait_ongoing_task_complete=False):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n profile_name_list = CommonOperationServerProfile.get_server_profile_list()\n if wait_ongoing_task_complete is True:\n CommonOperationServerProfile.wait_server_profile_task_complete()\n\n total = len(profile_name_list)\n not_exists = 0\n deleted = 0\n\n for n, profile_name in enumerate(profile_name_list):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"deleting a server profile named '%s'\" % profile_name)\n if not VerifyServerProfile.verify_server_profile_exist(profile_name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile_name)\n not_exists += 1\n else:\n if not delete_server_profile_by_name(profile_name, force_delete=True):\n logger.warn(\"server profile '%s' is NOT deleted successfully.\" % profile_name)\n continue\n else:\n deleted += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to delete! all %s server profile(s) is NOT existing, test is considered PASS\" % not_exists)\n return True\n else:\n if deleted < total:\n logger.warn(\"not all of the server profile(s) is successfully deleted - %s out of %s deleted \" % (deleted, total))\n if deleted + not_exists == total:\n logger.warn(\"%s non-existing server profile(s) is skipped being deleted, test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile(s) is skipped being deleted, %s profile(s) left is failed being deleted \" % (not_exists, total - deleted - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully deleted - %s out of %s \" % (deleted, total))\n return True",
"def delete(self, *names):\n\n return [shard.delete(*keys) for shard, keys\n in self.gather_keys_by_shard(names)]",
"def test_delete_namespaced_template(self):\n pass",
"def delete_survey_templates():\n try:\n survey_templates = driver.find_element_by_class_name('survey-templates')\n template_rows = poll_templates.find_elements_by_class_name('m-datatable__row--even')\n for row in template_rows:\n click_on('delete', scope=row)\n popup = driver.find_element_by_class_name('narcissus_17w311v')\n click_on('delete', scope=popup)\n if verify_alert() != \"Success\":\n return \"Error: no delete alert\"\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"",
"def unset(cls, client, resource, args) :\n try :\n if type(resource) is not list :\n unsetresource = nshttpprofile()\n if type(resource) != type(unsetresource):\n unsetresource.name = resource\n else :\n unsetresource.name = resource.name\n return unsetresource.unset_resource(client, args)\n else :\n if type(resource[0]) != cls :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i]\n else :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i].name\n result = cls.unset_bulk_request(client, unsetresources, args)\n return result\n except Exception as e :\n raise e",
"def test_delete_collection_namespaced_template(self):\n pass",
"def destroy_resources(self, statuses: [], resource_age_mins: int, cfn_template_names: [] = None):\n resources_to_delete = []\n stacks_to_delete = {}\n all_resources = self._get_all_resources()\n filtered_resources = self._filter_resources_by_age(all_resources, resource_age_mins)\n filtered_resources = self._filter_resources_by_status(filtered_resources, statuses)\n if len(filtered_resources) < 1:\n logger.info(BgColors.WARNING + ' No resources where selected to be destroyed. '\n 'Check your filter options.' + BgColors.ENDC)\n return\n\n logger.info(f' [{len(filtered_resources)}] resources selected to be destroyed.')\n for resource in filtered_resources:\n cfn_file_name = self._get_cfn_template_file_name(resource.cf_template_name)\n # In case if cfn template list is given collect only template name related resources\n if cfn_template_names:\n if cfn_file_name in cfn_template_names:\n dependents = self._find_resource_dependents(resource, all_resources)\n if len(dependents) > 0 and \\\n not self._is_dependent_template_listed(cfn_template_names, dependents.keys()):\n raise Exception(BgColors.FAIL + f'Stack for [{resource.cf_stack_name}] cannot be deleted due '\n f'to following stacks are dependent: '\n f'{list(dependents.values())}. Please delete dependend stacks '\n f'first or list dependend stacks cfn templates together. '\n f'For example if TemplateB stack depends on TemplateA '\n f'stack: -t TemplateB,TemplateA.' + BgColors.ENDC)\n resources_to_delete.append(resource)\n if not stacks_to_delete.get(resource.cf_template_name):\n stacks_to_delete[resource.cf_template_name] = []\n stacks_to_delete.get(resource.cf_template_name).append(resource.cf_stack_name)\n # In case if cfn template list is NOT given collect all resources\n else:\n resources_to_delete.append(resource)\n if not stacks_to_delete.get(resource.cf_template_name):\n stacks_to_delete[resource.cf_template_name] = []\n stacks_to_delete.get(resource.cf_template_name).append(resource.cf_stack_name)\n\n resource_count = len(resources_to_delete)\n if resource_count > 0:\n stack_names = self.dict_array_values_as_list(stacks_to_delete)\n logger.info(f\" Destroying [{resource_count}] cloud formation stacks {stack_names}\")\n with ThreadPoolExecutor(max_workers=10) as t_executor:\n for index in range(resource_count):\n resource_to_delete = resources_to_delete[index]\n t_executor.submit(ResourceTool._delete_resource, resource_to_delete,\n self.cfn_helper, logger, all_resources)\n\n s3_bucket_name = self.get_s3_bucket_name(self.account_id, self.region)\n failed_resources = []\n for resource in ResourceModel.scan():\n if resource.status == ResourceModel.Status.DELETE_FAILED.name:\n logger.error(f'Deleting [{resource.cf_stack_name}] stack failed.')\n failed_resources.append(resource)\n if len(failed_resources) > 0:\n err_message = f'Failed to delete [{ResourceModel.Meta.table_name}] DDB table ' \\\n f'and [{s3_bucket_name}] S3 bucket due CFN stack deletion failure. ' \\\n f'For investigation purpose we do not delete DDB table and S3 bucket ' \\\n f'(feel free to delete DDB table/S3 bucket manually when ready). '\n logger.error(err_message)\n raise Exception(err_message)\n self._delete_s3_files(s3_bucket_name, stacks_to_delete)\n else:\n logger.warning(BgColors.WARNING + f\" Nothing to destroy due to NO resources for template names \"\n f\"{cfn_template_names} found.\" + BgColors.ENDC)",
"def remove_vpn_profile(**kwargs):\n proxy = kwargs['proxy']\n session_token = kwargs['sessiontoken']\n display_name = kwargs['display_name']\n profile_type = kwargs['profile_type']\n\n match profile_type:\n case \"ike\":\n profile = \"ipsec-vpn-ike-profiles\"\n case \"ipsec\":\n profile = \"ipsec-vpn-tunnel-profiles\"\n case \"dpd\":\n profile = \"ipsec-vpn-dpd-profiles\"\n case other:\n print(\"Invalid profile type\")\n sys.exit(1)\n\n json_response_status_code = delete_vpn_profile(proxy, session_token, display_name, profile)\n if json_response_status_code == 200:\n sys.exit(f\"Tier-1 VPN service {display_name} was deleted successfully\")\n else:\n print(f\"There was an error deleting Tier1 VPN service {display_name}\")\n sys.exit(1)"
] | [
"0.6297934",
"0.5755474",
"0.5740489",
"0.56800824",
"0.5628092",
"0.5583906",
"0.5538198",
"0.5437258",
"0.53984183",
"0.5396571",
"0.53511995",
"0.5314962",
"0.5293291",
"0.5268441",
"0.5177119",
"0.5115355",
"0.51009566",
"0.5064228",
"0.50254196",
"0.49820438",
"0.49779505",
"0.4973833",
"0.49335366",
"0.49313757",
"0.49103683",
"0.48992243",
"0.48972848",
"0.48561352",
"0.4851479",
"0.4844605"
] | 0.7412199 | 0 |
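The bulk-delete negatives above all share one shape: iterate over names, tally `deleted` and `not_exists`, then reconcile the tallies against `total` in a summary block. A minimal, framework-free sketch of that reconciliation pattern; the `exists`/`delete` callables are illustrative stand-ins, not part of any library shown here:

```python
import logging

logger = logging.getLogger(__name__)

def delete_all(names, exists, delete):
    """Delete every named item; report a per-outcome tally at the end."""
    total, deleted, missing = len(names), 0, 0
    for name in names:
        if not exists(name):
            logger.warning("'%s' does not exist, skipping", name)
            missing += 1
        elif delete(name):
            deleted += 1
        else:
            logger.warning("failed to delete '%s'", name)
    failed = total - deleted - missing  # every item is deleted, missing, or failed
    logger.info("summary: %d deleted, %d missing, %d failed of %d",
                deleted, missing, failed, total)
    return failed == 0
```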
Gets a default or paginated collection of Server Profile Templates. [Arguments] | def fusion_api_get_server_profile_templates(self, uri=None, param='', api=None, headers=None):
return self.profile_template.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_templates(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/templates\").json()",
"def get_templates(self):\n\n data = self.request_from_server('templates')\n self.templates = data",
"def get_schemas(self):\n templates = [['Template GUID']]\n r = self.system_cursor.execute('{Call wtGetTemplateList(%s)}' % (self.dsn['ProfileGuid'],))\n for row in r.fetchall():\n templates.append([row.TEMPLATE_GUID])\n return templates",
"def list_templates(request):\n templates = models.Template.all().order('name')\n return utility.respond(request, 'admin/list_templates', {'templates': templates})",
"def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates",
"def get_templates(self):\n return [{\"id\": tmplt[\"template_id\"], \"name\": tmplt[\"name\"]}\n for tmplt in Template.objects(user_id=self.user_id, active=True)]",
"def list_templates(self):\n raise NotImplementedError()",
"def T(request):\n\treturn all_templates[request.param]",
"def fusion_api_get_server_profile_new_template(self, uri, api=None, headers=None):\n return self.profile.get(uri=uri, api=api, headers=headers, param=\"/new-profile-template\")",
"def get_all_templates(self):\n url = self.base_url + \"v2/template/\"\n\n resp = requests.get(url=url, headers=self.headers)\n return resp.json(), resp.status_code",
"def template_list(self):\n return self.ezx.get_template_list()",
"def templatelist(cls):\n return cls._templatelist",
"def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }",
"def list_profiles(self):\n return self._get(\"posture\", box=BoxList)",
"def get_queryset(self):\n return Template.objects.all()",
"def ListTemplates(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def fusion_api_storage_system_get_templates(self, uri=None, param='', api=None, headers=None):\n return self.system.get_templates(uri=uri, api=api, headers=headers, param=param)",
"def load_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n if len(profiles) == 0:\r\n #Just in case\r\n profiles[\"Default\"] = Profile()\r\n profiles[\"Default\"].Name = \"Default\"\r\n #Some default templates\r\n profiles[\"Default\"].FileTemplate = \"{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}\"\r\n profiles[\"Default\"].FolderTemplate = \"{<publisher>}\\{<imprint>}\\{<series>}{ (<startyear>{ <format>})}\"\r\n \r\n if not lastused:\r\n lastused = [profiles.keys()[0]]\r\n \r\n return profiles, lastused",
"def get_oneoffixx_templates():\n api_client = OneoffixxAPIClient()\n return (\n OneOffixxTemplate(template, template_group.get('localizedName', ''))\n for template_group in api_client.get_oneoffixx_template_groups()\n for template in template_group.get(\"templates\")\n if template.get('metaTemplateId') in whitelisted_template_types\n )",
"def get_templates(self):\n\n return self._templates",
"def load_network_templates(self) -> List:\n try:\n network_templates = self.api.get(host=self.host, endpoint=f\"/api/v1/orgs/{self.oid}/networktemplates\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting network templates:{TextColors.ENDC} {e}\")\n raise e\n self.network_templates = network_templates",
"def templates(self):\n if self._templates is None:\n self._templates = self.get_all_templates()\n return self._templates",
"def get_project_templates(session=konfuzio_session()) -> List[dict]:\n url = get_project_url()\n r = session.get(url=url)\n r.raise_for_status()\n sorted_templates = sorted(r.json()['section_labels'], key=itemgetter('id'))\n return sorted_templates",
"def test_list_templates_no_args(self):\n rv = TEST_CLIENT.get(\"/templates\")\n result = rv.json()\n\n expected = util.MOCK_TEMPLATE_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)",
"def list_template(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_template\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/templates'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1TemplateList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_all_templates(cls):\n raise NotImplementedError()",
"def profiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n return self._profiles",
"def list_templates(extensions: Optional[List[str]] = None) -> List[str]:\n if environment is None or not hasattr(environment, 'loader'):\n return []\n return environment.list_templates(extensions=extensions)",
"def getProfiles(context):\n\n analytics_tool = getToolByName(getSite(), 'portal_analytics')\n # short circuit if user hasn't authorized yet\n if not analytics_tool.is_auth():\n return SimpleVocabulary([])\n\n try:\n profiles = analytics_tool.makeCachedRequest('profiles')\n except error.BadAuthenticationError:\n choices = [('Please authorize with Google in the Google Analytics \\\n control panel.', None)]\n return SimpleVocabulary.fromItems(choices)\n except error.RequestTimedOutError:\n choices = [('The request to Google Analytics timed out. Please try \\\n again later.', None)]\n return SimpleVocabulary.fromItems(choices)\n if profiles:\n unique_choices = {}\n for entry in profiles:\n title = entry.get('name')\n title = crop(title, 40)\n tableId = entry.get('id')\n unique_choices.update({title: tableId})\n choices = unique_choices.items()\n else:\n choices = [('No profiles available', None)]\n return SimpleVocabulary([SimpleTerm(c[1], c[1], c[0]) for c in choices])"
] | [
"0.66311127",
"0.62959546",
"0.61771345",
"0.6101747",
"0.6013465",
"0.59713155",
"0.5837419",
"0.58203477",
"0.5802853",
"0.5784255",
"0.57497895",
"0.5706248",
"0.5664112",
"0.5597598",
"0.5589706",
"0.55824775",
"0.5561664",
"0.5554667",
"0.55245626",
"0.55197805",
"0.5512789",
"0.55036217",
"0.5484336",
"0.5477272",
"0.5471267",
"0.5468049",
"0.54608464",
"0.545044",
"0.5434974",
"0.5415429"
] | 0.7232698 | 0 |
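In the record above, the keyword's optional `param` string is how callers page through the collection. A hedged sketch of the kind of request that sits underneath, assuming a conventional REST layout with `start`/`count` paging; none of the paths or parameter names here are a confirmed appliance contract:

```python
import requests

def get_collection_page(base_url, resource, start=0, count=25, session=None):
    """Fetch one page of a REST collection, e.g. server-profile-templates."""
    http = session or requests.Session()
    resp = http.get(
        f"{base_url}/rest/{resource}",
        params={"start": start, "count": count},  # assumed paging parameters
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
```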
Gets a default or paginated collection of storage Pools. [Arguments] | def fusion_api_get_storage_pools(self, uri=None, param='', api=None, headers=None):
return self.pool.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs",
"def storage_pool_get_all(context, marker=None, limit=None, sort_keys=None,\n sort_dirs=None, filters=None, offset=None):\n session = get_session()\n with session.begin():\n # Generate the query\n query = _generate_paginate_query(context, session, models.StoragePool,\n marker, limit, sort_keys, sort_dirs,\n filters, offset,\n )\n # No storage_pool would match, return empty list\n if query is None:\n return []\n return query.all()",
"def getPools(self):\n data = self.connect('get','pools',None)\n return data",
"def storage_pool_get(context, storage_pool_id):\n return _storage_pool_get(context, storage_pool_id)",
"def pool_list(request, format=None):\n if request.method == 'GET':\n pools = storage.models.Pool.objects.all()\n serializer = serializers.PoolSerializer(pools)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = serializers.PoolSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def get_pools():\n pools = ch_core.hookenv.action_get('pools')\n if pools:\n return [p.strip() for p in pools.split(',')]\n return None",
"def get_pools():\n poolinfostr = fork_and_get_output(\"zpool list -H -o all\".split())\n header = get_zpool_header()\n poolinfo = poolinfostr.splitlines()\n poolobjs = []\n for poolstr in poolinfo:\n poolobjs.append(DataZFS(poolstr, header, 'pool'))\n return poolobjs",
"def pool_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n ret[pool_record[\"name_label\"]] = pool_record\n return ret",
"def pool_list(mnode):\n cmd = \"gluster pool list\"\n return g.run(mnode, cmd)",
"def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', default)",
"def list_pools(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('pools', self.pools_path, retrieve_all,\r\n **_params)",
"def storage_pools(self):\n\t\t# FIXME: same as InstanceWizard.storage_pools\n\t\ttry:\n\t\t\treturn self._storage_pools\n\t\texcept AttributeError:\n\t\t\ttry:\n\t\t\t\tstorage_pools = self.uvmm.storage_pools(self.node_uri)\n\t\t\texcept uvmmd.UvmmError, e:\n\t\t\t\tstorage_pools = ()\n\t\t\tself._storage_pools = dict([(p.name, p) for p in storage_pools])\n\t\t\treturn self._storage_pools",
"def get_all(self, marker=None, limit=None, sort_key='id',\n sort_dir='asc'):\n context = pecan.request.context\n return self._get_nodepool_policies_collection(marker, limit, sort_key,\n sort_dir)",
"def pools(self, summary=True, tags_intersect=None, tags=None):\n return list(self.all_pools(summary=summary, tags=tags, tags_intersect=tags_intersect))",
"def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)",
"def _get_objects(cls, lb, names, minimal=False):\n\n if not names:\n return []\n\n pools = cls.factory.create(names, lb)\n\n if not minimal:\n active_member_count = cls._lbcall(lb, 'get_active_member_count',\n names)\n description = cls._lbcall(lb, 'get_description', names)\n lbmethod = cls._lbcall(lb, 'get_lb_method', names)\n members = cls._lbcall(lb, 'get_member', names)\n minimum_active_member = cls._lbcall(lb, 'get_minimum_active_member',\n names)\n minimum_up_member = cls._lbcall(lb, 'get_minimum_up_member',\n names)\n slow_ramp_time = cls._lbcall(lb, 'get_slow_ramp_time', names)\n statistics = cls._lbcall(lb, 'get_statistics', names)\n\n for idx,pool in enumerate(pools):\n pool._active_member_count = active_member_count[idx]\n pool._description = description[idx]\n pool._lbmethod = lbmethod[idx]\n pool._minimum_active_member = minimum_active_member[idx]\n pool._minimum_up_member = minimum_up_member[idx]\n pool._slow_ramp_time = slow_ramp_time[idx]\n pool._statistics = statistics['statistics'][idx]\n\n pool._members = f5.PoolMember._get_objects(lb, [pool],\n [members[idx]], minimal=True)\n\n return pools",
"def GetApiCollection(resource_type):\n return 'compute.' + resource_type",
"def get_collection(self, address):\n return self.client.get_collections(uri=address)",
"def get_pool():\n app = get_app()\n return app['pool']",
"def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)",
"def get_default_resource_pool(self):\n try:\n return self.client.list_resource_pools()[0]['resource_pool']\n except VMwareError as e:\n raise VMwareBackendError(e)",
"def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}",
"def test_list_pools_sort(self):\r\n resources = \"pools\"\r\n cmd = pool.ListPool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])",
"def ParseStoragePool(resources, storage_pool, project, location):\n collection = 'compute.storagePools'\n params = {'project': project, 'zone': location}\n storage_pool_ref = resources.Parse(\n storage_pool, collection=collection, params=params\n )\n return storage_pool_ref",
"def all_pools(self, summary: bool = True, tags: List[str] = None, tags_intersect: List[str] = None):\n return self._all_pages(self.pools_page, summary=summary, tags=tags, tags_intersect=tags_intersect)",
"def _get_management_address_pool(self, context):\n session = KeystoneClient().endpoint_cache.get_session_from_token(\n context.auth_token, context.project)\n sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME, session)\n return sysinv_client.get_management_address_pool()",
"def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None",
"def pool(self):\n return self._properties.get('pool')",
"def resource_pool(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_pool\")"
] | [
"0.6266214",
"0.6186386",
"0.60942817",
"0.5933658",
"0.58059144",
"0.5733373",
"0.57271314",
"0.5716223",
"0.5694731",
"0.5654598",
"0.5603055",
"0.55755794",
"0.5481993",
"0.54776025",
"0.5448125",
"0.54426765",
"0.5428386",
"0.53938913",
"0.5393048",
"0.53884286",
"0.5383339",
"0.5367604",
"0.535207",
"0.5339606",
"0.5272503",
"0.5261313",
"0.5222433",
"0.52032125",
"0.51985645",
"0.51777136"
] | 0.6554763 | 0 |
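Several negatives in the record above return only one page at a time. When the payload carries a next-page link, the whole collection can be walked lazily; this sketch assumes a `members`/`nextPageUri` envelope, which is a common convention but an assumption here:

```python
def iter_members(fetch, uri):
    """Yield every member of a paginated collection, following next-page links.

    `fetch` is any callable mapping a URI to the decoded JSON page.
    """
    while uri:
        page = fetch(uri)
        yield from page.get("members", [])
        uri = page.get("nextPageUri")  # assumed link field; None ends the walk
```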
Deletes storage pool based on name OR uri. [Arguments] | def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):
return self.pool.delete(uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def storage_pool_delete_by_storage(context, storage_id):\n _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete_pool(self, pool):\r\n return self.delete(self.pool_path % (pool))",
"def delete(self):\n self._lbcall('delete_pool', [self._name])",
"def delete_device_pool(arn=None):\n pass",
"def storage_pools_delete(context, storage_pools_id_list):\n session = get_session()\n with session.begin():\n for storage_pool_id in storage_pools_id_list:\n LOG.debug('deleting storage_pool {0}:'.format(storage_pool_id))\n query = _storage_pool_get_query(context, session)\n result = query.filter_by(id=storage_pool_id).delete()\n\n if not result:\n LOG.error(exception.StoragePoolNotFound(storage_pool_id))\n\n return",
"def delete_pool(self, argu):\n\n if not argu:\n LOG.error(\"In delete_pool, it should not pass the None.\")\n\n # delete policy\n self._delete_policy(\n argu['listener_id'],\n argu['session_persistence_type'],\n argu['lb_algorithm']\n )\n\n cmd_apv_no_group = ADCDevice.no_group(argu['pool_id'])\n for base_rest_url in self.base_rest_urls:\n self.run_cli_extend(base_rest_url, cmd_apv_no_group)",
"def deletePool(self,ippool_name): \n self.__deletePoolCheckInput(ippool_name)\n ippool_obj=ippool_main.getLoader().getIPpoolByName(ippool_name)\n self.__deletePoolDB(ippool_obj.getIPpoolID())\n ippool_main.getLoader().unloadIPpoolByID(ippool_obj.getIPpoolID())",
"def delete_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n if hasattr(backend_class, 'delete'):\n return backend_class.delete(parsed_uri, **kwargs)",
"def delete_entity(self, context, pool):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, POOLS_RESOURCE,\n pool.id)\n msg = _(\"NetScaler driver pool removal: %s\") % pool.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)",
"def remove_pool(ctx, pool_name):\n \n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if not data:\n click.echo(\"Trying to delete pool, which is not present.\")\n entryFound = True\n\n binding_dict = config_db.get_table('NAT_BINDINGS')\n if binding_dict and entryFound == False: \n for binding_name, binding_values in binding_dict.items():\n if binding_values['nat_pool'] == pool_name:\n click.echo(\"Pool is not removed, as it is mapped to Binding {}, remove the pool binding first !!\".format(binding_name))\n entryFound = True\n break\n\n if entryFound == False:\n config_db.set_entry(table, key, None)",
"def delete(uri, backend, context=None):\n if backend:\n loc = location.get_location_from_uri_and_backend(\n uri, backend, conf=CONF)\n store = get_store_from_store_identifier(backend)\n return store.delete(loc, context=context)\n\n LOG.warning('Backend is not set to image, searching all backends based on '\n 'location URI.')\n\n backends = CONF.enabled_backends\n for backend in backends:\n try:\n if not uri.startswith(backends[backend]):\n continue\n\n loc = location.get_location_from_uri_and_backend(\n uri, backend, conf=CONF)\n store = get_store_from_store_identifier(backend)\n return store.delete(loc, context=context)\n except (exceptions.NotFound, exceptions.UnknownScheme):\n continue\n\n raise exceptions.NotFound(_(\"Image not found in any configured backend\"))",
"def delete(name, config, backend, storage, debug):\n setup_lithops_logger(logging.DEBUG)\n\n verify_runtime_name(name)\n\n if config:\n config = load_yaml_config(config)\n\n setup_lithops_logger(logging.DEBUG)\n\n config_ow = set_config_ow(backend, storage, runtime_name=name)\n config = default_config(config, config_ow)\n\n if config['lithops']['mode'] != SERVERLESS:\n raise Exception('\"lithops runtime delete\" command is only valid for serverless backends')\n\n storage_config = extract_storage_config(config)\n internal_storage = InternalStorage(storage_config)\n compute_config = extract_serverless_config(config)\n compute_handler = ServerlessHandler(compute_config, internal_storage)\n\n runtimes = compute_handler.list_runtimes(name)\n for runtime in runtimes:\n compute_handler.delete_runtime(runtime[0], runtime[1])\n runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])\n internal_storage.delete_runtime_meta(runtime_key)",
"def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")",
"def delete(self, name):\n self.backend.delete(name)",
"def delete_provider(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n print \"USAGE: molns provider delete name\"\n return\n config.delete_object(name=args[0], kind='Provider')",
"def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):\n return self.system.delete(uri=uri, api=api, headers=headers)",
"def delete_pool(self, context, pool):\n LOG.info(\"Received request 'Delete Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool,\n }\n self._send_event(lb_const.EVENT_DELETE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])",
"def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):\n return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"async def delete_work_pool(\n work_pool_name: str = Path(..., description=\"The work pool name\", alias=\"name\"),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n):\n\n if work_pool_name.lower().startswith(\"prefect\"):\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Worker pools starting with 'Prefect' are reserved for internal use and can not be deleted.\",\n )\n\n async with db.session_context(begin_transaction=True) as session:\n work_pool_id = await worker_lookups._get_work_pool_id_from_name(\n session=session, work_pool_name=work_pool_name\n )\n\n await models.workers.delete_work_pool(\n session=session, work_pool_id=work_pool_id, db=db\n )",
"def delete(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a delete() method\"\n )",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):\n return self.template.delete(name=name, uri=uri, api=api, headers=headers)",
"def l7pool_del(env, identifier):\n mgr = SoftLayer.LoadBalancerManager(env.client)\n try:\n mgr.del_lb_l7_pool(identifier)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def do_destroy(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n dicti.pop(\"{}.{}\".format(args[0], args[1]))\n storage.save()\n else:\n print(\"** no instance found **\")",
"def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass",
"def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return",
"def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete(self, request, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_pool,\n pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)"
] | [
"0.7378843",
"0.72491455",
"0.69974256",
"0.6925014",
"0.68403524",
"0.67333746",
"0.6620511",
"0.6578201",
"0.6543454",
"0.6318132",
"0.62581223",
"0.6236893",
"0.6212297",
"0.6185523",
"0.6152458",
"0.60878736",
"0.6049331",
"0.6023816",
"0.60171694",
"0.6000202",
"0.5996862",
"0.5960628",
"0.59605926",
"0.59294945",
"0.591969",
"0.59135115",
"0.5912369",
"0.58994156",
"0.58947414",
"0.58835065"
] | 0.79339015 | 0 |
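The name-OR-uri convention in the record above usually means a lookup happens first when only a name is supplied. A sketch of that resolution step; the endpoint path and envelope fields are stated assumptions, and `client` is any object exposing `get`/`delete`:

```python
def delete_pool(client, name=None, uri=None):
    """Delete a storage pool by uri, resolving a name to a uri first if needed."""
    if uri is None:
        if name is None:
            raise ValueError("either a name or a uri is required")
        members = client.get("/rest/storage-pools").get("members", [])  # assumed path
        match = next((p for p in members if p.get("name") == name), None)
        if match is None:
            raise LookupError("no storage pool named %r" % name)
        uri = match["uri"]
    return client.delete(uri)
```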
Gets a collection of Storage systems. [Arguments] | def fusion_api_get_storage_system(self, uri=None, param='', api=None, headers=None):
return self.system.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_all():\n return ItopapiPrototype.find_all(ItopapiStorageSystem)",
"def fusion_api_get_server_profiles_available_storage_systems(self, uri=None, param='', api=None, headers=None):\n param = '/available-storage-systems%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_storage_pools(self, uri=None, param='', api=None, headers=None):\n return self.pool.get(uri=uri, api=api, headers=headers, param=param)",
"def get_all_storage(life):\n\treturn [items.get_item_from_uid(item) for item in life['inventory'] if 'max_capacity' in items.get_item_from_uid(item)]",
"def installStorage():\n for name,data in Online.SetupParams.detectors.items():\n s = data['System']\n c = Online.PVSSSystems.controlsMgr(s)\n inst = Installer(c)\n nf = data['SubFarms']\n streams = data['StorageStreams']\n inst.createStorage(name,streams,nf)\n return c",
"def fusion_api_get_server_profiles_available_storage_system(self, uri=None, param='', api=None, headers=None):\n param = '/available-storage-system%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def fetch_storage(self, planet=None):\n return self.fetch_levels(\"resources\", planet, codes.storage)",
"def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)",
"def list_(args):\n osf = _setup_osf(args)\n\n project = osf.project(args.project)\n\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n print(os.path.join(prefix, path))",
"def getSystemsList(self):\r\n\r\n self._logger.debug(\"in API getSystemsList()...\")\r\n\r\n # format url parameters\r\n params = {\r\n \"api_key\": _API_APP_KEY,\r\n \"authentication_token\": self._authToken,\r\n \"user_id\": self._userID\r\n } \r\n\r\n # call the systems API\r\n response = self._call_api(_API_SYSTEMS, params=params)\r\n \r\n # if data was returned, return the systems list\r\n if response is not None and response.status_code == 200:\r\n\r\n return response.json()\r\n\r\n # otherwise return error (False)\r\n else:\r\n return False",
"def fusion_api_storage_system_get_templates(self, uri=None, param='', api=None, headers=None):\n return self.system.get_templates(uri=uri, api=api, headers=headers, param=param)",
"def get_storage_domains(cohesity_client):\n storage_domain_list = cohesity_client.view_boxes.get_view_boxes()\n for domain in storage_domain_list:\n exported_res_dict[\"Storage Domains\"].append(domain.name)\n return storage_domain_list",
"def get_storage_providers() -> List[Union[Type[ProviderApi], Type[StorageProviderInterface]]]:\n return [p for p in ProviderFactory.get_providers() if issubclass(p, StorageProviderInterface)]",
"def get_all(self, isystem_uuid=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n\n return self._get_controller_fs_collection(isystem_uuid, marker, limit,\n sort_key, sort_dir)",
"def getStorageTypes(self, show_all=False):\n types = getStorageTypes()\n if not show_all:\n types = [x for x in types if x['interface'].providedBy(self)]\n return types",
"def get_storagestats(self):\n\n if (\n self.plugin_settings['storagestats.api'].lower() == 'generic'\n or self.plugin_settings['storagestats.api'].lower() == 'list-objects'\n ):\n davhelpers.list_files(self)\n\n elif self.plugin_settings['storagestats.api'].lower() == 'rfc4331':\n davhelpers.rfc4331(self)",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def storage_locations(self) -> Sequence[str]:\n return pulumi.get(self, \"storage_locations\")",
"def get_storage(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Storage Usage Statistics\",\n \"/statistics/systems/storage.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)",
"def list_filesystem(self, headers=None, **kwargs):\n logger.debug('Listing filesystem ...')\n resource = 'account'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._get(params=params, headers=headers)\n return response.json() if response.content else {}",
"def storage_space_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"storage_space_ids\")",
"def collect_system_folders(systems):\n try:\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n json_body = list()\n\n for folder in systems:\n for name in folder[\"systemNames\"]:\n sys_item = dict(\n measurement = \"folders\",\n tags = dict(\n folder_name = folder[\"name\"],\n sys_name = name\n ),\n fields = dict(\n dummy = 0\n )\n )\n json_body.append(sys_item)\n if not CMD.doNotPost:\n client.drop_measurement(\"folders\")\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error(\"Error when attempting to post system folders\")",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def stores(self) -> List[BaseStorage]:\n return ([self._global_store] + list(self.node_stores) +\n list(self.edge_stores))",
"def update_storage_systems_info(self):\n try:\n rc, existing_systems = self.request(\"storage-systems\")\n\n # Mark systems for adding or removing\n for system in self.systems:\n for existing_system in existing_systems:\n if system[\"ssid\"] == existing_system[\"id\"]:\n system[\"current_info\"] = existing_system\n\n if system[\"current_info\"][\"passwordStatus\"] in [\"unknown\", \"securityLockout\"]:\n system[\"failed\"] = True\n self.module.warn(\"Skipping storage system [%s] because of current password status [%s]\"\n % (system[\"ssid\"], system[\"current_info\"][\"passwordStatus\"]))\n if system[\"current_info\"][\"metaTags\"]:\n system[\"current_info\"][\"metaTags\"] = sorted(system[\"current_info\"][\"metaTags\"], key=lambda x: x[\"key\"])\n break\n else:\n self.systems_to_add.append(system)\n\n # Mark systems for removing\n for existing_system in existing_systems:\n for system in self.systems:\n if existing_system[\"id\"] == system[\"ssid\"]:\n\n # Leave existing but undiscovered storage systems alone and throw a warning.\n if existing_system[\"id\"] in self.undiscovered_systems:\n self.undiscovered_systems.remove(existing_system[\"id\"])\n self.module.warn(\"Expected storage system exists on the proxy but was failed to be discovered. Array [%s].\" % existing_system[\"id\"])\n break\n else:\n self.systems_to_remove.append(existing_system[\"id\"])\n except Exception as error:\n self.module.fail_json(msg=\"Failed to retrieve storage systems. Error [%s].\" % to_native(error))",
"def list_systems():\n return sorted(systems.keys())",
"def do_all(self, args):\n args = shlex.split(args)\n my_list = []\n if len(args) == 0:\n for item in models.storage.all().values():\n my_list.append(str(item))\n print(\"\", end=\"\")\n print(\", \".join(my_list), end=\"\")\n print(\"\")\n\n elif args[0] in classes:\n for key in models.storage.all():\n if args[0] in key:\n my_list.append(str(models.storage.all()[key]))\n print(\"\", end=\"\")\n print(\", \".join(my_list), end=\"\")\n print(\"\")\n else:\n print(\"** class doesn't exist **\")",
"def items(self) -> typing.List[\"CSIStorageCapacity\"]:\n return typing.cast(\n typing.List[\"CSIStorageCapacity\"],\n self._properties.get(\"items\"),\n )",
"def storage(self, **kwargs):\n self.logger.debug(f\"Get basic storage data\")\n url_path = 'storage'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)"
] | [
"0.72300583",
"0.67324096",
"0.6577859",
"0.6365303",
"0.63226676",
"0.6316031",
"0.6084149",
"0.6045237",
"0.6035955",
"0.60016465",
"0.59970427",
"0.5992337",
"0.59816015",
"0.5942096",
"0.5889566",
"0.5865372",
"0.5842589",
"0.58372796",
"0.58260256",
"0.5720975",
"0.56882375",
"0.5672583",
"0.5661459",
"0.5658143",
"0.5629301",
"0.5628867",
"0.56108445",
"0.5610154",
"0.55981356",
"0.5587424"
] | 0.6994998 | 1 |
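Once a storage-systems collection like the one above is in hand, callers typically post-filter it client-side. A tiny generic matcher, purely illustrative:

```python
def find_system(members, **attrs):
    """Return the first member whose fields equal all the given attributes."""
    return next(
        (m for m in members if all(m.get(k) == v for k, v in attrs.items())),
        None,
    )

# e.g. find_system(page["members"], name="array1", status="OK")
```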
Creates a storage system [Arguments] | def fusion_api_create_storage_system(self, body, api=None, headers=None):
return self.system.create(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_system(sys_structure):\n pass",
"def do_create_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No name given.\")\n return\n if len(args) == 1:\n self.perror(\"No path given.\")\n return\n if not os.path.isabs(args[1]):\n print(\"Path must be absolute: \" + args[1])\n return\n self.do_coroutine(self._localStorageRoutines.create_volume_routine(args[0], args[1]))",
"def create_infrastructure_storage(config, context, dc):\n print \"### Configuring storage ###\"\n storage = InfrastructureStorage(context)\n tier = storage.configure_tiers(dc, config.get(\"tier\", \"name\"))\n try: \n user = config.get(\"device\", \"user\")\n password= config.get(\"device\", \"password\")\n except NoOptionError:\n user = None\n password = None\n device = storage.create_device(dc, config.get(\"device\", \"name\"),\n StorageTechnologyType.valueOf(config.get(\"device\", \"type\")),\n config.get(\"device\", \"address\"),\n config.get(\"device\", \"address\"),\n user, password)\n\n storage.create_pool(device, tier, config.get(\"pool\", \"name\"))",
"def installStorage():\n for name,data in Online.SetupParams.detectors.items():\n s = data['System']\n c = Online.PVSSSystems.controlsMgr(s)\n inst = Installer(c)\n nf = data['SubFarms']\n streams = data['StorageStreams']\n inst.createStorage(name,streams,nf)\n return c",
"def create(self, filesystem=None):\n raise NotImplementedError()",
"def disk_create(context, values):\n return NotImplemented",
"def create_storage(conf):\n _name = conf.get(\"name\", \"\")\n _cls = importer(conf['class'])\n _kwargs = conf['kwargs']\n _io = importer(_kwargs['io_class'])\n return _cls(_kwargs[\"storage_config\"], name=_name, io_class=_io)",
"def _parse_space_create(self, *cmd):\n self.created = {'storageserver': ''}\n cmd = list(*cmd)\n while cmd:\n param = cmd.pop(0)\n if param == \"-n\":\n self.created['name'] = cmd.pop(0)\n elif param == \"-N\":\n self.created['net'] = cmd.pop(0)\n elif param == \"-s\":\n self.created['size'] = cmd.pop(0)\n elif param == \"--redundancy\":\n self.created['redundancy'] = cmd.pop(0)\n elif param == \"--user\":\n self.created['user'] = cmd.pop(0)\n elif param == \"--user\":\n self.created['user'] = cmd.pop(0)\n elif param == \"--group\":\n self.created['group'] = cmd.pop(0)\n elif param == \"--mode\":\n self.created['mode'] = cmd.pop(0)\n elif param == \"-S\":\n self.created['storageserver'] += cmd.pop(0) + \",\"\n else:\n pass",
"def create(*args):",
"def create():",
"def create():",
"def fusion_api_create_storage_volume(self, body, api=None, headers=None):\n return self.volume.create(body=body, api=api, headers=headers)",
"def __init__(__self__, *,\n create_option: pulumi.Input[Union[str, 'DiskCreateOption']],\n gallery_image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,\n image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,\n logical_sector_size: Optional[pulumi.Input[int]] = None,\n performance_plus: Optional[pulumi.Input[bool]] = None,\n security_data_uri: Optional[pulumi.Input[str]] = None,\n source_resource_id: Optional[pulumi.Input[str]] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n upload_size_bytes: Optional[pulumi.Input[float]] = None):\n pulumi.set(__self__, \"create_option\", create_option)\n if gallery_image_reference is not None:\n pulumi.set(__self__, \"gallery_image_reference\", gallery_image_reference)\n if image_reference is not None:\n pulumi.set(__self__, \"image_reference\", image_reference)\n if logical_sector_size is not None:\n pulumi.set(__self__, \"logical_sector_size\", logical_sector_size)\n if performance_plus is not None:\n pulumi.set(__self__, \"performance_plus\", performance_plus)\n if security_data_uri is not None:\n pulumi.set(__self__, \"security_data_uri\", security_data_uri)\n if source_resource_id is not None:\n pulumi.set(__self__, \"source_resource_id\", source_resource_id)\n if source_uri is not None:\n pulumi.set(__self__, \"source_uri\", source_uri)\n if storage_account_id is not None:\n pulumi.set(__self__, \"storage_account_id\", storage_account_id)\n if upload_size_bytes is not None:\n pulumi.set(__self__, \"upload_size_bytes\", upload_size_bytes)",
"def cmd_stor(args):",
"def create_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.create(token, filename=arguments[1])",
"def create_volume(self, size=1, name=None, description=None,\n image=None, check=True):\n metadata = '{0}={1}'.format(config.STEPLER_PREFIX,\n config.STEPLER_PREFIX)\n cmd = 'cinder create ' + str(size) + ' --metadata ' + metadata\n if image:\n cmd += ' --image ' + image\n if name:\n cmd += ' --name ' + moves.shlex_quote(name)\n if description is not None:\n cmd += ' --description ' + moves.shlex_quote(description)\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_AVAILABLE_TIMEOUT, check=check)\n volume_table = output_parser.table(stdout)\n volume = {key: value for key, value in volume_table['values']}\n return volume",
"def storage_factory():\n return storage(transaction.manager, **kwargs)",
"def storage_create(context, values):\n if not values.get('id'):\n values['id'] = uuidutils.generate_uuid()\n\n storage_ref = models.Storage()\n storage_ref.update(values)\n\n session = get_session()\n with session.begin():\n session.add(storage_ref)\n\n return _storage_get(context,\n storage_ref['id'],\n session=session)",
"def create_filesystem(self, filesystem_identifier, headers=None, **kwargs):\n logger.debug('Creating filesystem %s ...', filesystem_identifier)\n resource = 'filesystem'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._put(endpoint=filesystem_identifier, params=params, headers=headers)\n return Command(self, response)",
"def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))",
"def createDisk(self , name):\n return",
"def create(self, **kwargs):\n for key in self.default.keys():\n if key not in kwargs.keys():\n kwargs[key] = self.default[key]\n elif kwargs[key] is None:\n kwargs[key] = self.default[key]\n name = kwargs['NAME']\n path = Path(kwargs['path'])\n new_path = Path(f'{path}/{name}')\n result = os.system(f\"mkdir {new_path}\")\n if result == 0:\n result = self.generate_volume_info(NAME=name, path=kwargs['path'])\n result = self.update_dict([result])\n return result",
"def test_create_system_entire(self):\n pass",
"def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )",
"def _CreateStorageFile(self):\n return sqlite_file.SQLiteStorageFile(storage_type=self._storage_type)",
"def svn_fs_create(*args):\r\n return _fs.svn_fs_create(*args)",
"def _CreateStorageFile(self):\n if self._storage_type == definitions.STORAGE_TYPE_TASK:\n return gzip_file.GZIPStorageFile(storage_type=self._storage_type)\n\n return ZIPStorageFile(\n maximum_buffer_size=self._buffer_size,\n storage_type=self._storage_type)",
"def _mkfs (self,blockDevice,timer):\n # build command string\n fsTypeString = None\n if (self._activeFileSystemConfig.fileSystemType == blinky_generated_enums.FileSystemTypeType.kExt3):\n fsTypeString = \"ext3\"\n if (self._activeFileSystemConfig.fileSystemType == blinky_generated_enums.FileSystemTypeType.kExt4):\n fsTypeString = \"ext4\"\n else:\n self._log(\"unsupported-fs-type\").error(\"file system %s doesn't support type %s\",self._activeFileSystemConfig.fileSystemType)\n return ReturnCodes.kGeneralError\n \n mkfsCmd = self._activeCommandsConfig.mkfs\n mkfsCmdExtras = self._activeCommandsConfig.mkfsExtras\n cmdString = mkfsCmd%{self.BLOCK_DEVICE_COMMAND_ELEMENT:blockDevice,self.TYPE_COMMAND_ELEMENT:fsTypeString}\n\n # update with extra parameters\n cmdString = self.__joinCmdStringWithExtras(cmdString,mkfsCmdExtras)\n\n # run\n stdout,stderr,rc = self._runCommand(cmdString,timer)\n \n if (rc == 0):\n self._log(\"fs-created\").debug2(\"file system was successfully created on block device '%s'\",blockDevice)\n return ReturnCodes.kOk\n else:\n self._log(\"fs-creation-failed\").error(\"file system creation on block device '%s' failed! stderr=%s\",blockDevice,stderr)\n return ReturnCodes.kGeneralError",
"def create():\n pass",
"def _swift_storage_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('storage')\n self._swift_install('storage')\n self._set_onhold('storage')\n self._final_install_touches('storage')"
] | [
"0.68909466",
"0.676477",
"0.6605614",
"0.6419755",
"0.6357417",
"0.6257824",
"0.6233322",
"0.61544156",
"0.6116278",
"0.6047319",
"0.6047319",
"0.6040705",
"0.6036012",
"0.5999103",
"0.59540313",
"0.5937024",
"0.5922111",
"0.5921285",
"0.5895358",
"0.58862776",
"0.5884522",
"0.5873168",
"0.58677965",
"0.5774744",
"0.5765276",
"0.5764933",
"0.573518",
"0.5716349",
"0.571239",
"0.5710004"
] | 0.7146697 | 0 |
Updates a storage system [Arguments] | def fusion_api_update_storage_system(self, body, uri, api=None, headers=None):
return self.system.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_system(self, system):\n try:\n rc, storage_system = self.request(\"storage-systems/%s\" % system[\"ssid\"], method=\"POST\", data=system[\"changes\"])\n except Exception as error:\n self.module.warn(\"Failed to update storage system. Array [%s]. Error [%s]\" % (system[\"ssid\"], to_native(error)))",
"def do_update(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif not \"{}.{}\".format(args[0], args[1]) in dicti:\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n key = dicti[\"{}.{}\".format(args[0], args[1])]\n setattr(key, args[2], args[3])\n key.save()",
"def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif not args[0] in class_type:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif (\"{}.{}\".format(args[0], args[1]) not in storage.all().keys()):\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n new_dict = models.storage.all()\n tmp = \"{}.{}\".format(args[0], args[1])\n if tmp in new_dict.keys():\n attr = getattr(new_dict[tmp], args[2], \"\")\n setattr(new_dict[tmp], args[2], type(attr)(args[3]))\n new_dict[tmp].save()",
"def fusion_api_update_storage_volume(self, body, uri, api=None, headers=None):\n return self.volume.update(body=body, uri=uri, api=api, headers=headers)",
"def update(self, system, environment_input):\n pass",
"def update_storage_systems_info(self):\n try:\n rc, existing_systems = self.request(\"storage-systems\")\n\n # Mark systems for adding or removing\n for system in self.systems:\n for existing_system in existing_systems:\n if system[\"ssid\"] == existing_system[\"id\"]:\n system[\"current_info\"] = existing_system\n\n if system[\"current_info\"][\"passwordStatus\"] in [\"unknown\", \"securityLockout\"]:\n system[\"failed\"] = True\n self.module.warn(\"Skipping storage system [%s] because of current password status [%s]\"\n % (system[\"ssid\"], system[\"current_info\"][\"passwordStatus\"]))\n if system[\"current_info\"][\"metaTags\"]:\n system[\"current_info\"][\"metaTags\"] = sorted(system[\"current_info\"][\"metaTags\"], key=lambda x: x[\"key\"])\n break\n else:\n self.systems_to_add.append(system)\n\n # Mark systems for removing\n for existing_system in existing_systems:\n for system in self.systems:\n if existing_system[\"id\"] == system[\"ssid\"]:\n\n # Leave existing but undiscovered storage systems alone and throw a warning.\n if existing_system[\"id\"] in self.undiscovered_systems:\n self.undiscovered_systems.remove(existing_system[\"id\"])\n self.module.warn(\"Expected storage system exists on the proxy but was failed to be discovered. Array [%s].\" % existing_system[\"id\"])\n break\n else:\n self.systems_to_remove.append(existing_system[\"id\"])\n except Exception as error:\n self.module.fail_json(msg=\"Failed to retrieve storage systems. Error [%s].\" % to_native(error))",
"def storage_update(context, storage_id, values):\n session = get_session()\n with session.begin():\n query = _storage_get_query(context, session)\n result = query.filter_by(id=storage_id).update(values)\n return result",
"def do_update(self, arg):\n arg = arg.split()\n try:\n h = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif h not in objects.keys():\n print(\"** no instance found **\")\n elif len(arg) <= 2:\n print(\"** attribute name missing **\")\n elif len(arg) <= 3:\n print(\"** value missing **\")\n else:\n setattr(objects[h], arg[2], arg[3])\n storage.save()",
"def disk_update(context, disk_id, values):\n return NotImplemented",
"def update(*args):",
"def _run_system_update(args):\n mem_types = set([\"memory\", \"jvm_opts\"])\n args = defaults.update_check_args(args, \"Could not do upgrade of bcbio_system.yaml\")\n system_file = os.path.join(args.datadir, \"galaxy\", \"bcbio_system.yaml\")\n with open(system_file) as in_handle:\n config = yaml.safe_load(in_handle)\n out = copy.deepcopy(config)\n mems = []\n for attrs in config.get(\"resources\", {}).itervalues():\n for key, value in attrs.iteritems():\n if key in mem_types:\n mems.append((key, value))\n common_mem = _calculate_common_memory(mems)\n for prog, attrs in config.get(\"resources\", {}).iteritems():\n for key, value in attrs.iteritems():\n if key == \"cores\":\n out['resources'][prog][key] = int(args.cores)\n elif key in mem_types:\n out[\"resources\"][prog][key] = _update_memory(key, value, args.memory,\n common_mem)\n bak_file = system_file + \".bak%s\" % datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n shutil.move(system_file, bak_file)\n with open(system_file, \"w\") as out_handle:\n yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)",
"def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n elif args[0] in classes:\n if len(args) > 1:\n k = args[0] + \".\" + args[1]\n if k in models.storage.all():\n if len(args) > 2:\n if len(args) > 3:\n try:\n if isinstance(args[2], datetime) is True:\n pass\n if args[0] in classes:\n if isinstance(args[2], ints) is True:\n args[3] = int(args[3])\n elif isinstance(args[2], floats) is True:\n args[3] = float(args[3])\n except:\n pass\n setattr(models.storage.all()[k], args[2], args[3])\n models.storage.all()[k].save()\n else:\n print(\"** value missing **\")\n else:\n print(\"** attribute name missing **\")\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def do_update(self, line):\n args = shlex.split(line)\n size = len(args)\n db = models.storage.all()\n if size == 0:\n print(\"** class name missing **\")\n elif not args[0] in self.__names:\n print(\"** class doesn't exist **\")\n elif size == 1:\n print(\"** instance id missing **\")\n elif not (args[0] + \".\" + args[1]) in db:\n print(\"** no instance found **\")\n elif size == 2:\n print(\"** attribute name missing **\")\n elif size == 3:\n print(\"** value missing **\")\n else:\n new_dict = db[args[0] + \".\" + args[1]].to_dict()\n val = args[3]\n if self.is_int(val):\n val = int(val)\n elif self.is_float(val):\n val = float(val)\n new_dict[args[2]] = val\n obj = self.__names[args[0]](**new_dict)\n db[args[0] + \".\" + args[1]] = obj\n models.storage.save()",
"def update(self, args):\n pass",
"def cmd_stor(args):",
"def update(self, class_name, args, stored_objects):\n id_list = [k.split(\".\")[1] for k in stored_objects]\n instance = \"{}.{}\".format(class_name, args[0])\n obj = stored_objects[instance]\n '''convert to the right attribute value type'''\n setattr(obj, args[1], args[2])\n models.storage.save()",
"def updateDropboxStorage(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])",
"def update_volumes():\n print 'do something useful here'",
"def do_update(self, arg):\n args = arg.split()\n object_dict = storage.all()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if args[0] in self.class_dict:\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n return\n elif len(args) == 3:\n print(\"** value missing **\")\n return\n else:\n print(\"** class doesn't exist **\")\n return\n\n for i in range(len(args)):\n if args[i].startswith('\"') and args[i].endswith('\"'):\n args[i] = args[i][1:-1]\n\n for full_key in object_dict.keys():\n key = full_key.split('.')\n key_id = key[1]\n if args[0] in self.class_dict:\n if args[1] == object_dict[full_key].id:\n setattr(object_dict[full_key], args[2], args[3])\n setattr(object_dict[full_key], \"updated_at\",\n datetime.now())\n storage.save()\n return\n else:\n print(\"** class doesn't exist **\")\n return\n print(\"** no instance found **\")",
"def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def fusion_api_edit_storage_pool(self, body, uri, api=None, headers=None):\n return self.pool.update(body, uri, api=api, headers=headers)",
"def fusion_api_edit_storage_volume_template(self, body, uri, api=None, headers=None):\n return self.template.update(body=body, uri=uri, api=api, headers=headers)",
"def do_update(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if len(args) == 2:\n print(\"** attribute name missing **\")\n return\n if len(args) == 3:\n print(\"** value missing **\")\n return\n if args[0] not in HBNBCommand.valid_classes.keys():\n print(\"** class doesn't exist **\")\n return\n all_objs = storage.all(args[0])\n for k, v in all_objs.items():\n if k == args[1]:\n setattr(v, args[2], args[3])\n storage.save()\n return\n print(\"** no instance found **\")",
"def do_update(self, arg):\n if type(arg) == str:\n arg_list = shlex.shlex(arg)\n arg_list.wordchars += \"-\"\n arg_list = list(arg_list)\n try:\n idx_start = arg_list.index(\"[\")\n idx_end = arg_list.index(\"]\")\n list_str = \"\".join(arg_list[idx_start:idx_end + 1])\n list_str = eval(list_str)\n list_start = arg_list[:idx_start]\n list_end = arg_list[idx_end + 1:]\n arg_list = list_start\n arg_list.append(list_str)\n arg_list.extend(list_end)\n except ValueError:\n pass\n else:\n arg_list = arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key not in storage.all():\n print(\"** no instance found **\")\n return\n if len(arg_list) == 3 and type(arg_list[2]) == dict:\n obj = storage.all()[key]\n for key, val in arg_list[2].items():\n setattr(obj, key, val)\n obj.save()\n return\n if len(arg_list) < 3:\n print(\"** attribute name missing **\")\n return\n if len(arg_list) < 4:\n print(\"** value missing **\")\n return\n obj = storage.all()[key]\n if type(arg_list[3]) != list:\n arg_list[3].replace('\"', \"\").replace(\"'\", \"\")\n setattr(obj, arg_list[2].replace('\"', \"\").replace(\"'\", \"\"),\n arg_list[3])\n obj.save()",
"def update(openstack_resource, args):\n args = reset_dict_empty_keys(args)\n openstack_resource.update(args)",
"def modify_filesystem(self, update_dict, obj_fs):\n try:\n adv_smb_params = [\n 'is_cifs_sync_writes_enabled',\n 'is_cifs_op_locks_enabled',\n 'is_cifs_notify_on_write_enabled',\n 'is_cifs_notify_on_access_enabled',\n 'cifs_notify_on_change_dir_depth']\n\n cifs_fs_payload = {}\n fs_update_payload = {}\n\n for smb_param in adv_smb_params:\n if smb_param in update_dict.keys():\n cifs_fs_payload.update({smb_param: update_dict[smb_param]})\n\n LOG.debug(\"CIFS Modify Payload: %s\", cifs_fs_payload)\n\n cifs_fs_parameters = obj_fs.prepare_cifs_fs_parameters(\n **cifs_fs_payload)\n\n fs_update_params = [\n 'size',\n 'is_thin',\n 'tiering_policy',\n 'is_compression',\n 'access_policy',\n 'locking_policy',\n 'description',\n 'cifs_fs_parameters']\n\n for fs_param in fs_update_params:\n if fs_param in update_dict.keys():\n fs_update_payload.update({fs_param: update_dict[fs_param]})\n\n if cifs_fs_parameters:\n fs_update_payload.update(\n {'cifs_fs_parameters': cifs_fs_parameters})\n\n if \"snap_sch_id\" in update_dict.keys():\n fs_update_payload.update(\n {'snap_schedule_parameters': {'snapSchedule':\n {'id': update_dict.get('snap_sch_id')}\n }}\n )\n elif \"is_snap_schedule_paused\" in update_dict.keys():\n fs_update_payload.update(\n {'snap_schedule_parameters': {'isSnapSchedulePaused': False}\n })\n\n obj_fs = obj_fs.update()\n resp = obj_fs.modify(**fs_update_payload)\n LOG.info(\"Successfully modified the FS with response %s\", resp)\n changed = True if resp else False\n\n except Exception as e:\n errormsg = \"Failed to modify FileSystem instance id: {0}\" \\\n \" with error {1}\".format(obj_fs.id, str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)",
"async def do_update(self, data):\n old = await self.config()\n\n new = old.copy()\n new.update(data)\n\n verrors = ValidationErrors()\n\n for attr, minlen, maxlen in (\n ('access_key', 5, 20),\n ('secret_key', 8, 40),\n ):\n curlen = len(new.get(attr, ''))\n if curlen < minlen or curlen > maxlen:\n verrors.add(\n f's3_update.{attr}', f'Attribute should be {minlen} to {maxlen} in length'\n )\n\n if not new['storage_path']:\n verrors.add('s3_update.storage_path', 'Storage path is required')\n else:\n await check_path_resides_within_volume(\n verrors, self.middleware, 's3_update.storage_path', new['storage_path']\n )\n\n if not verrors:\n if new['storage_path'].rstrip('/').count('/') < 3:\n verrors.add(\n 's3_update.storage_path',\n 'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'\n )\n else:\n # If the storage_path does not exist, let's create it\n if not os.path.exists(new['storage_path']):\n os.makedirs(new['storage_path'])\n\n if new['certificate']:\n verrors.extend((await self.middleware.call(\n 'certificate.cert_services_validation', new['certificate'], 's3_update.certificate', False\n )))\n\n if new['bindip'] not in await self.bindip_choices():\n verrors.add('s3_update.bindip', 'Please provide a valid ip address')\n\n if verrors:\n raise verrors\n\n new['disks'] = new.pop('storage_path')\n\n await self._update_service(old, new)\n\n if (await self.middleware.call('filesystem.stat', new['disks']))['user'] != 'minio':\n await self.middleware.call(\n 'filesystem.setperm',\n {\n 'path': new['disks'],\n 'mode': str(775),\n 'uid': (await self.middleware.call('dscache.get_uncached_user', 'minio'))['pw_uid'],\n 'gid': (await self.middleware.call('dscache.get_uncached_group', 'minio'))['gr_gid'],\n 'options': {'recursive': True, 'traverse': False}\n }\n )\n\n return await self.config()",
"def update_volume(VolumeId=None, Name=None, MountPoint=None):\n pass",
"def _prepareDiskObject(**kwargs):\n storage_domain_name = kwargs.pop('storagedomain', None)\n\n # Tuple (lun_address, lun_target, lun_id, lun_port)\n lun = (kwargs.pop('lun_address', None), kwargs.pop('lun_target', None),\n kwargs.pop('lun_id', None), kwargs.pop('lun_port', 3260))\n # Tuple (username, password)\n lun_creds = (kwargs.pop('lun_username', None),\n kwargs.pop('lun_password', None))\n type_ = kwargs.pop('type_', None)\n\n storage_connection = kwargs.pop('storage_connection', None)\n\n if lun != (None, None, None, 3260) and storage_connection:\n logger.error(\n \"You cannot set storage connection id and LUN params in one call!\")\n return None\n kwargs.pop('active', None)\n\n disk = kwargs.pop('update', None)\n if disk is None:\n disk = data_st.Disk(**kwargs)\n\n if storage_connection is not None:\n storage = data_st.HostStorage()\n storage.id = storage_connection\n disk.set_lun_storage(storage)\n\n if storage_domain_name is not None:\n storage_domain = STORAGE_DOMAIN_API.find(storage_domain_name,\n NAME_ATTR)\n storage_domains = data_st.StorageDomains()\n storage_domains.add_storage_domain(storage_domain)\n disk.storage_domains = storage_domains\n\n # quota\n quota_id = kwargs.pop('quota', None)\n if quota_id == '':\n disk.set_quota(data_st.Quota())\n elif quota_id:\n disk.set_quota(data_st.Quota(id=quota_id))\n\n if lun != (None, None, None, 3260):\n direct_lun = data_st.LogicalUnit(address=lun[0], target=lun[1],\n id=lun[2], port=lun[3])\n if lun_creds != (None, None):\n direct_lun.set_username(lun_creds[0])\n direct_lun.set_password(lun_creds[1])\n\n logical_units = data_st.LogicalUnits(logical_unit=[direct_lun])\n disk.set_lun_storage(\n data_st.HostStorage(logical_units=logical_units, type_=type_)\n )\n\n # id\n disk_id = kwargs.pop('id', None)\n if disk_id:\n disk.set_id(disk_id)\n\n # read_only\n read_only = kwargs.pop('read_only', None)\n if read_only is not None:\n disk.set_read_only(read_only)\n\n # snapshot\n snapshot = kwargs.pop('snapshot', None)\n if snapshot:\n disk.set_snapshot(snapshot)\n\n # description\n description = kwargs.pop('description', None)\n if description is not None:\n disk.set_description(description)\n\n # qcow_version\n qcow_version = kwargs.pop('qcow_version', None)\n if qcow_version:\n disk.set_qcow_version(qcow_version)\n\n return disk"
] | [
"0.71529925",
"0.6376557",
"0.62272817",
"0.61972594",
"0.6157025",
"0.60285354",
"0.5975653",
"0.59703285",
"0.595285",
"0.59182453",
"0.58770084",
"0.58490056",
"0.5829536",
"0.58065957",
"0.57692236",
"0.5761438",
"0.5749751",
"0.569957",
"0.5686913",
"0.5655247",
"0.55898523",
"0.556407",
"0.55450004",
"0.5517371",
"0.55143446",
"0.551034",
"0.55087745",
"0.5437009",
"0.539567",
"0.5377204"
] | 0.69415796 | 1 |
Deletes storage systems based on name OR uri. [Arguments] | def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):
return self.system.delete(uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):\n return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete_disks(self, storage_elems):\n raise NotImplementedError()",
"def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)",
"def cleanup_infrastructure_storage(config, datacenter):\n print \"Removing storage devices in datacenter %s...\" % datacenter.getName()\n for device in datacenter.listStorageDevices():\n device.delete()",
"def storage_pool_delete_by_storage(context, storage_id):\n _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete()",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):\n return self.template.delete(name=name, uri=uri, api=api, headers=headers)",
"def ex_destroy_storage_service(self, name):\n\n response = self._perform_storage_service_delete(self._get_storage_service_path(name))\n self.raise_for_response(response, 200)\n\n return True",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a delete() method\"\n )",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n \"\"\"delete from FileStorage.__objects\"\"\"\n del stored_objects[instance]\n \"\"\"overwrite the new data to file.json\"\"\"\n models.storage.save()",
"def delete(self, name):\n result = self.cm.find_name(name)\n path = result[0]['path']\n delete_path = Path(f'{path}/{name}')\n try:\n os.system(f\"rmdir {delete_path}\")\n result[0]['State'] = 'deleted'\n result = self.update_dict(result)\n except:\n Console.error(\"volume is either not empty or not exist\")\n return result",
"def do_delete_configured_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No storage specified.\")\n return\n self.do_coroutine(self._localStorageRoutines.delete_configured_volume_routine(args[0]))",
"def delete_provider(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n print \"USAGE: molns provider delete name\"\n return\n config.delete_object(name=args[0], kind='Provider')",
"def delete(uri, backend, context=None):\n if backend:\n loc = location.get_location_from_uri_and_backend(\n uri, backend, conf=CONF)\n store = get_store_from_store_identifier(backend)\n return store.delete(loc, context=context)\n\n LOG.warning('Backend is not set to image, searching all backends based on '\n 'location URI.')\n\n backends = CONF.enabled_backends\n for backend in backends:\n try:\n if not uri.startswith(backends[backend]):\n continue\n\n loc = location.get_location_from_uri_and_backend(\n uri, backend, conf=CONF)\n store = get_store_from_store_identifier(backend)\n return store.delete(loc, context=context)\n except (exceptions.NotFound, exceptions.UnknownScheme):\n continue\n\n raise exceptions.NotFound(_(\"Image not found in any configured backend\"))",
"def svn_fs_delete_fs(*args):\r\n return _fs.svn_fs_delete_fs(*args)",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def fusion_api_delete_server_hardware_types(self, name=None, uri=None, api=None, headers=None):\n return self.types.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()",
"def delete_store(request, store_name):\n # Search for store: if doesn't exist, return different message\n\n storedb = redis.Redis(host=HOST, db=STOREDB)\n\n if store_name not in get_store(request):\n return {\"msg\": store_name + \" does not exist in the database\"}\n \n\n store_docs = storedb.lrange(store_name + \":docs\",0,-1)\n for doc in store_docs:\n storedb.delete(doc)\n\n store_perms = storedb.lrange(store_name + \":perms\",0,-1)\n for perm in store_perms:\n storedb.delete(perm)\n\n storedb.delete(store_name + \":docs\")\n storedb.delete(store_name + \":perms\")\n storedb.lrem(\"store\", store_name, 1)\n\n # Returns message indicating the successful deletion\n return store_name",
"def fusion_api_delete_ls(self, name=None, uri=None, api=None, headers=None):\n return self.ls.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None):\n return self.driver.delete(name, uri, api, headers)",
"def do_destroy(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n dicti.pop(\"{}.{}\".format(args[0], args[1]))\n storage.save()\n else:\n print(\"** no instance found **\")",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def delete_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n if hasattr(backend_class, 'delete'):\n return backend_class.delete(parsed_uri, **kwargs)",
"def _cleanup_deleted_os_disks(self):\n try:\n disks = self.disks.list_by_resource_group(\n CONF.azure.resource_group)\n except Exception as e:\n LOG.warning(_LW(\"Unable to delete disks\"\n \" in Azure because %(reason)s\"),\n dict(reason=six.text_type(e)))\n return\n # blobs is and iterable obj, although it's empty.\n if not disks:\n LOG.info(_LI('No residual Disk in Azure'))\n return\n for i in disks:\n if self._is_os_disk(i.name) and not i.owner_id:\n try:\n self.disks.delete(CONF.azure.resource_group, i.name)\n except Exception as e:\n LOG.warning(_LW(\"Unable to delete os disk %(disk)s\"\n \"in Azure because %(reason)s\"),\n dict(disk=i.name,\n reason=six.text_type(e)))\n else:\n LOG.info(_LI(\"Delete residual os disk: %s in\"\n \" Azure\"), i.name)\n else:\n LOG.info(_LI('Delete all residual disks in Azure'))",
"def deleteDropboxStorage(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def delete(name, config, backend, storage, debug):\n setup_lithops_logger(logging.DEBUG)\n\n verify_runtime_name(name)\n\n if config:\n config = load_yaml_config(config)\n\n setup_lithops_logger(logging.DEBUG)\n\n config_ow = set_config_ow(backend, storage, runtime_name=name)\n config = default_config(config, config_ow)\n\n if config['lithops']['mode'] != SERVERLESS:\n raise Exception('\"lithops runtime delete\" command is only valid for serverless backends')\n\n storage_config = extract_storage_config(config)\n internal_storage = InternalStorage(storage_config)\n compute_config = extract_serverless_config(config)\n compute_handler = ServerlessHandler(compute_config, internal_storage)\n\n runtimes = compute_handler.list_runtimes(name)\n for runtime in runtimes:\n compute_handler.delete_runtime(runtime[0], runtime[1])\n runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])\n internal_storage.delete_runtime_meta(runtime_key)"
] | [
"0.6747735",
"0.65936804",
"0.6574242",
"0.6561652",
"0.64618254",
"0.639014",
"0.6384477",
"0.6309357",
"0.6251605",
"0.6201774",
"0.62011224",
"0.61768955",
"0.6151359",
"0.61185914",
"0.60658145",
"0.6045823",
"0.60403293",
"0.60095286",
"0.6000621",
"0.5978106",
"0.5963707",
"0.59507084",
"0.59363675",
"0.5929637",
"0.59290516",
"0.59199953",
"0.5916006",
"0.58981204",
"0.58837473",
"0.5879531"
] | 0.77971196 | 0 |
Returns Reachable Ports of Specified Storage System [Arguments] | def fusion_api_storage_system_get_reachable_ports(self, uri=None, param='', api=None, headers=None):
return self.system.get_reachable_ports(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_all_ram_ports(self):\n return self.RAM_PORT",
"def list_ports(state):\n\tstate.report()",
"def look_for_available_ports():\n available_ports = glob.glob('/dev/ttyACM*')\n print(\"Available porst: \")\n print(available_ports)\n\n return available_ports",
"def list_occupied_adb_ports():\n out = AdbProxy().forward('--list')\n clean_lines = str(out, 'utf-8').strip().split('\\n')\n used_ports = []\n for line in clean_lines:\n tokens = line.split(' tcp:')\n if len(tokens) != 3:\n continue\n used_ports.append(int(tokens[1]))\n return used_ports",
"def list_port(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/ports.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server, while listing ports.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get port list Failed with status %s\"\n % response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Port List : %s \" % output)\n return output[\"ports\"]",
"def list_ports():\n non_working_ports = []\n dev_port = 0\n working_ports = []\n available_ports = []\n while len(non_working_ports) < 6: # if there are more than 5 non working ports stop the testing. \n camera = cv2.VideoCapture(dev_port)\n if not camera.isOpened():\n non_working_ports.append(dev_port)\n print(\"Port %s is not working.\" %dev_port)\n else:\n is_reading, img = camera.read()\n w = camera.get(3)\n h = camera.get(4)\n if is_reading:\n print(\"Port %s is working and reads images (%s x %s)\" %(dev_port,h,w))\n working_ports.append(dev_port)\n else:\n print(\"Port %s for camera ( %s x %s) is present but does not reads.\" %(dev_port,h,w))\n available_ports.append(dev_port)\n dev_port +=1\n return available_ports,working_ports,non_working_ports",
"def cmd_port(args):",
"def determine_ports():\n ports = [config('admin-port'), config('service-port')]\n return list(set(ports))",
"def taken_ports():\n odoo = 'odoo' if env.api.system != 'wheezy' else 'openerp'\n ports = sudo('grep _port /srv/{odoo}/*/*cfg /srv/{odoo}/*/*/*cfg'\n '|cut -d= -f2|sort|uniq'\n .format(odoo=odoo)).splitlines()\n ports += sudo('grep \\.bind /srv/{odoo}/*/*cfg /srv/{odoo}/*/*/*cfg'\n '|cut -d= -f2|cut -d: -f2|sort|uniq'\n .format(odoo=odoo)).splitlines()\n ports = sorted(set(int(p.strip()) for p in ports if p.strip().isdigit()))\n puts('Already taken ports: {}'.format(', '.join(str(p) for p in ports)))\n if not ports: # assume 8000 is taken and is the first\n ports = [8000]\n return ports",
"def get_ports(self) -> tuple:\n raise NotImplementedError",
"def findOccupiedPorts():\n netstatOutput = cactus_call(parameters=[\"netstat\", \"-tuplen\"], check_output=True)\n ports = set()\n for line in netstatOutput.split(\"\\n\"):\n fields = line.split()\n if len(fields) != 9:\n # Header or other garbage line\n continue\n port = int(fields[3].split(':')[-1])\n ports.add(port)\n logger.debug('Detected ports in use: %s' % repr(ports))\n return ports",
"def list_ports(self, filters=None):\n # If pushdown filters are specified and we do not have batched caching\n # enabled, bypass local caching and push down the filters.\n if filters and self._PORT_AGE == 0:\n return self._list_ports(filters)\n\n if (time.time() - self._ports_time) >= self._PORT_AGE:\n # Since we're using cached data anyway, we don't need to\n # have more than one thread actually submit the list\n # ports task. Let the first one submit it while holding\n # a lock, and the non-blocking acquire method will cause\n # subsequent threads to just skip this and use the old\n # data until it succeeds.\n # Initially when we never got data, block to retrieve some data.\n first_run = self._ports is None\n if self._ports_lock.acquire(first_run):\n try:\n if not (first_run and self._ports is not None):\n self._ports = self._list_ports({})\n self._ports_time = time.time()\n finally:\n self._ports_lock.release()\n # Wrap the return with filter_list so that if filters were passed\n # but we were batching/caching and thus always fetching the whole\n # list from the cloud, we still return a filtered list.\n return _utils._filter_list(self._ports, None, filters or {})",
"def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList",
"def list_ports(self):\n return self.ironic_client.port.list()",
"def _GetPorts(self):\n ports = []\n for start, end in self.term.destination_port:\n if start == end:\n ports.append(str(start))\n else:\n ports.append('%d-%d' % (start, end))\n return ports",
"def ports(self) -> List[int]:\n if self.head_port:\n return [self.head_port]\n else:\n ports = []\n for replica in self.pod_args['pods'][0]:\n if isinstance(replica.port, list):\n ports.extend(replica.port)\n else:\n ports.append(replica.port)\n return ports",
"def get(self, *args):\n return _libsbml.ListOfPorts_get(self, *args)",
"def get_all_port(self, conf, dpid):\n\t\tpass",
"def port_list(self):\n return self._port_list",
"def exposed_ports(self) -> list[\"Port\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"exposedPorts\", _args)\n _ctx = Port(_ctx)._select_multiple(\n _description=\"description\",\n _port=\"port\",\n _protocol=\"protocol\",\n )\n return _ctx.execute_sync(list[Port])",
"def list(self, tenant=None, network=None, status=None, device_id=None,\n security_groups=None):\n path = '%s/ports' % self.ver \n \n query = {}\n if tenant is not None:\n query['tenant_id'] = tenant\n if network is not None:\n query['network_id'] = network\n if status is not None:\n query['status'] = status\n if device_id is not None:\n query['device_id'] = device_id\n if security_groups is not None:\n query['security_groups'] = security_groups \n path = '%s?%s' % (path, urlencode(query))\n \n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Get openstack ports: %s' % truncate(res))\n return res[0]['ports']",
"def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']",
"def netstat(self):\n \n command = 'netstat -utn'\n lines = subprocess.check_output(command, shell=True).split('\\n')[2:]\n \n\tports = {'tcp':[], 'udp':[]}\n\tfor line in lines:\n\t if len(line) < 4:\n\t continue\n\t\t\n\t words = line.split()\n\t port = int(words[3].split(':')[-1])\n\t lst = ports[words[0]]\n\t if port in lst:\n\t continue\n\t lst.append(port)\n\t \n\tports['tcp'].sort()\n\tports['udp'].sort()\n\t\n\treturn ports",
"def alloc_ports():\n # adb uses ports in pairs\n PORT_WIDTH = 2\n\n # We can't actually reserve ports atomically for QEMU, but we can at\n # least scan and find two that are not currently in use.\n min_port = ADB_BASE_PORT\n while True:\n alloced_ports = []\n for port in range(min_port, min_port + PORT_WIDTH):\n # If the port is already in use, don't hand it out\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"localhost\", port))\n break\n except IOError:\n alloced_ports += [port]\n if len(alloced_ports) == PORT_WIDTH:\n return alloced_ports\n\n # We could increment by only 1, but if we are competing with other\n # adb sessions for ports, this will be more polite\n min_port += PORT_WIDTH",
"def get_ports(cls):\n return cls._open_ports.copy()",
"def enumerate_network(arg):\n\n network = ip_network(arg, strict=False)\n data = list(map(str, network.hosts()))\n data.insert(0, str(network.network_address))\n if network.prefixlen != network.max_prefixlen:\n data.append(str(network.broadcast_address))\n return data",
"def list_ports():\n print '\\nHere is the list of available ports on this machine:'\n # lp.comports returns a list of (port, description, hardware ID) tuples\n iterator = sorted(lp.comports())\n for port, desc, hwid in iterator:\n print port\n exit()",
"def do_command(self, args):\n hostops = dbops.Hosts()\n listing = hostops.list(args)\n ordering = ['host_name', 'host_memory', 'host_cores',\n 'is_64bit', 'is_enabled']\n do_list(listing, ordering)",
"def list(args):\n if args.remote == 'local':\n if len(args.cache) > 0:\n sys.stdout.write('\\n'.join(args.cache) + '\\n')\n else:\n if len(args.remote_cache) > 0:\n sys.stdout.write('\\n'.join(args.remote_cache) + '\\n')\n return",
"def get_ports(self) -> tuple:\n return self._current_dev_manager.get_ports()"
] | [
"0.6260427",
"0.6184044",
"0.5920856",
"0.5918714",
"0.5892136",
"0.5839744",
"0.5819546",
"0.58051527",
"0.57387537",
"0.5708297",
"0.5635481",
"0.5631053",
"0.56215024",
"0.55690616",
"0.5567119",
"0.5548749",
"0.5545921",
"0.554013",
"0.5537568",
"0.55270517",
"0.5516075",
"0.5512461",
"0.55065197",
"0.55057603",
"0.54987055",
"0.54910594",
"0.5472965",
"0.5426075",
"0.54022115",
"0.5401739"
] | 0.66462207 | 0 |
Returns Templates of Specified Storage System [Arguments] | def fusion_api_storage_system_get_templates(self, uri=None, param='', api=None, headers=None):
return self.system.get_templates(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, headers=headers, param=param)",
"def find_templates(self, name):\n script = (\n 'Get-SCVMTemplate -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVMTemplate(system=self, raw=tmpl_data) for tmpl_data in data]\n return [SCVMTemplate(system=self, raw=data)]",
"def T(request):\n\treturn all_templates[request.param]",
"def get_templates(instrument=''):\n import os, json\n template_path = os.path.dirname(__file__)\n template_names = [fn\n for fn in os.listdir(template_path)\n if fn.endswith(\".json\") and fn.startswith(instrument)]\n templates = dict([(tn[len(instrument)+1:-5],\n json.loads(open(os.path.join(template_path, tn), 'r').read()))\n for tn in template_names])\n return templates",
"def get_schemas(self):\n templates = [['Template GUID']]\n r = self.system_cursor.execute('{Call wtGetTemplateList(%s)}' % (self.dsn['ProfileGuid'],))\n for row in r.fetchall():\n templates.append([row.TEMPLATE_GUID])\n return templates",
"def template_list(call=None):\n templates = {}\n session = _get_session()\n vms = session.xenapi.VM.get_all()\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n templates[record[\"name_label\"]] = record\n return templates",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack00.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(self.boxsize)+\",\"+str(self.boxsize)\n\t\t\t\t+\" spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def get_templates(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/templates\").json()",
"def get_template(type):\n # read model options file from Cloud Storage\n content = storage.read_file('templates/' + type + '.yaml')\n return Response(content, status=200, mimetype='application/text')",
"def get_template_files(fs, template_type):\n # no template fitting for null runs\n if fs[\"null_run\"]:\n template_type = None\n\n if \"template_type\" in fs:\n if template_type == fs[\"template_type\"]:\n return\n\n fs[\"template_type\"] = template_type\n\n # find all corresponding foreground templates\n if template_type is None:\n fs[\"template_root\"] = None\n fs[\"template_root2\"] = None\n fs[\"template_files\"] = None\n fs[\"template_files2\"] = None\n fs[\"template_noise_root\"] = None\n fs[\"template_noise_root2\"] = None\n fs[\"template_noise_files\"] = None\n fs[\"template_noise_files2\"] = None\n fs[\"num_template\"] = 0\n fs[\"num_template_noise\"] = 0\n else:\n num_template_noise = None\n for hm in [\"1\", \"2\"]:\n suff = \"\" if hm == \"1\" else \"2\"\n troot = os.path.join(\n fs[\"data_root\"],\n \"templates_{}\".format(template_type),\n \"halfmission-{}\".format(hm),\n )\n ### this block is so sims with template type like\n # 353_100_gauss_003 can use ensemble in 353_100_gauss\n tp = template_type.split(\"_\")\n ttype = template_type\n if tp[-1].isdigit():\n if ttype[-7:] not in [\"353_100\", \"217_100\"]:\n ttype = \"_\".join(tp[:-1])\n\n tnroot = os.path.join(\n fs[\"data_root\"],\n \"templates_noise_{}\".format(ttype),\n \"halfmission-{}\".format(hm),\n )\n\n tfiles = []\n tnfiles = []\n for f in fs[\"map_files\"]:\n nfile = f.replace(fs[\"map_root\"], troot)\n if not os.path.exists(nfile):\n raise OSError(\"Missing hm-{} template for {}\".format(hm, f))\n tfiles.append(nfile)\n nfiles = sorted(\n glob.glob(\n f.replace(fs[\"map_root\"], tnroot).replace(\n \".fits\", \"_*.fits\"\n )\n )\n )\n if not len(nfiles):\n raise OSError(\n \"Missing hm-{} template noise for {}\".format(hm, f)\n )\n tnfiles.append(nfiles)\n if num_template_noise is not None:\n if len(nfiles) != num_template_noise:\n raise OSError(\n \"Wrong number of template noise sims. \"\n \"Found {} files, expected {}.\".format(\n len(nfiles), num_template_noise\n )\n )\n\n num_template_noise = len(nfiles)\n\n tfiles = np.asarray(tfiles)\n tnfiles = np.asarray(tnfiles)\n fs[\"template_root{}\".format(suff)] = troot\n fs[\"template_files{}\".format(suff)] = tfiles\n fs[\"template_noise_root{}\".format(suff)] = tnroot\n fs[\"template_noise_files{}\".format(suff)] = tnfiles\n\n fs[\"num_template\"] = len(fs[\"template_files\"])\n fs[\"num_template_noise\"] = num_template_noise\n self.log(\n \"Found {} templates in {}\".format(\n fs[\"num_template\"], fs[\"template_root\"]\n ),\n \"info\",\n )\n self.log(\n \"Found {} template noise files in {}\".format(\n fs[\"num_template_noise\"], fs[\"template_noise_root\"]\n ),\n \"info\",\n )\n self.log(\"Template files: {}\".format(fs[\"template_files\"]), \"debug\")\n\n fields = [\n \"template_type\",\n \"template_root\",\n \"template_root2\",\n \"template_files\",\n \"template_files2\",\n \"template_noise_root\",\n \"template_noise_root2\",\n \"template_noise_files\",\n \"template_noise_files2\",\n \"num_template\",\n \"num_template_noise\",\n ]\n for k in fields:\n setattr(self, k, fs[k])",
"def subcmd_getstorage_main(args, parameter_info):\n \n from get_storage_inventory import get_storage_inventory\n result = get_storage_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])",
"def list_templates(request):\n templates = models.Template.all().order('name')\n return utility.respond(request, 'admin/list_templates', {'templates': templates})",
"def get_generation_settings(options, args, error_fcn):\n if not args:\n error_fcn(\"At least one argument (the template name) is required.\")\n\n template_name = args[0]\n template_dir = resolve_template_path(template_name)\n data = fileinput.input(files=args[1:], openhook=fileinput.hook_compressed)\n data = (\n simplejson.loads(''.join(data))\n if options.single_json_blob\n else map(simplejson.loads, data)\n )\n\n # check that the template / appropriate template files exist.\n if not template_dir:\n error_fcn(\n \"Couldn't find your template {0} among global templates \"\n \"or in ~/.config/data_view_templates\".format(template_name)\n )\n\n special_template_files = {}\n for filename, (required, loader) in SPECIAL_TEMPLATE_FILES.iteritems():\n if os.path.isfile(os.path.join(template_dir, filename)):\n if loader:\n with open(os.path.join(template_dir, filename)) as f:\n special_template_files[filename] = loader(f)\n elif required:\n error_fcn(\"Required template file {0} was not found\".format(filename))\n\n return GenerationSettings(\n system_template_dir=SYSTEM_TEMPLATE_DIR,\n template_dir=template_dir,\n out_dir=(\n options.output_directory or\n '{0}-{1}'.format(template_name, date_for_directory_name())\n ),\n data=data,\n special_template_files=special_template_files,\n )",
"def list_templates(self):\n raise NotImplementedError()",
"def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)",
"def getStudyTemplates(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n items = []\n con.cursor().callproc('qiime_assets.get_study_templates', [study_id, results])\n for row in results:\n items.append(row[0])\n return items\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def test_get_device_templates(self):\n pass",
"def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)",
"def get_templates(self, template_name, **kwargs):\n text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n return text",
"def get_templates(self):\n\n data = self.request_from_server('templates')\n self.templates = data",
"def fusion_api_get_storage_system(self, uri=None, param='', api=None, headers=None):\n return self.system.get(uri=uri, api=api, headers=headers, param=param)",
"def test_get_templates_in_virtualization_realm(self):\n pass",
"def list_(args):\n osf = _setup_osf(args)\n\n project = osf.project(args.project)\n\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n print(os.path.join(prefix, path))",
"def defineProcessTemplates(histos):\n\n templates=[]\n\n #nominal\n templates.append( histos[0] )\n nomStats=templates[-1].Integral()\n\n #systematic variations\n #if Up/Down already in the name store directly updating the name\n #if not, mirror the variation given \n for i in xrange(1,len(histos)): \n templates.append( histos[i] )\n key=templates[-1].GetName()\n if not 'Up' in key and not 'Down' in key :\n templates[-1].SetName(key+'Up')\n templates.append( histos[i].Clone(key+'Down') )\n for xbin in range(templates[0].GetNbinsX()):\n templates[-1].SetBinContent(xbin+1,2*templates[0].GetBinContent(xbin+1)-templates[-2].GetBinContent(xbin+1))\n \n #don't leave bins with 0's\n for h in templates:\n h.SetDirectory(0)\n iStats=h.Integral()\n if iStats>0: h.Scale(nomStats/iStats)\n for xbin in range(h.GetNbinsX()):\n if h.GetBinContent(xbin+1)>0: continue\n h.SetBinContent(xbin+1,1e-6)\n \n return templates",
"def get_template(self, name, args):\n key = name, len(args)\n template = self.templates.get(key)\n if not template:\n raise mio.MIOException('Undefined template \"%s/%d\"' % (name, len(args)))\n return template",
"def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)",
"def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids",
"def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }",
"def get_templates(self, template_name, **kwargs):\n html = render_template(\"{template}.html\".format(template=template_name), **kwargs)\n text = render_template(\"{template}.txt\".format(template=template_name), **kwargs)\n return html, text",
"def get_objects(si, args):\n # Get datacenter object.\n datacenter_list = si.content.rootFolder.childEntity\n \"\"\"\n if args.datacenter_name:\n datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)\n else:\n \"\"\"\n datacenter_obj = datacenter_list[0]\n\n # Get datastore object.\n datastore_list = datacenter_obj.datastoreFolder.childEntity\n \"\"\"if args.datastore_name:\n datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)\n elif len(datastore_list) > 0:\"\"\"\n datastore_obj = datastore_list[0]\n #else:\n # print \"No datastores found in DC (%s).\" % datacenter_obj.name\n\n # Get cluster object.\n cluster_list = datacenter_obj.hostFolder.childEntity\n \"\"\"if args.cluster_name:\n cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)\n elif len(cluster_list) > 0:\"\"\"\n cluster_obj = cluster_list[0]\n #else:\n # print \"No clusters found in DC (%s).\" % datacenter_obj.name\n\n # Generate resource pool.\n resource_pool_obj = cluster_obj.resourcePool\n\n return {\"datacenter\": datacenter_obj,\n \"datastore\": datastore_obj\n ,\"resource pool\": resource_pool_obj}"
] | [
"0.6130852",
"0.5904958",
"0.5899262",
"0.5819744",
"0.57762927",
"0.5706322",
"0.56789964",
"0.56496763",
"0.5636637",
"0.56168526",
"0.56025994",
"0.55965084",
"0.5466135",
"0.5463602",
"0.54628754",
"0.5458414",
"0.54406357",
"0.5440389",
"0.5435754",
"0.54191256",
"0.5416719",
"0.5310162",
"0.53019494",
"0.5292851",
"0.52871823",
"0.5278793",
"0.5275675",
"0.52657735",
"0.5261044",
"0.5251056"
] | 0.715283 | 0 |
Creates a storage volume template [Arguments] | def fusion_api_create_storage_volume_template(self, body, api=None, headers=None):
return self.template.create(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_create_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No name given.\")\n return\n if len(args) == 1:\n self.perror(\"No path given.\")\n return\n if not os.path.isabs(args[1]):\n print(\"Path must be absolute: \" + args[1])\n return\n self.do_coroutine(self._localStorageRoutines.create_volume_routine(args[0], args[1]))",
"def create_volume(c,i):\n return c.volumes.create(\n size = \"10\",\n display_name = \"instantserver-1\",\n display_description = \"Volume for instantserver-1\",\n imageRef = i\n )",
"def _create_volume(self):\n vol = {}\n vol['size'] = 1\n vol['availability_zone'] = 'test'\n return db.volume_create(self.context, vol)['id']",
"def create_volume(self, size=1, name=None, description=None,\n image=None, check=True):\n metadata = '{0}={1}'.format(config.STEPLER_PREFIX,\n config.STEPLER_PREFIX)\n cmd = 'cinder create ' + str(size) + ' --metadata ' + metadata\n if image:\n cmd += ' --image ' + image\n if name:\n cmd += ' --name ' + moves.shlex_quote(name)\n if description is not None:\n cmd += ' --description ' + moves.shlex_quote(description)\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_AVAILABLE_TIMEOUT, check=check)\n volume_table = output_parser.table(stdout)\n volume = {key: value for key, value in volume_table['values']}\n return volume",
"def create(args, **_):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n for property_name in constants.VOLUME_REQUIRED_PROPERTIES:\n utils.validate_node_property(property_name, ctx.node.properties)\n\n if _create_external_volume():\n return\n\n ctx.logger.debug('Creating EBS volume')\n\n create_volume_args = dict(\n size=ctx.node.properties['size'],\n zone=ctx.node.properties[constants.ZONE]\n )\n\n create_volume_args.update(args)\n\n try:\n new_volume = ec2_client.create_volume(**create_volume_args)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n ctx.instance.runtime_properties[constants.ZONE] = new_volume.zone\n\n utils.set_external_resource_id(\n new_volume.id, ctx.instance, external=False)",
"def create_volume(self, volume):\n # Generate App Instance, Storage Instance and Volume\n # Volume ID will be used as the App Instance Name\n # Storage Instance and Volumes will have standard names\n policies = self._get_policies_for_resource(volume)\n num_replicas = int(policies['replica_count'])\n storage_name = policies['default_storage_name']\n volume_name = policies['default_volume_name']\n\n app_params = (\n {\n 'create_mode': \"openstack\",\n 'uuid': str(volume['id']),\n 'name': _get_name(volume['id']),\n 'access_control_mode': 'deny_all',\n 'storage_instances': {\n storage_name: {\n 'name': storage_name,\n 'volumes': {\n volume_name: {\n 'name': volume_name,\n 'size': volume['size'],\n 'replica_count': num_replicas,\n 'snapshot_policies': {\n }\n }\n }\n }\n }\n })\n self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params)",
"def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )",
"def fusion_api_create_storage_volume(self, body, api=None, headers=None):\n return self.volume.create(body=body, api=api, headers=headers)",
"def build_create_volume_command(vol_name, vol_type, ondisk_storage, repl_count, transport, si):\n\n return_dict = None\n try:\n # Now build the command based on parameters provided\n cmd = 'gluster volume create %s ' % vol_name\n if 'replicate' in vol_type.lower():\n cmd = cmd + ' replica %d ' % repl_count\n vol_type = \"replicated\"\n cmd = cmd + ' transport %s ' % transport\n\n anl, err = _get_allowable_node_list(si)\n if err:\n raise Exception(err)\n\n d = {}\n if not anl:\n raise Exception(\n \"No appropriate storage available to create the volume\")\n\n d, err = build_create_or_expand_volume_command(\n cmd, si, anl, vol_type, ondisk_storage, repl_count, vol_name)\n if err:\n raise Exception(err)\n if \"cmd\" in d:\n d[\"cmd\"] = d[\"cmd\"] + \" --xml\"\n except Exception, e:\n return None, 'Error building create volume command: %s' % str(e)\n else:\n return d, None",
"def _create_volume(display_name='test_volume',\n display_description='this is a test volume',\n status='available',\n size=1,\n project_id=fake.PROJECT_ID,\n attach_status=fields.VolumeAttachStatus.DETACHED):\n vol = {}\n vol['host'] = 'fake_host'\n vol['size'] = size\n vol['user_id'] = fake.USER_ID\n vol['project_id'] = project_id\n vol['status'] = status\n vol['display_name'] = display_name\n vol['display_description'] = display_description\n vol['attach_status'] = attach_status\n vol['availability_zone'] = 'fake_zone'\n vol['volume_type_id'] = fake.VOLUME_TYPE_ID\n return db.volume_create(context.get_admin_context(), vol)['id']",
"def create_volume(self, vol_name, sg_name, size, cap_unit):\n try:\n if self.module.params['vol_name'] is None:\n self.show_error_exit(msg='vol_name is required'\n ' during volume creation')\n LOG.info(\"SG MSG: %s \", sg_name)\n remote_array = None\n remote_array_sg = None\n remote_array_1 = None\n remote_array_1_sg = None\n remote_array_2 = None\n remote_array_2_sg = None\n vol_id = None\n\n # Check SRDF protected SG\n if sg_name is not None:\n storage_group = self.get_storage_group(sg_name)\n if (storage_group is not None and\n self.if_srdf_protected(storage_group)):\n array_id = self.module.params['serial_no']\n array_details = self.common.get_array(array_id=array_id)\n if utils.parse_version(array_details['ucode']) \\\n < utils.parse_version(self.foxtail_version):\n msg = (\"Creating new volumes on SRDF protected\"\n \" storage groups is supported from\"\n \" v5978.444.444 onward. Please upgrade the\"\n \" array for this support.\")\n self.show_error_exit(msg=msg)\n rdfg_list = self.replication.\\\n get_storage_group_srdf_group_list(\n storage_group_id=sg_name)\n\n # Multisite configuration\n if len(rdfg_list) == 2:\n LOG.info(\"Concurrent configuration detected \"\n \"for %s\", sg_name)\n rdfg_details = self.replication.\\\n get_rdf_group(rdf_number=rdfg_list[0])\n remote_array_1 = rdfg_details['remoteSymmetrix']\n remote_array_1_sg = sg_name\n rdfg_details = self.replication. \\\n get_rdf_group(rdf_number=rdfg_list[1])\n remote_array_2 = rdfg_details['remoteSymmetrix']\n remote_array_2_sg = sg_name\n msg = ('Creating volume with parameters:'\n 'storage_group_id= ', sg_name,\n ', num_vols= ', 1,\n ', vol_size= ', size,\n ', cap_unit= ', cap_unit,\n ', vol_name= ', vol_name,\n ', create_new_volumes= ', True,\n ', remote_array_1_id= ',\n remote_array_1,\n ', remote_array_1_sgs= ',\n remote_array_1_sg,\n ', remote_array_2_id= ',\n remote_array_2,\n ', remote_array_2_sgs= ',\n remote_array_2_sg\n )\n LOG.info(msg)\n if not self.module.check_mode:\n self.provisioning.add_new_volume_to_storage_group(\n storage_group_id=sg_name, num_vols=1,\n vol_size=size,\n cap_unit=cap_unit, vol_name=vol_name,\n create_new_volumes=True,\n remote_array_1_id=remote_array_1,\n remote_array_1_sgs=remote_array_1_sg,\n remote_array_2_id=remote_array_2,\n remote_array_2_sgs=remote_array_2_sg)\n vol_id = self.provisioning.find_volume_device_id(\n volume_name=vol_name)\n LOG.info('Created volume native ID: %s', vol_id)\n return vol_id\n\n elif len(rdfg_list) > 2:\n err_msg = (\"More than 2 rdf groups exists for the \"\n \"given storage group %s. Create volume is \"\n \"not supported.\", sg_name)\n self.show_error_exit(msg=err_msg)\n\n rdfg_details = self.replication. 
\\\n get_rdf_group(rdf_number=rdfg_list[0])\n remote_array = rdfg_details['remoteSymmetrix']\n remote_array_sg = sg_name\n\n # Create new volume and add to storage group\n msg = ('Creating volume with parameters:'\n 'storage_group_id= ', sg_name,\n ', num_vols= ', 1,\n ', vol_size= ', size,\n ', cap_unit= ', cap_unit,\n ', vol_name= ', vol_name,\n ', create_new_volumes= ', True,\n ', remote_array_1_id= ',\n remote_array_1,\n ', remote_array_1_sgs= ',\n remote_array_1_sg)\n LOG.info(msg)\n if not self.module.check_mode:\n self.provisioning.add_new_volume_to_storage_group(\n storage_group_id=sg_name, num_vols=1, vol_size=size,\n cap_unit=cap_unit, vol_name=vol_name,\n create_new_volumes=True, remote_array_1_id=remote_array,\n remote_array_1_sgs=remote_array_sg)\n vol_id = self.provisioning.find_volume_device_id(\n volume_name=vol_name)\n LOG.info('Created volume native ID: %s', vol_id)\n return vol_id\n except Exception as e:\n error_message = 'Create volume %s failed with error %s' \\\n % (vol_name, str(e))\n self.show_error_exit(msg=error_message)",
"def create_volume(self, volume):\n LOG.debug('SPDK create volume')\n\n return self._create_volume(volume)",
"def test_create_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider, note that provider_id is hashed\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)",
"def create_new_volume(self, volumeInfo, change_name=True):\n size = volumeInfo.get(SVC_KEY_VDISK_CAPACITY)\n if (change_name):\n new_volume_name = self._get_new_volume_name(\n volumeInfo.get(SVC_KEY_VDISK_NAME))\n else:\n new_volume_name = volumeInfo.get(SVC_KEY_VDISK_NAME)\n if SVC_KEY_VOLUME_GROUP in volumeInfo:\n volumeGroup = volumeInfo.get(SVC_KEY_VOLUME_GROUP)\n elif self.dft_stg_pool:\n volumeGroup = self.dft_stg_pool\n else:\n volumeGroup = self.get_mdisk_grp_by_size(size)\n\n if volumeGroup is None:\n raise SVCNoSANStoragePoolException\n\n # iogrp parameter should not use name since it could be\n # customized. It is always safe to use iogrp 0.\n cmd = \"svctask mkvdisk -name %s -iogrp 0 -mdiskgrp %s \" \\\n \"-size %s -unit b\" % (new_volume_name, volumeGroup, size)\n\n output, err_output = self._svc_command(cmd)\n\n volume_uid = self.get_uid(new_volume_name)\n\n # Check if it got created\n if not volume_uid:\n # The SVC message of out of space is not really user friendly.\n # So, we will manully check whether the pool ran out of space\n free_capacity = self.get_mdisk_grp_size(volumeGroup)\n\n if float(size) > float(free_capacity):\n ex_args = {'pool_name': volumeGroup,\n 'size': size,\n 'free_capacity': free_capacity}\n raise SVCVolumeGroupOutOfSpace(**ex_args)\n if err_output:\n ex_args = {'new_volume_name': new_volume_name,\n 'err_output': err_output}\n raise SVCVolumeCreationFailed(**ex_args)\n else:\n # failed to create volume but with no error msg\n # really shouldn't hit this condition\n ex_args = {'cmd': cmd,\n 'e': _(\"No error available\")}\n raise SVCCommandException(**ex_args)\n\n return new_volume_name, volume_uid",
"def create_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.create(token, filename=arguments[1])",
"def create(self, **kwargs):\n for key in self.default.keys():\n if key not in kwargs.keys():\n kwargs[key] = self.default[key]\n elif kwargs[key] is None:\n kwargs[key] = self.default[key]\n name = kwargs['NAME']\n path = Path(kwargs['path'])\n new_path = Path(f'{path}/{name}')\n result = os.system(f\"mkdir {new_path}\")\n if result == 0:\n result = self.generate_volume_info(NAME=name, path=kwargs['path'])\n result = self.update_dict([result])\n return result",
"def snap_create(mnode, volname, snapname, timestamp=False,\n description='', force=False):\n\n if description != '':\n description = \"description '%s'\" % description\n\n tstamp = ''\n if not timestamp:\n tstamp = \"no-timestamp\"\n\n frce = ''\n if force:\n frce = 'force'\n\n cmd = (\"gluster snapshot create %s %s %s %s %s\"\n % (snapname, volname, tstamp, description, frce))\n return g.run(mnode, cmd)",
"def test_create_template_subsciption(self):\n pass",
"def execute(self,\n context: context.RequestContext,\n optional_args: dict,\n **kwargs) -> dict[str, Any]:\n\n src_volid = kwargs.get('source_volid')\n src_vol = None\n if src_volid is not None:\n src_vol = objects.Volume.get_by_id(context, src_volid)\n bootable = False\n if src_vol is not None:\n bootable = src_vol.bootable\n elif kwargs.get('snapshot_id'):\n snapshot = objects.Snapshot.get_by_id(context,\n kwargs.get('snapshot_id'))\n volume_id = snapshot.volume_id\n snp_vol = objects.Volume.get_by_id(context, volume_id)\n if snp_vol is not None:\n bootable = snp_vol.bootable\n availability_zones = kwargs.pop('availability_zones')\n volume_properties = {\n 'size': kwargs.pop('size'),\n 'user_id': context.user_id,\n 'project_id': context.project_id,\n 'status': 'creating',\n 'attach_status': fields.VolumeAttachStatus.DETACHED,\n 'encryption_key_id': kwargs.pop('encryption_key_id'),\n # Rename these to the internal name.\n 'display_description': kwargs.pop('description'),\n 'display_name': kwargs.pop('name'),\n 'multiattach': kwargs.pop('multiattach'),\n 'bootable': bootable,\n }\n if len(availability_zones) == 1:\n volume_properties['availability_zone'] = availability_zones[0]\n\n # Merge in the other required arguments which should provide the rest\n # of the volume property fields (if applicable).\n volume_properties.update(kwargs)\n volume = objects.Volume(context=context, **volume_properties)\n volume.create()\n\n # FIXME(dulek): We're passing this volume_properties dict through RPC\n # in request_spec. This shouldn't be needed, most data is replicated\n # in both volume and other places. We should make Newton read data\n # from just one correct place and leave just compatibility code.\n #\n # Right now - let's move it to versioned objects to be able to make\n # non-backward compatible changes.\n\n volume_properties = objects.VolumeProperties(**volume_properties)\n\n return {\n 'volume_id': volume['id'],\n 'volume_properties': volume_properties,\n # NOTE(harlowja): it appears like further usage of this volume\n # result actually depend on it being a sqlalchemy object and not\n # just a plain dictionary so that's why we are storing this here.\n #\n # In the future where this task results can be serialized and\n # restored automatically for continued running we will need to\n # resolve the serialization & recreation of this object since raw\n # sqlalchemy objects can't be serialized.\n 'volume': volume,\n }",
"def test_create_volume_name_creation_fail(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n self._fail_space_list = True\n self.assertRaises(exception.VolumeDriverException,\n self.driver.create_volume, volume)",
"def create_vm(args):\n if not args.disk and not args.pool:\n print(\"Either --disk or --pool option must be specified\", file=sys.stderr)\n return 1\n\n if args.disk and args.pool:\n print(\"--disk and --pool options are exclusive\", file=sys.stderr)\n return 1\n if args.pool and not args.disk_size:\n print(\"You must specify a disk size\", file=sys.stderr)\n return 1\n\n if args.net and args.virtual_network:\n print(\"--net and --virtual_network option are exclusive\", file=sys.stderr)\n return 1\n\n # insure unicity in networking options in BM case\n\n _all_net_names = set()\n if args.net:\n for n_name in args.net:\n if n_name not in _all_net_names:\n _all_net_names.add(n_name)\n else:\n print('Duplicate virtual network name [%s], ignore it', n_name)\n\n if '--network' in args.virt:\n sys.stderr.write(\"--network is not a supported option. Please retry without --network option.\\n\")\n return 1\n\n # sanity on extra arguments passed to virt-install(1)\n # some options do not create the guest but display information\n # this is wrongly interpreted as a succcess by underlying layers and we\n # may setup things by mistake\n _virt_install_extra = []\n for _a in args.virt:\n if _a not in ('--print-xml', '--version', '-h', '--help'):\n _virt_install_extra.append(_a)\n\n return oci_utils.kvm.virt.create(name=args.domain,\n root_disk=args.disk,\n pool=args.pool,\n disk_size=args.disk_size,\n network=list(_all_net_names),\n virtual_network=args.virtual_network,\n extra_args=_virt_install_extra)",
"def create_volume(self, volume):\n vg_name = self.get_volume_group_name(volume.id)\n vol_name = self.get_volume_name(volume.id)\n prov_type = self._get_is_dedup(volume.get('volume_type'))\n try:\n LOG.debug(\"Creating volume group with name: %(name)s, \"\n \"quota: unlimited and dedup_support: %(dedup)s\",\n {'name': vg_name, 'dedup': prov_type})\n\n vg = self.client.new(\"volume_groups\", name=vg_name, quota=0,\n is_dedup=prov_type).save()\n LOG.debug(\"Creating volume with name: %(name)s, size: %(size)s \"\n \"GB, volume_group: %(vg)s\",\n {'name': vol_name, 'size': volume.size, 'vg': vg_name})\n vol = self.client.new(\"volumes\", name=vol_name,\n size=volume.size * units.Mi,\n volume_group=vg).save()\n except Exception as ex:\n vg_rs = self.client.search(\"volume_groups\", name=vg_name)\n if vg_rs.total != 0:\n LOG.debug(\"Deleting vg: %s for failed volume in K2.\", vg_name)\n vg_rs.hits[0].delete()\n LOG.exception(\"Creation of volume %s failed.\", vol_name)\n raise KaminarioCinderDriverException(reason=ex)\n\n if self._get_is_replica(volume.volume_type) and self.replica:\n self._create_volume_replica(volume, vg, vol, self.replica.rpo)",
"def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, headers=headers, param=param)",
"def disk_create(context, values):\n return NotImplemented",
"def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)",
"def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")",
"def test_create_namespaced_template(self):\n pass",
"def create(*args):",
"def generate_brick_volfile(storage_unit, storage_unit_volfile_path, custom_options):\n\n options = copy.copy(DEFAULT_OPTIONS)\n options.update(custom_options)\n\n kadalu_volgen.generate(\n \"/var/lib/kadalu/templates/storage_unit.vol.j2\",\n data=storage_unit,\n options=options,\n output_file=storage_unit_volfile_path\n )",
"def create_zfs_volume(self, name, size_str, zfs_type='default', mirror_count=0):\n if name is None or size_str is None:\n LOG.error(_('Failed to create volume:%s,name or size can not be None')%name)\n return\n pname=self.poolname + '/' + name\n cmdstr=['zfs','create','-V',size_str,pname]\n self.zfsdlist.get_dev_initial()\n try:\n self._execute(*cmdstr,root_helper=self.r_helper,run_as_root=True) \n except putils.ProcessExecutionError as err:\n LOG.error(_('Cmd :%s') % err.cmd)\n LOG.error(_('StdOut :%s') % err.stdout)\n LOG.error(_('StdErr :%s') % err.stderr)\n raise NameError('Error:failed to create zfs volume:%s' % name) \n \n newdev=self.zfsdlist.get_dev_name()\n if newdev is None:\n raise NameError('Device for volume:%s create failure!!!' % name)\n \n self.set_property_of_volume('reservation',size_str, pname, raise_sign=False)"
] | [
"0.6814054",
"0.67270184",
"0.63690454",
"0.6242397",
"0.62349063",
"0.62077343",
"0.6189123",
"0.61831164",
"0.61421955",
"0.6133715",
"0.60924095",
"0.6034682",
"0.5951743",
"0.59173465",
"0.58347845",
"0.5762987",
"0.5725603",
"0.5716367",
"0.5713912",
"0.57078236",
"0.57024205",
"0.56885594",
"0.5671502",
"0.56612307",
"0.5656894",
"0.56514764",
"0.56459314",
"0.5640488",
"0.56229985",
"0.56162906"
] | 0.7235573 | 0 |
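
Each record above closes with a list of negative passages followed by a parallel, position-aligned list of string-encoded relevance scores, then the positive document's own score and rank. A minimal sketch of how a loader might rejoin those parallel lists; the variable contents are illustrative stand-ins, not values copied from the dump:

# Hypothetical loader step: pair each negative with its score and rank them.
# The two lists are parallel by position; the example entries are abbreviated.
negatives = ["def create_volume(...): ...", "def create(args, **_): ..."]
scores = ["0.6814054", "0.67270184"]
ranked = sorted(zip(negatives, map(float, scores)), key=lambda pair: pair[1], reverse=True)
for text, score in ranked:
    print(f"{score:.4f}\t{text[:40]}")
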
Edits a storage volume template. [Arguments] | def fusion_api_edit_storage_volume_template(self, body, uri, api=None, headers=None):
return self.template.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit(self, connection_id, arguments, template):\n context = self.context\n self.connection_id = str(connection_id)\n arguments = str(arguments)\n self.arguments_src = arguments\n self._arg = Aqueduct.parse(arguments)\n if not isinstance(template, (str, unicode)):\n template = str(template)\n self.src = template\n self.template = t = context.template_class(template)\n t.cook()\n context._v_query_cache={}, Bucket()",
"def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)",
"def edit_bucket_template(self, template_id, **kwargs):\n msg = \"edit_bucket_template not implemented\"\n raise NotImplementedError(msg)",
"def fusion_api_create_storage_volume_template(self, body, api=None, headers=None):\n return self.template.create(body=body, api=api, headers=headers)",
"def _modify_template_disks_according_to_input(template_disks, input_disks):\n\n # Populating the disk names of vm-template in a dictionary,\n # and mapping them to their index in template_disks list\n vm_template_disk_names = {}\n for (i, disk) in enumerate(template_disks):\n vm_template_disk_names[disk.virtual_disk_name] = i\n\n from .vendored_sdks.models import VirtualDisk\n\n # Check if disks entered by the user exist in vm-template,\n # then override the properties specified. Else create a new disk.\n for disk in input_disks:\n if disk['name'] in vm_template_disk_names.keys():\n index = vm_template_disk_names[disk['name']]\n if 'controller' in disk.keys():\n template_disks[index].controller_id = disk['controller']\n if 'mode' in disk.keys():\n template_disks[index].independence_mode = disk['mode']\n if 'size' in disk.keys():\n template_disks[index].total_size = disk['size']\n\n else:\n disk_name = disk['name']\n if 'controller' in disk.keys():\n controller = disk['controller']\n else:\n raise CLIError('controller parameter not specified for disk ' + disk_name + \".\")\n if 'mode' in disk.keys():\n mode = disk['mode']\n else:\n raise CLIError('mode parameter not specified for disk ' + disk_name + \".\")\n if 'size' in disk.keys():\n size = disk['size']\n else:\n raise CLIError('size parameter not specified for disk ' + disk_name + \".\")\n\n disk_object = VirtualDisk(controller_id=controller,\n independence_mode=mode,\n total_size=size)\n template_disks.append(disk_object)\n return template_disks",
"def cli(env, identifier, name, minimum, maximum, userdata, userfile, cpu, memory):\n\n template = {}\n autoscale = AutoScaleManager(env.client)\n group = autoscale.details(identifier)\n\n template['name'] = name\n template['minimumMemberCount'] = minimum\n template['maximumMemberCount'] = maximum\n virt_template = {}\n if userdata:\n virt_template['userData'] = [{\"value\": userdata}]\n elif userfile:\n with open(userfile, 'r', encoding=\"utf-8\") as userfile_obj:\n virt_template['userData'] = [{\"value\": userfile_obj.read()}]\n virt_template['startCpus'] = cpu\n virt_template['maxMemory'] = memory\n\n # Remove any entries that are `None` as the API will complain about them.\n template['virtualGuestMemberTemplate'] = clean_dict(virt_template)\n clean_template = clean_dict(template)\n\n # If there are any values edited in the template, we need to get the OLD template values and replace them.\n if template['virtualGuestMemberTemplate']:\n # Update old template with new values\n for key, value in clean_template['virtualGuestMemberTemplate'].items():\n group['virtualGuestMemberTemplate'][key] = value\n clean_template['virtualGuestMemberTemplate'] = group['virtualGuestMemberTemplate']\n\n autoscale.edit(identifier, clean_template)\n click.echo(\"Done\")",
"def edit_equipment_template(self) -> None:\n it = self.app.ui.EquipmentTemplatesListWidget.currentItem()\n self.app.ui.EquipmentTemplatesListWidget.setCurrentItem(None)\n self.app.ui.EquipmentTemplatesListWidget.setCurrentItem(it)\n self.app.ui.CancelPropertiesButton.click()\n self.load_equipment_to_edit()",
"def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):\n return self.template.delete(name=name, uri=uri, api=api, headers=headers)",
"def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)",
"def test_customization_template_crud():\n template_crud = pxe.CustomizationTemplate(\n name=generate_random_string(size=8),\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n template_crud.create()\n with update(template_crud):\n template_crud.name = template_crud.name + \"_update\"\n template_crud.delete(cancel=False)",
"def setTemplate(self, template):\n self.template = template",
"def template(self, template):\n self._template = template",
"def update_volume(VolumeId=None, Name=None, MountPoint=None):\n pass",
"def update_with_template_args(args, list_args=None):\r\n if not args.get('--template'):\r\n return\r\n\r\n list_args = list_args or []\r\n\r\n template_path = args.pop('--template')\r\n if not os.path.exists(template_path):\r\n raise ArgumentError(\r\n 'File does not exist [-t | --template] = %s'\r\n % template_path)\r\n\r\n config = configparser.ConfigParser()\r\n ini_str = '[settings]\\n' + open(\r\n os.path.expanduser(template_path), 'r').read()\r\n ini_fp = StringIO(ini_str)\r\n config.readfp(ini_fp)\r\n\r\n # Merge template options with the options passed in\r\n for key, value in config.items('settings'):\r\n option_key = '--%s' % key\r\n if option_key in list_args:\r\n value = value.split(',')\r\n if not args.get(option_key):\r\n args[option_key] = value",
"def edit_form():\n return template (\"edit\")",
"def create_volume(c,i):\n return c.volumes.create(\n size = \"10\",\n display_name = \"instantserver-1\",\n display_description = \"Volume for instantserver-1\",\n imageRef = i\n )",
"def update_volumes():\n print 'do something useful here'",
"def edit_template(self):\n return '{}/{}.html'.format(self.object_name, self.edit_endpoint)",
"def template(self, template):\n\n self._template = template",
"def template(self, template):\n\n self._template = template",
"def input_template(template, fields):\n editor = os.environ.get('EDITOR', '/usr/bin/vim')\n with tempfile.NamedTemporaryFile('w+t') as ofile:\n ofile.write(template % fields)\n ofile.flush()\n user_command = '%s %s' % (editor, ofile.name)\n if os.system(user_command) != 0:\n raise Error('Error acquiring user input (command was %r).' % user_command)\n with open(ofile.name, 'r') as ifile:\n filled_template = ifile.read()\n\n fields = dict(parse_template(filled_template))\n return fields",
"def edit(self, template, tmpl_args, validator=None):\n template = self.env.get_template(template)\n clean_template = template.render(**tmpl_args)\n\n with tempfile.NamedTemporaryFile(mode=\"w+\") as f:\n f.write(clean_template)\n f.flush()\n mod_time = os.stat(f.name).st_mtime\n editor_command = self.command.format(f.name)\n while True:\n # 1. Execute an editor and check if the user saved the data\n self.app.console.cleanup()\n os.system(editor_command)\n self.app.console.start()\n self.app.display.redraw()\n file_was_saved = mod_time != os.stat(f.name).st_mtime\n if not file_was_saved:\n answer = self.app.console.query_bool(\"You haven't saved the file, \"\n \"do you want to retry?\")\n if not answer:\n return None\n continue\n\n f.seek(0, os.SEEK_SET)\n data_after_change = f.read()\n # 2. Handle YAML parsing\n try:\n parsed = yaml.safe_load(io.StringIO(data_after_change))\n except Exception:\n msg = \"Unable to parse result as YAML, do you want to retry?\"\n answer = self.app.console.query_bool(msg)\n if not answer:\n return None\n continue\n\n # 3. Handle external validation\n if validator is not None:\n try:\n parsed = validator(parsed)\n except Exception as e:\n msg = \" \".join(e.args)\n msg = \"Unable to parse values ({}), do you want to retry?\".format(msg)\n answer = self.app.console.query_bool(msg)\n if not answer:\n return None\n continue\n\n return parsed",
"def test_change_volume_type(self, create_volume, volumes_steps):\n volume_name = generate_ids('volume').next()\n create_volume(volume_name, volume_type=None)\n volumes_steps.change_volume_type(volume_name)",
"def action(self, args):\n create_new_scratch_file(args.file, self.settings, py_template_func)",
"def edit_template(request, template_id):\n template = None\n if template_id:\n template = models.Template.get_by_id(int(template_id))\n return utility.edit_instance(request, models.Template, forms.TemplateEditForm,\n 'admin/edit_template',\n urlresolvers.reverse('views.admin.list_templates'),\n template_id, template=template)",
"def update_volume( opencloud_volume ):\n\n client = connect_syndicate()\n\n vol_name = opencloud_volume.name\n vol_description = opencloud_volume.description\n vol_private = opencloud_volume.private\n vol_archive = opencloud_volume.archive\n vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )\n\n try:\n rc = client.update_volume( vol_name,\n description=vol_description,\n private=vol_private,\n archive=vol_archive,\n default_gateway_caps=vol_default_gateway_caps )\n\n if not rc:\n raise Exception(\"update_volume(%s) failed!\" % vol_name )\n\n except Exception, e:\n # transort or method error \n logger.exception(e)\n return False\n\n else:\n return True",
"def edit(self):\n template = TaskInfo._generate_template(self.dict())\n tempf = tempfile.mkstemp()[1]\n try:\n with open(tempf, 'w') as outfile:\n outfile.write(template)\n\n editor_cmd = [\n TaskInfo._select_editor(),\n tempf,\n ]\n os.system(\" \".join(editor_cmd))\n\n # validate edited file\n while True:\n try:\n self._file_update(tempf)\n break\n except TaskSyntaxError as e:\n input(\n # pylint: disable=line-too-long\n \"Task syntax error (enter returns to editor): {}\".format( # nopep8\n str(e)))\n os.system(\" \".join(editor_cmd))\n continue\n finally:\n if os.path.exists(tempf):\n os.remove(tempf)\n\n # commit changes\n self.serialize()",
"def retype(self, ctxt, volume, new_type, diff, host):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(volume)\n vpool_name = new_type['extra_specs']['ViPR:VPOOL']\n\n try:\n task = self.volume_obj.update(\n self.configuration.vipr_tenant +\n \"/\" +\n self.configuration.vipr_project,\n volume_name,\n vpool_name)\n\n self.volume_obj.check_for_sync(task['task'][0], True)\n return True\n except vipr_utils.SOSError as e:\n if e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR:\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + volume_name + \": update failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : %s type update failed\") % volume_name)",
"def fusion_api_update_storage_volume(self, body, uri, api=None, headers=None):\n return self.volume.update(body=body, uri=uri, api=api, headers=headers)",
"def perform_module_operation(self):\n size = self.module.params['size']\n state = self.module.params['state']\n new_name = self.module.params['new_name']\n vol_id = self.module.params['vol_id']\n vol_name = self.module.params['vol_name']\n sg_name = self.module.params['sg_name']\n cap_unit = self.module.params['cap_unit']\n new_sg_name = self.module.params['new_sg_name']\n\n if vol_name is not None and sg_name is None:\n self.show_error_exit(msg='Specify Storage group name along '\n 'with volume name')\n\n if size and cap_unit is None:\n cap_unit = 'GB'\n elif cap_unit and size is None:\n self.show_error_exit(msg='Parameters size and cap_unit are '\n 'required together')\n self.volume_id = vol_id\n\n vol = self.get_volume()\n\n existing_vol_size = 0\n if vol is not None:\n self.volume_id = vol['volumeId']\n vol_id = vol['volumeId']\n existing_vol_size = vol['cap_gb']\n\n changed = False\n\n # Call to create volume in storage group\n if state == 'present' and vol is None:\n if new_name:\n self.show_error_exit(msg=\"Invalid argument new_name \"\n \"while creating a volume\")\n if size is None:\n self.show_error_exit(msg='Size is required to create volume')\n vol_id = self.create_volume(vol_name, sg_name, size, cap_unit)\n changed = True\n\n if state == 'present' and vol and size:\n if size is None:\n self.show_error_exit(msg='Size is required to expand volume')\n # Convert the given size to GB\n if size is not None and size > 0:\n size = utils.get_size_in_gb(size, cap_unit)\n LOG.info('Existing Size: %s GB, Specified Size: %s GB',\n existing_vol_size, size)\n changed = self.expand_volume_helper(vol, size, existing_vol_size)\n\n if state == 'present' and vol and new_name is not None:\n if len(new_name.strip()) == 0:\n self.show_error_exit(msg=\"Please provide valid volume \"\n \"name.\")\n\n vol_name = vol['volume_identifier']\n if new_name != vol_name:\n LOG.info('Changing the name of volume %s to %s',\n vol_name, new_name)\n changed = self.rename_volume(vol_id, new_name) or changed\n\n if state == 'absent' and vol:\n LOG.info('Deleting volume %s ', vol_id)\n changed = self.delete_volume(vol_id) or changed\n\n if state == 'present' and vol and new_sg_name:\n vol_sg = vol['storageGroupId'][0]\n if vol_sg != new_sg_name:\n LOG.info('Moving volume from %s to %s', vol_sg, new_name)\n changed = self.move_volume_between_storage_groups(\n vol, sg_name, new_sg_name) or changed\n\n '''\n Finally update the module changed state and saving updated volume\n details\n '''\n self.u4v_conn.set_array_id(\n array_id=self.module.params['serial_no'])\n self.result[\"changed\"] = changed\n if state == 'present':\n self.result[\"volume_details\"] = self.get_volume()\n LOG.info(\"Closing unisphere connection %s\", self.u4v_conn)\n utils.close_connection(self.u4v_conn)\n LOG.info(\"Connection closed successfully\")\n self.module.exit_json(**self.result)"
] | [
"0.6604789",
"0.6382298",
"0.627123",
"0.61627245",
"0.57500106",
"0.5632828",
"0.557544",
"0.548894",
"0.5487666",
"0.5487666",
"0.54597175",
"0.54359376",
"0.5409239",
"0.5394115",
"0.53886133",
"0.53731084",
"0.53463495",
"0.53461826",
"0.5322886",
"0.5322886",
"0.5293684",
"0.5266999",
"0.5231782",
"0.5218871",
"0.5204222",
"0.51891303",
"0.5188562",
"0.5166628",
"0.5047683",
"0.5032446"
] | 0.7517243 | 0 |
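
The positive document for the record above is a one-line delegation to an update helper. As a rough, self-contained approximation of what such a wrapper might do over REST, the sketch below issues a PUT with a JSON body; the endpoint shape, the 'Auth' header, and the payload fields are assumptions for illustration, not details confirmed by this dataset:

import requests

def edit_storage_volume_template(base_url, uri, body, session_token):
    # PUT the edited template body back to its resource URI.
    # URL shape and 'Auth' header are illustrative assumptions.
    headers = {"Content-Type": "application/json", "Auth": session_token}
    response = requests.put(base_url + uri, json=body, headers=headers, timeout=30)
    response.raise_for_status()  # surface HTTP errors instead of returning them silently
    return response.json()
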
Gets a collection of storage volume templates. [Arguments] | def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):
return self.template.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)",
"def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()",
"def fusion_api_storage_system_get_templates(self, uri=None, param='', api=None, headers=None):\n return self.system.get_templates(uri=uri, api=api, headers=headers, param=param)",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)",
"def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def volumes(self):",
"def getVolumes(self, df: str = None, ts: str = None, cursor: str = None, pageSize: int = None):\n params = {\n 'df': df,\n 'ts': ts,\n 'cursor': cursor,\n 'pageSize': pageSize\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_VOLUMES}', params=params)",
"def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})",
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs",
"def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret",
"def fusion_api_create_storage_volume_template(self, body, api=None, headers=None):\n return self.template.create(body=body, api=api, headers=headers)",
"def ft_volumeslice( slice_name ):\n print \"slice: %s\" % slice_name\n \n volumes = get_volumeslice_volume_names( slice_name )\n \n print \"volumes mounted in slice %s:\" % slice_name\n for v in volumes:\n print \" %s:\" % v\n \n vs = get_volumeslice( v, slice_name )\n \n print \" %s\" % dir(vs)",
"def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols",
"def create_volume(c,i):\n return c.volumes.create(\n size = \"10\",\n display_name = \"instantserver-1\",\n display_description = \"Volume for instantserver-1\",\n imageRef = i\n )",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def get_persistent_volumes(k8s_ctx: str) -> List[str]:\n cmd = f'kubectl --context={k8s_ctx} get pv -o json'\n p = safe_exec(cmd)\n try:\n dvols = json.loads(p.stdout.decode())\n except Exception as err:\n raise RuntimeError('Error when parsing listing of Kubernetes persistent volumes ' + str(err))\n if dvols is None:\n raise RuntimeError('Result of kubectl pv listing could not be read properly')\n return [i['metadata']['name'] for i in dvols['items']]",
"def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in data]\n return volumes",
"def getStorageVolumeData(self,node,storage,volume):\n data = self.connect('get','nodes/%s/storage/%s/content/%s' % (node,storage,volume),None)\n return data",
"def volumes(self) -> Optional[Sequence['_core.v1.outputs.Volume']]:\n return pulumi.get(self, \"volumes\")",
"def find_templates(self, name):\n script = (\n 'Get-SCVMTemplate -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVMTemplate(system=self, raw=tmpl_data) for tmpl_data in data]\n return [SCVMTemplate(system=self, raw=data)]",
"def volumes(self, details=True):\n if details:\n vol = _volume.Volume\n else:\n vol = _volume.VolumeDetail\n\n return list(self._list(vol, paginated=False))",
"def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes",
"def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volumes\")",
"def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volumes\")",
"def template():\n\n return s3_rest_controller(rheader = s3db.dc_rheader)",
"def volumes(self) -> dict:\n return self.data[\"volumes\"]",
"def volume_get(context, volume_id):\n return _volume_get(context, volume_id)",
"def volumes(self):\n return self._volumes"
] | [
"0.66193295",
"0.6232202",
"0.6216252",
"0.61740613",
"0.6091448",
"0.6026836",
"0.5980668",
"0.5839226",
"0.5821388",
"0.580545",
"0.57947487",
"0.57925844",
"0.5769682",
"0.5715384",
"0.5713581",
"0.5696221",
"0.56869644",
"0.5641485",
"0.56224024",
"0.56187606",
"0.5549489",
"0.55478036",
"0.55317175",
"0.5530188",
"0.54892856",
"0.54892856",
"0.5465359",
"0.54609877",
"0.54502714",
"0.54345983"
] | 0.75884503 | 0 |
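
The wrapper in the record above forwards optional uri and param arguments to a GET helper. A minimal sketch of the equivalent REST call, including a paging query string; the collection path and parameter names are assumptions for illustration only:

import requests

def get_storage_volume_templates(base_url, session_token, uri=None, start=0, count=100):
    # Default to the collection endpoint when no explicit URI is given.
    # The path and paging parameter names are illustrative assumptions.
    target = uri or "/rest/storage-volume-templates"
    headers = {"Auth": session_token}
    response = requests.get(base_url + target,
                            params={"start": start, "count": count},
                            headers=headers, timeout=30)
    response.raise_for_status()
    return response.json().get("members", [])
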
Deletes a storage volume template based on name OR uri. [Arguments] | def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):
return self.template.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):\n return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))",
"def delete_custom_template(self, name, filename, context):\n pass",
"def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % volname\n return g.run(mnode, cmd)",
"def delete_volumes(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n for volume in volumes:\n command = 'cinder delete %s' % volume['id']\n a = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]",
"def destroy_template(name=None, call=None, kwargs=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The destroy_template function must be called with -f.\"\n )\n if kwargs is None:\n kwargs = {}\n name = kwargs.get(\"name\", None)\n session = _get_session()\n vms = session.xenapi.VM.get_all_records()\n ret = {}\n found = False\n for vm in vms:\n record = session.xenapi.VM.get_record(vm)\n if record[\"is_a_template\"]:\n if record[\"name_label\"] == name:\n found = True\n # log.debug(record['name_label'])\n session.xenapi.VM.destroy(vm)\n ret[name] = {\"status\": \"destroyed\"}\n if not found:\n ret[name] = {\"status\": \"not found\"}\n return ret",
"def delete_cloudformation_template(self, name, filename, context):\n stack_name = utils.generate_stack_name(context['Stage'], self.name, name)\n utils.delete_cf_stack(\n name=stack_name,\n dry_run=self.dry_run\n )",
"def test_delete_namespaced_template(self):\n pass",
"def delete_template():\n posted_json = request.get_json(force=True)\n try:\n name = posted_json['template_name']\n except KeyError:\n print(\"Not all required keys are present!\")\n r = jsonify(message=\"Not all required keys for add template are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n if bootstrapper_utils.delete_template(name):\n return jsonify(success=True, message='Deleted Template Successfully', status_code=200)\n else:\n r = jsonify(success=False, message='Could not delete template', status_code=500)\n r.status_code = 500\n return r",
"def delete_template(self):\n try:\n os.remove(self.path)\n except Exception:\n pass",
"def delete_template(self, filename):\n if self.template_exists(filename):\n self.client.service.DeleteTemplate(filename=filename)\n else:\n raise LiveDocxError('Template \"%s\" not exists and it cannot be deleted' % filename)",
"def delete_vs(vs_name, created_objects):\r\n if keep_objects:\r\n return\r\n custom_object_api_instance = client.CustomObjectsApi()\r\n try:\r\n custom_object_api_response = custom_object_api_instance.delete_namespaced_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1\",\r\n plural=\"volumesnapshots\",\r\n name=vs_name,\r\n namespace=namespace_value\r\n )\r\n LOGGER.debug(custom_object_api_response)\r\n LOGGER.info(f\"Volume Snapshot Delete : {vs_name} deleted\")\r\n created_objects[\"vs\"].remove(vs_name)\r\n except ApiException as e:\r\n LOGGER.error(f\"Exception when calling CustomObjectsApi->delete_cluster_custom_object: {e}\")\r\n clean_with_created_objects(created_objects)\r\n assert False",
"def test_delete_volumes(self, volumes_count, volumes_steps,\n create_volumes):\n volume_names = list(generate_ids('volume', count=volumes_count))\n create_volumes(volume_names)",
"def do_delete_configured_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No storage specified.\")\n return\n self.do_coroutine(self._localStorageRoutines.delete_configured_volume_routine(args[0]))",
"def destroy(name, call=None):\n if call == \"function\":\n raise SaltCloudSystemExit(\n \"The destroy action must be called with -d, --destroy, -a or --action.\"\n )\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"destroying instance\",\n \"salt/cloud/{}/destroying\".format(name),\n args={\"name\": name},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n attached_volumes = None\n\n delete_volumes = config.get_cloud_config_value(\n \"delete_volumes\", get_configured_provider(), __opts__, search_global=False\n )\n # Get volumes before the server is deleted\n attached_volumes = conn.get_attached_volumes(\n datacenter_id=datacenter_id, server_id=node[\"id\"]\n )\n\n conn.delete_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n # The server is deleted and now is safe to delete the volumes\n if delete_volumes:\n for vol in attached_volumes[\"items\"]:\n log.debug(\"Deleting volume %s\", vol[\"id\"])\n conn.delete_volume(datacenter_id=datacenter_id, volume_id=vol[\"id\"])\n log.debug(\"Deleted volume %s\", vol[\"id\"])\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"destroyed instance\",\n \"salt/cloud/{}/destroyed\".format(name),\n args={\"name\": name},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n\n if __opts__.get(\"update_cachedir\", False) is True:\n __utils__[\"cloud.delete_minion_cachedir\"](\n name, _get_active_provider_name().split(\":\")[0], __opts__\n )\n\n return True",
"def delete_template(self, name, mount_point=DEFAULT_MOUNT_POINT):\n params = {\n 'name': name,\n }\n api_path = '/v1/{mount_point}/template/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.delete(\n url=api_path,\n json=params,\n )",
"def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return",
"def delete_template(self):\n return '{}/{}.html'.format(self.object_name, self.delete_endpoint)",
"def delete_volume(self, volume):\n nfs_share = volume.get('provider_location')\n if nfs_share:\n nms = self.share2nms[nfs_share]\n vol, parent_folder = self._get_share_datasets(nfs_share)\n folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])\n mount_path = self.remote_path(volume).strip(\n '/%s' % self.VOLUME_FILE_NAME)\n if mount_path in self._remotefsclient._read_mounts():\n cinder.privsep.fs.umount(mount_path)\n try:\n props = nms.folder.get_child_props(folder, 'origin') or {}\n nms.folder.destroy(folder, '-r')\n except utils.NexentaException as exc:\n if 'does not exist' in exc.args[0]:\n LOG.info('Folder %s does not exist, it was '\n 'already deleted.', folder)\n return\n raise\n self._get_capacity_info(nfs_share)\n origin = props.get('origin')\n if origin and self._is_clone_snapshot_name(origin):\n try:\n nms.snapshot.destroy(origin, '')\n except utils.NexentaException as exc:\n if 'does not exist' in exc.args[0]:\n LOG.info('Snapshot %s does not exist, it was '\n 'already deleted.', origin)\n return\n raise",
"def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)",
"def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()",
"def delete_volume_snapshot(volume_snapshots):\n if type(volume_snapshots) is not list:\n volumes = [volume_snapshots]\n command = 'cinder snapshot-delete %s' % \\\n \" \".join(snapshot['id'] for snapshot in volume_snapshots)\n d = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]",
"def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)",
"def delete_tag_template(self, name):\n self.__datacatalog.delete_tag_template(name=name, force=True)\n logging.info('Tag Template deleted: %s', name)",
"def test_aws_service_api_volume_delete(self):\n pass",
"def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):\n return self.system.delete(uri=uri, api=api, headers=headers)",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def storage_pool_delete_by_storage(context, storage_id):\n _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))"
] | [
"0.69322246",
"0.66325945",
"0.6535928",
"0.64637643",
"0.64629805",
"0.63880926",
"0.63642126",
"0.6279384",
"0.6176047",
"0.6130483",
"0.6120978",
"0.6117986",
"0.61067766",
"0.61050576",
"0.60910296",
"0.60836864",
"0.60775805",
"0.6065635",
"0.60409033",
"0.5998365",
"0.5995144",
"0.59454024",
"0.594236",
"0.5905566",
"0.5903204",
"0.58989364",
"0.5886229",
"0.5886026",
"0.5882854",
"0.5855925"
] | 0.831892 | 0 |
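
The delete wrapper above accepts either a name or a uri. One plausible way to implement that dispatch is to resolve the name to a resource URI first and then issue a single DELETE; the endpoint paths, filter syntax, and 'Auth' header below are assumptions for illustration, not the library's confirmed API:

import requests

def delete_storage_volume_template(base_url, session_token, name=None, uri=None):
    # Resolve a name to its resource URI before deleting; an explicit URI wins.
    headers = {"Auth": session_token}
    if uri is None:
        if name is None:
            raise ValueError("either name or uri must be provided")
        listing = requests.get(base_url + "/rest/storage-volume-templates",
                               params={"filter": 'name=="%s"' % name},
                               headers=headers, timeout=30)
        listing.raise_for_status()
        members = listing.json().get("members", [])
        if not members:
            raise LookupError("no storage volume template named %r" % name)
        uri = members[0]["uri"]
    response = requests.delete(base_url + uri, headers=headers, timeout=30)
    response.raise_for_status()
    return response.status_code
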
Updates a storage volume. [Arguments] | def fusion_api_update_storage_volume(self, body, uri, api=None, headers=None):
return self.volume.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_volume(VolumeId=None, Name=None, MountPoint=None):\n pass",
"def update_volume( opencloud_volume ):\n\n client = connect_syndicate()\n\n vol_name = opencloud_volume.name\n vol_description = opencloud_volume.description\n vol_private = opencloud_volume.private\n vol_archive = opencloud_volume.archive\n vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )\n\n try:\n rc = client.update_volume( vol_name,\n description=vol_description,\n private=vol_private,\n archive=vol_archive,\n default_gateway_caps=vol_default_gateway_caps )\n\n if not rc:\n raise Exception(\"update_volume(%s) failed!\" % vol_name )\n\n except Exception, e:\n # transort or method error \n logger.exception(e)\n return False\n\n else:\n return True",
"def update_volumes():\n print 'do something useful here'",
"def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"",
"def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)",
"def _update_volume(self):\n self._log(\"raw\", \"self._volume : {0}, type : {1}\".format(self._volume, type(self._volume)))\n self._log(\"raw\", \"self._media_volume : {0}, type : {1}\".format(self._media_volume, type(self._media_volume)))\n self._log(\"debug\", \"setting volume : {0}, type : {1}\".format(settings.get(\"vlc\", \"volume\", \"master\"), type(settings.get(\"vlc\", \"volume\", \"master\"))))\n volume = float(int(self._volume) * int(self._media_volume) * int(settings.get(\"vlc\", \"volume\", \"master\")) / 10000)\n if volume > 100:\n volume = 100\n elif volume < 0:\n volume = 0\n self.stdin_queue.put_nowait(\"volume {0}\".format(int(volume)))",
"def Set(*args):\n return _XCAFDoc.XCAFDoc_Volume_Set(*args)",
"def disk_update(context, disk_id, values):\n return NotImplemented",
"def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)",
"def fusion_api_edit_storage_volume_template(self, body, uri, api=None, headers=None):\n return self.template.update(body=body, uri=uri, api=api, headers=headers)",
"async def volume(self, ctx: commands.Context, volume: int):\n if not 0 <= volume <= 100:\n raise InvalidVolume()\n\n player = ctx.bot.lavalink.player_manager.get(ctx.guild.id)\n \n await player.set_volume(volume)\n await ctx.send(f'Volume alterado para {volume}%.')",
"def perform_module_operation(self):\n size = self.module.params['size']\n state = self.module.params['state']\n new_name = self.module.params['new_name']\n vol_id = self.module.params['vol_id']\n vol_name = self.module.params['vol_name']\n sg_name = self.module.params['sg_name']\n cap_unit = self.module.params['cap_unit']\n new_sg_name = self.module.params['new_sg_name']\n\n if vol_name is not None and sg_name is None:\n self.show_error_exit(msg='Specify Storage group name along '\n 'with volume name')\n\n if size and cap_unit is None:\n cap_unit = 'GB'\n elif cap_unit and size is None:\n self.show_error_exit(msg='Parameters size and cap_unit are '\n 'required together')\n self.volume_id = vol_id\n\n vol = self.get_volume()\n\n existing_vol_size = 0\n if vol is not None:\n self.volume_id = vol['volumeId']\n vol_id = vol['volumeId']\n existing_vol_size = vol['cap_gb']\n\n changed = False\n\n # Call to create volume in storage group\n if state == 'present' and vol is None:\n if new_name:\n self.show_error_exit(msg=\"Invalid argument new_name \"\n \"while creating a volume\")\n if size is None:\n self.show_error_exit(msg='Size is required to create volume')\n vol_id = self.create_volume(vol_name, sg_name, size, cap_unit)\n changed = True\n\n if state == 'present' and vol and size:\n if size is None:\n self.show_error_exit(msg='Size is required to expand volume')\n # Convert the given size to GB\n if size is not None and size > 0:\n size = utils.get_size_in_gb(size, cap_unit)\n LOG.info('Existing Size: %s GB, Specified Size: %s GB',\n existing_vol_size, size)\n changed = self.expand_volume_helper(vol, size, existing_vol_size)\n\n if state == 'present' and vol and new_name is not None:\n if len(new_name.strip()) == 0:\n self.show_error_exit(msg=\"Please provide valid volume \"\n \"name.\")\n\n vol_name = vol['volume_identifier']\n if new_name != vol_name:\n LOG.info('Changing the name of volume %s to %s',\n vol_name, new_name)\n changed = self.rename_volume(vol_id, new_name) or changed\n\n if state == 'absent' and vol:\n LOG.info('Deleting volume %s ', vol_id)\n changed = self.delete_volume(vol_id) or changed\n\n if state == 'present' and vol and new_sg_name:\n vol_sg = vol['storageGroupId'][0]\n if vol_sg != new_sg_name:\n LOG.info('Moving volume from %s to %s', vol_sg, new_name)\n changed = self.move_volume_between_storage_groups(\n vol, sg_name, new_sg_name) or changed\n\n '''\n Finally update the module changed state and saving updated volume\n details\n '''\n self.u4v_conn.set_array_id(\n array_id=self.module.params['serial_no'])\n self.result[\"changed\"] = changed\n if state == 'present':\n self.result[\"volume_details\"] = self.get_volume()\n LOG.info(\"Closing unisphere connection %s\", self.u4v_conn)\n utils.close_connection(self.u4v_conn)\n LOG.info(\"Connection closed successfully\")\n self.module.exit_json(**self.result)",
"def volume_up(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)",
"def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )",
"def _volume_command(ramp, volume):\n if volume is not None:\n ramp.set_volume(float(volume))\n else:\n print ramp.volume",
"def vol_handler(bot, update, args):\n if len(args) == 1:\n if args[0].isdigit():\n return update.message.reply_text(vol_set(args[0]))\n elif args[0] == 'mute':\n return update.message.reply_text(vol_mute())\n\n return update.message.reply_text('Syntax: /v [mute|<level(0-100)>]')",
"def setVolume(self, *args):\n return _libsbml.Compartment_setVolume(self, *args)",
"def expand_volume(self, vol, new_size):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(vol)\n size_in_bytes = vipr_utils.to_bytes(str(new_size) + \"G\")\n\n try:\n self.volume_obj.expand(\n self.configuration.vipr_tenant +\n \"/\" +\n self.configuration.vipr_project +\n \"/\" +\n volume_name,\n size_in_bytes,\n True)\n except vipr_utils.SOSError as e:\n if e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR:\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + volume_name + \": expand failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : %s expand failed\") % volume_name)",
"def volup(self, raiseby=1):\n command + 'volup ' + str(raiseby)\n self.run_command(command)",
"def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)",
"def do_update(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif not \"{}.{}\".format(args[0], args[1]) in dicti:\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n key = dicti[\"{}.{}\".format(args[0], args[1])]\n setattr(key, args[2], args[3])\n key.save()",
"def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])",
"def swap_volume(self, old_connection_info, new_connection_info,\n instance, mountpoint, resize_to):",
"async def do_update(self, data):\n old = await self.config()\n\n new = old.copy()\n new.update(data)\n\n verrors = ValidationErrors()\n\n for attr, minlen, maxlen in (\n ('access_key', 5, 20),\n ('secret_key', 8, 40),\n ):\n curlen = len(new.get(attr, ''))\n if curlen < minlen or curlen > maxlen:\n verrors.add(\n f's3_update.{attr}', f'Attribute should be {minlen} to {maxlen} in length'\n )\n\n if not new['storage_path']:\n verrors.add('s3_update.storage_path', 'Storage path is required')\n else:\n await check_path_resides_within_volume(\n verrors, self.middleware, 's3_update.storage_path', new['storage_path']\n )\n\n if not verrors:\n if new['storage_path'].rstrip('/').count('/') < 3:\n verrors.add(\n 's3_update.storage_path',\n 'Top level datasets are not allowed. i.e /mnt/tank/dataset is allowed'\n )\n else:\n # If the storage_path does not exist, let's create it\n if not os.path.exists(new['storage_path']):\n os.makedirs(new['storage_path'])\n\n if new['certificate']:\n verrors.extend((await self.middleware.call(\n 'certificate.cert_services_validation', new['certificate'], 's3_update.certificate', False\n )))\n\n if new['bindip'] not in await self.bindip_choices():\n verrors.add('s3_update.bindip', 'Please provide a valid ip address')\n\n if verrors:\n raise verrors\n\n new['disks'] = new.pop('storage_path')\n\n await self._update_service(old, new)\n\n if (await self.middleware.call('filesystem.stat', new['disks']))['user'] != 'minio':\n await self.middleware.call(\n 'filesystem.setperm',\n {\n 'path': new['disks'],\n 'mode': str(775),\n 'uid': (await self.middleware.call('dscache.get_uncached_user', 'minio'))['pw_uid'],\n 'gid': (await self.middleware.call('dscache.get_uncached_group', 'minio'))['gr_gid'],\n 'options': {'recursive': True, 'traverse': False}\n }\n )\n\n return await self.config()",
"async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume),delete_after=15)",
"def volume_up(self):\n self._volume += settings.get(\"vlc\", \"volume\", \"step\")\n self._update_volume()\n # self.stdin_queue.put(\"volup\")",
"def XCAFDoc_Volume_Set(*args):\n return _XCAFDoc.XCAFDoc_Volume_Set(*args)",
"def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)",
"def set_volume(self, volume):\n self.get(COMMAND_UIC, 'SetVolume', [('volume', int(volume))])",
"def volume(self, value):\n self._volume = value\n self._sendCommand('%03dVL' % value)"
] | [
"0.7190459",
"0.7092629",
"0.6981456",
"0.6419218",
"0.6410365",
"0.62468994",
"0.6128892",
"0.61107886",
"0.60772836",
"0.60474294",
"0.59711313",
"0.5960724",
"0.59175813",
"0.5858114",
"0.58509284",
"0.5834266",
"0.5816146",
"0.5813269",
"0.5780586",
"0.5769371",
"0.57591677",
"0.5723276",
"0.56984675",
"0.5691175",
"0.56899166",
"0.5665906",
"0.5657488",
"0.5646642",
"0.5645751",
"0.56447566"
] | 0.715785 | 1 |
Gets a collection of Storage Volumes. [Arguments] | def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):
return self.volume.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def list_volumes(self, node=None):\n\n data = self._perform_get(self._get_disk_path(), Disks)\n volumes = [self._to_volume(volume=v, node=node) for v in data]\n return volumes",
"def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols",
"def volumes(self):",
"def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs",
"def volumes(self, details=True):\n if details:\n vol = _volume.Volume\n else:\n vol = _volume.VolumeDetail\n\n return list(self._list(vol, paginated=False))",
"def volumes(self) -> Iterable[dto.Volume]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def volumes(self):\n return self._volumes",
"def getVolumes(self, df: str = None, ts: str = None, cursor: str = None, pageSize: int = None):\n params = {\n 'df': df,\n 'ts': ts,\n 'cursor': cursor,\n 'pageSize': pageSize\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_VOLUMES}', params=params)",
"def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass",
"def volumes(self) -> Optional[Sequence['_core.v1.outputs.Volume']]:\n return pulumi.get(self, \"volumes\")",
"def volumes(self) -> Sequence['outputs.GetVolumeGroupSapHanaVolumeResult']:\n return pulumi.get(self, \"volumes\")",
"def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def volume(self):\n return [node.volume for node in self]",
"def volume_get_all(context, marker=None, limit=None, sort_keys=None,\n sort_dirs=None, filters=None, offset=None):\n session = get_session()\n with session.begin():\n # Generate the query\n query = _generate_paginate_query(context, session, models.Volume,\n marker, limit, sort_keys, sort_dirs,\n filters, offset)\n # No volume would match, return empty list\n if query is None:\n return []\n return query.all()",
"def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret",
"def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volumes\")",
"def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volumes\")",
"def volumes(self) -> dict:\n return self.data[\"volumes\"]",
"def fusion_api_get_storage_volume_attachments(self, uri=None, param='', api=None, headers=None):\n return self.volume_attachment.get(uri=uri, param=param, api=api, headers=headers)",
"def _get_volumes(list_of_volume_ids):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n volumes = ec2_client.get_all_volumes(\n volume_ids=list_of_volume_ids)\n except boto.exception.EC2ResponseError as e:\n if 'InvalidVolume.NotFound' in e:\n all_volumes = ec2_client.get_all_volumes()\n utils.log_available_resources(all_volumes)\n return None\n except boto.exception.BotoServerError as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return volumes",
"def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, headers=headers, param=param)",
"def getStorageVolumeData(self,node,storage,volume):\n data = self.connect('get','nodes/%s/storage/%s/content/%s' % (node,storage,volume),None)\n return data",
"def volumes(self) -> Optional[Sequence['_core.v1.outputs.VolumePatch']]:\n return pulumi.get(self, \"volumes\")",
"def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage",
"def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage"
] | [
"0.7352446",
"0.7275717",
"0.713416",
"0.70805156",
"0.69414204",
"0.69319326",
"0.6845503",
"0.6827726",
"0.67976844",
"0.6723703",
"0.6651151",
"0.6629031",
"0.6606961",
"0.6578981",
"0.65522295",
"0.6530312",
"0.64605623",
"0.6416945",
"0.63577414",
"0.63155895",
"0.62700075",
"0.62700075",
"0.62517303",
"0.62413776",
"0.6193687",
"0.6152855",
"0.6105823",
"0.6098017",
"0.6085531",
"0.6085531"
] | 0.77948713 | 0 |
Deletes storage volumes based on name OR uri. [Arguments] | def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):
return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_volumes(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n for volume in volumes:\n command = 'cinder delete %s' % volume['id']\n a = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]",
"def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()",
"def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):\n return self.template.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete_volume(self, volume):\n nfs_share = volume.get('provider_location')\n if nfs_share:\n nms = self.share2nms[nfs_share]\n vol, parent_folder = self._get_share_datasets(nfs_share)\n folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])\n mount_path = self.remote_path(volume).strip(\n '/%s' % self.VOLUME_FILE_NAME)\n if mount_path in self._remotefsclient._read_mounts():\n cinder.privsep.fs.umount(mount_path)\n try:\n props = nms.folder.get_child_props(folder, 'origin') or {}\n nms.folder.destroy(folder, '-r')\n except utils.NexentaException as exc:\n if 'does not exist' in exc.args[0]:\n LOG.info('Folder %s does not exist, it was '\n 'already deleted.', folder)\n return\n raise\n self._get_capacity_info(nfs_share)\n origin = props.get('origin')\n if origin and self._is_clone_snapshot_name(origin):\n try:\n nms.snapshot.destroy(origin, '')\n except utils.NexentaException as exc:\n if 'does not exist' in exc.args[0]:\n LOG.info('Snapshot %s does not exist, it was '\n 'already deleted.', origin)\n return\n raise",
"def do_delete_configured_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No storage specified.\")\n return\n self.do_coroutine(self._localStorageRoutines.delete_configured_volume_routine(args[0]))",
"def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % volname\n return g.run(mnode, cmd)",
"def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()",
"def delete(self, name):\n result = self.cm.find_name(name)\n path = result[0]['path']\n delete_path = Path(f'{path}/{name}')\n try:\n os.system(f\"rmdir {delete_path}\")\n result[0]['State'] = 'deleted'\n result = self.update_dict(result)\n except:\n Console.error(\"volume is either not empty or not exist\")\n return result",
"def test_aws_service_api_volume_delete(self):\n pass",
"def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))",
"def delete_volume_snapshot(volume_snapshots):\n if type(volume_snapshots) is not list:\n volumes = [volume_snapshots]\n command = 'cinder snapshot-delete %s' % \\\n \" \".join(snapshot['id'] for snapshot in volume_snapshots)\n d = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]",
"def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))",
"def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()",
"def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)",
"def delete(self):\r\n return self.connection.delete_volume(self.id)",
"def test_delete_volumes(self, volumes_count, volumes_steps,\n create_volumes):\n volume_names = list(generate_ids('volume', count=volumes_count))\n create_volumes(volume_names)",
"def destroy(name, call=None):\n if call == \"function\":\n raise SaltCloudSystemExit(\n \"The destroy action must be called with -d, --destroy, -a or --action.\"\n )\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"destroying instance\",\n \"salt/cloud/{}/destroying\".format(name),\n args={\"name\": name},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n attached_volumes = None\n\n delete_volumes = config.get_cloud_config_value(\n \"delete_volumes\", get_configured_provider(), __opts__, search_global=False\n )\n # Get volumes before the server is deleted\n attached_volumes = conn.get_attached_volumes(\n datacenter_id=datacenter_id, server_id=node[\"id\"]\n )\n\n conn.delete_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n # The server is deleted and now is safe to delete the volumes\n if delete_volumes:\n for vol in attached_volumes[\"items\"]:\n log.debug(\"Deleting volume %s\", vol[\"id\"])\n conn.delete_volume(datacenter_id=datacenter_id, volume_id=vol[\"id\"])\n log.debug(\"Deleted volume %s\", vol[\"id\"])\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"destroyed instance\",\n \"salt/cloud/{}/destroyed\".format(name),\n args={\"name\": name},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n\n if __opts__.get(\"update_cachedir\", False) is True:\n __utils__[\"cloud.delete_minion_cachedir\"](\n name, _get_active_provider_name().split(\":\")[0], __opts__\n )\n\n return True",
"def delete_volume(self, context, volume_id, unmanage_only=False):\n context = context.elevated()\n\n volume_ref = self.db.volume_get(context, volume_id)\n\n if context.project_id != volume_ref['project_id']:\n project_id = volume_ref['project_id']\n else:\n project_id = context.project_id\n\n LOG.info(_(\"volume %s: deleting\"), volume_ref['id'])\n if volume_ref['attach_status'] == \"attached\":\n # Volume is still attached, need to detach first\n raise exception.VolumeAttached(volume_id=volume_id)\n\n self._notify_about_volume_usage(context, volume_ref, \"delete.start\")\n self._reset_stats()\n\n try:\n self._delete_cascaded_volume(context, volume_id)\n except Exception:\n LOG.exception(_(\"Failed to deleting volume\"))\n # Get reservations\n try:\n reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting volume\"))\n\n # Delete glance metadata if it exists\n try:\n self.db.volume_glance_metadata_delete_by_volume(context, volume_id)\n LOG.debug(_(\"volume %s: glance metadata deleted\"),\n volume_ref['id'])\n except exception.GlanceMetadataNotFound:\n LOG.debug(_(\"no glance metadata found for volume %s\"),\n volume_ref['id'])\n\n self.db.volume_destroy(context, volume_id)\n LOG.info(_(\"volume %s: deleted successfully\"), volume_ref['id'])\n self._notify_about_volume_usage(context, volume_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n\n self.publish_service_capabilities(context)\n\n return True",
"def delete_volumes(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n ids=None, # type: List[str]\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n ids=ids,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volumes_api.api20_volumes_delete_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n return self._call_api(endpoint, kwargs)",
"def delete_vs(vs_name, created_objects):\r\n if keep_objects:\r\n return\r\n custom_object_api_instance = client.CustomObjectsApi()\r\n try:\r\n custom_object_api_response = custom_object_api_instance.delete_namespaced_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1\",\r\n plural=\"volumesnapshots\",\r\n name=vs_name,\r\n namespace=namespace_value\r\n )\r\n LOGGER.debug(custom_object_api_response)\r\n LOGGER.info(f\"Volume Snapshot Delete : {vs_name} deleted\")\r\n created_objects[\"vs\"].remove(vs_name)\r\n except ApiException as e:\r\n LOGGER.error(f\"Exception when calling CustomObjectsApi->delete_cluster_custom_object: {e}\")\r\n clean_with_created_objects(created_objects)\r\n assert False",
"def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)",
"def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):\n return self.system.delete(uri=uri, api=api, headers=headers)",
"def delete_volume(self, volume):\n vg_name = self.get_volume_group_name(volume.id)\n vol_name = self.get_volume_name(volume.id)\n try:\n if self._get_is_replica(volume.volume_type) and self.replica:\n self._delete_volume_replica(volume, vg_name, vol_name)\n\n LOG.debug(\"Searching and deleting volume: %s in K2.\", vol_name)\n vol_rs = self.client.search(\"volumes\", name=vol_name)\n if vol_rs.total != 0:\n vol_rs.hits[0].delete()\n LOG.debug(\"Searching and deleting vg: %s in K2.\", vg_name)\n vg_rs = self.client.search(\"volume_groups\", name=vg_name)\n if vg_rs.total != 0:\n vg_rs.hits[0].delete()\n except Exception as ex:\n LOG.exception(\"Deletion of volume %s failed.\", vol_name)\n raise KaminarioCinderDriverException(reason=ex)",
"def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return",
"def storage_pool_delete_by_storage(context, storage_id):\n _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete()",
"def delete_volume_snapshots(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n ids=None, # type: List[str]\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n ids=ids,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volume_snapshots_api.api20_volume_snapshots_delete_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n return self._call_api(endpoint, kwargs)",
"def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n \"\"\"delete from FileStorage.__objects\"\"\"\n del stored_objects[instance]\n \"\"\"overwrite the new data to file.json\"\"\"\n models.storage.save()",
"def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)",
"def remove(self, mount_point, delete_vols=False, detach=True):\n log.debug(\"Removing volume-based FS @ mount point {0} (delete_vols: \"\n \"{1}; detach: {2})\".format(mount_point, delete_vols, detach))\n self.unmount(mount_point)\n if detach:\n log.debug(\"Detaching volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if self.detach():\n log.debug(\"Detached volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if ((self.static and (ServiceRole.GALAXY_DATA not in self.fs.svc_roles))\n or delete_vols):\n log.debug(\"Deleting volume {0} as part of {1} removal\".format(\n self.volume_id, self.fs.get_full_name()))\n self.delete()\n else:\n log.debug(\"Unmounted {0} but was instructed not to detach volume {1}\"\n .format(self.fs.get_full_name(), self.volume_id))"
] | [
"0.7386203",
"0.7326507",
"0.70525193",
"0.67831665",
"0.6738581",
"0.6667749",
"0.65804183",
"0.65754896",
"0.65166175",
"0.6488847",
"0.6486761",
"0.6472515",
"0.63933235",
"0.63833314",
"0.6362582",
"0.6329666",
"0.6297816",
"0.624408",
"0.62437993",
"0.6227428",
"0.6224194",
"0.6223354",
"0.62043476",
"0.61300623",
"0.6108977",
"0.6103956",
"0.60168815",
"0.60103875",
"0.6004419",
"0.597958"
] | 0.78018206 | 0 |
Add existing storage volume on the storage system into Oneview [Arguments] | def fusion_api_add_existing_storage_volume(self, body, api=None, headers=None):
return self.volume.add_existing(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_create_storage_volume(self, body, api=None, headers=None):\n return self.volume.create(body=body, api=api, headers=headers)",
"def do_create_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No name given.\")\n return\n if len(args) == 1:\n self.perror(\"No path given.\")\n return\n if not os.path.isabs(args[1]):\n print(\"Path must be absolute: \" + args[1])\n return\n self.do_coroutine(self._localStorageRoutines.create_volume_routine(args[0], args[1]))",
"def add_volume(self, volume: 'Volume'):\n self.volumes.append(volume)",
"def _create_volume(self):\n vol = {}\n vol['size'] = 1\n vol['availability_zone'] = 'test'\n return db.volume_create(self.context, vol)['id']",
"def add(self):\n self.create(self.fs.name)\n # Mark a volume as 'static' if created from a snapshot\n # Note that if a volume is marked as 'static', it is assumed it\n # can be deleted upon cluster termination!\n if (ServiceRole.GALAXY_DATA not in self.fs.svc_roles and\n (self.from_snapshot_id is not None or self.from_archive is not\n None)):\n log.debug(\"Marked volume '%s' from file system '%s' as 'static'\" %\n (self.volume_id, self.fs.name))\n # FIXME: This is a major problem - any new volumes added from a snapshot\n # will be assumed 'static'. This is OK before being able to add an\n # arbitrary volume as a file system but is no good any more. The\n # problem is in automatically detecting volumes that are supposed\n # to be static and are being added automatically at startup\n if self.from_archive:\n self.fs.kind = 'volume' # Treated as a regular volume after initial extraction\n else:\n self.static = True\n self.fs.kind = 'snapshot'\n else:\n self.fs.kind = 'volume'\n if self.attach():\n us = os.path.join(self.app.path_resolver.galaxy_data, 'upload_store')\n misc.remove(us)\n log.debug(\"Volume attached, mounting {0}\".format(self.fs.mount_point))\n self.mount(self.fs.mount_point)",
"def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)",
"def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )",
"def create_volume(c,i):\n return c.volumes.create(\n size = \"10\",\n display_name = \"instantserver-1\",\n display_description = \"Volume for instantserver-1\",\n imageRef = i\n )",
"def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n data = connection_info['data']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n luns = [i.lun for i in data_disks]\n new_lun = 1\n # azure allow upto 16 extra datadisk, 1 os disk + 1 ephemeral disk\n # ephemeral disk will always be sdb for linux.\n for i in range(1, 16):\n if i not in luns:\n new_lun = i\n break\n else:\n msg = 'Can not attach volume, exist volume amount upto 16.'\n LOG.error(msg)\n raise nova_ex.NovaException(msg)\n disk = self.disks.get(CONF.azure.resource_group, data['disk_name'])\n managed_disk = dict(id=disk.id)\n data_disk = dict(lun=new_lun,\n name=data['disk_name'],\n managed_disk=managed_disk,\n create_option='attach')\n data_disks.append(data_disk)\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Attach Volume to Instance in Azure finish\"),\n instance=instance)",
"def test_extend_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.extended = {'name': '', 'size': '0',\n 'storageserver': ''}\n self.driver.extend_volume(volume, 12)\n expected = {'name': 'volume10', 'size': '2',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,'}\n self.assertDictMatch(expected, self.extended)",
"def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):",
"def add_volume(self, size=100):\n tfvars_file = \"terraform.tfvars.json\"\n with open(os.path.join(self.cluster_path, tfvars_file)) as f:\n tfvars = json.load(f)\n\n cluster_id = tfvars['cluster_id']\n worker_pattern = f'{cluster_id}-worker*'\n logger.info(f'Worker pattern: {worker_pattern}')\n self.create_ebs_volumes(worker_pattern, size)",
"def database_volume_add(volume_obj):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query = query.filter(model.Volume.uuid == volume_obj.uuid)\n volume = query.first()\n if not volume:\n volume = model.Volume()\n volume.uuid = volume_obj.uuid\n volume.name = volume_obj.name\n volume.description = volume_obj.description\n volume.avail_status = json.dumps(volume_obj.avail_status)\n volume.action = volume_obj.action\n volume.size_gb = volume_obj.size_gb\n volume.bootable = volume_obj.bootable\n volume.encrypted = volume_obj.encrypted\n volume.image_uuid = volume_obj.image_uuid\n volume.nfvi_volume_data = json.dumps(volume_obj.nfvi_volume.as_dict())\n session.add(volume)\n else:\n volume.name = volume_obj.name\n volume.description = volume_obj.description\n volume.avail_status = json.dumps(volume_obj.avail_status)\n volume.action = volume_obj.action\n volume.size_gb = volume_obj.size_gb\n volume.bootable = volume_obj.bootable\n volume.encrypted = volume_obj.encrypted\n volume.image_uuid = volume_obj.image_uuid\n volume.nfvi_volume_data = json.dumps(volume_obj.nfvi_volume.as_dict())\n db.commit()",
"def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )",
"def store_volume(volume_name):\n class store(argparse.Action):\n def __call__(self, parser, namespace, values, option_strings = None):\n # Add the new volume to the list of volumes\n volumes = getattr(namespace, \"volumes\", [])\n new_volume = NamedVolume(volume_name, Path(values)) if values else None\n setattr(namespace, \"volumes\", [*volumes, new_volume])\n\n # Allow the new volume to be found by name on the opts object\n setattr(namespace, volume_name.replace('/', '_'), new_volume)\n\n return store",
"def attach_volume(self):\n\n # Choose volume\n volume_id = self._choose_among_available_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Choose instance\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Attach the volume\n print '# Attaching volume \"%s\"!' % volume_id\n if self.compute.attach_volume(volume_id, instance_id):\n print 'The volume has been attached!'\n else:\n print 'The volume could not been attached'",
"def update_volumes():\n print 'do something useful here'",
"def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)",
"def create(self, filesystem=None):\n if not self.size and not self.from_snapshot_id and not self.from_archive:\n log.error('Cannot add a {0} volume without a size, snapshot ID or '\n 'archive url; aborting.'.format(self.fs))\n return False\n # If creating the volume from a snaphost, get the expected volume size\n if self.from_snapshot_id and not self.volume:\n self.snapshot = self.app.cloud_interface.get_snapshot(self.from_snapshot_id)\n if not self.snapshot:\n log.error(\"Did not retrieve Snapshot object for {0}; aborting.\"\n .format(self.from_snapshot_id))\n return False\n # We need a size to be able to create a volume, so if none\n # is specified, use snapshot size\n if not self.size:\n si = self.app.cloud_interface.get_snapshot_info(self.from_snapshot_id)\n self.size = si.get('volume_size')\n # If it does not already exist, create the volume\n if self.status == volume_status.NONE:\n log.debug(\"Creating a new volume of size '%s' in zone '%s' from \"\n \"snapshot '%s' for %s.\"\n % (self.size, self.app.cloud_interface.get_zone(),\n self.from_snapshot_id, self.fs))\n self.volume = self.app.cloud_interface.create_volume(\n self.size,\n self.app.cloud_interface.get_zone(),\n snapshot=self.from_snapshot_id)\n if self.volume:\n # When creating from a snapshot in Euca, volume.size may be None\n self.size = int(self.volume.size or 0)\n log.debug(\"Created a new volume of size '%s' from snapshot '%s' \"\n \"with ID '%s' in zone '%s' for %s.\"\n % (self.size, self.from_snapshot_id, self.volume_id,\n self.app.cloud_interface.get_zone(), self.fs))\n else:\n log.warning(\"No volume object - did not create a volume?\")\n return False\n else:\n log.debug(\"Tried to create a volume for %s but it is in state '%s' \"\n \"(volume ID: %s)\" % (self.fs, self.status, self.volume_id))\n return False\n # Add tags to newly created volumes (do this outside the inital if/else\n # to ensure the tags get assigned even if using an existing volume vs.\n # creating a new one)\n self.app.cloud_interface.add_tag(\n self.volume, 'Name', self.app.config['cluster_name'])\n self.app.cloud_interface.add_tag(\n self.volume, 'bucketName', self.app.config['bucket_cluster'])\n if self.fs:\n self.app.cloud_interface.add_tag(\n self.volume, 'filesystem', self.fs.get_full_name())\n self.app.cloud_interface.add_tag(\n self.volume, 'roles', ServiceRole.to_string(self.fs.svc_roles))\n return True",
"def cmd_stor(args):",
"def _create_volume(display_name='test_volume',\n display_description='this is a test volume',\n status='available',\n size=1,\n project_id=fake.PROJECT_ID,\n attach_status=fields.VolumeAttachStatus.DETACHED):\n vol = {}\n vol['host'] = 'fake_host'\n vol['size'] = size\n vol['user_id'] = fake.USER_ID\n vol['project_id'] = project_id\n vol['status'] = status\n vol['display_name'] = display_name\n vol['display_description'] = display_description\n vol['attach_status'] = attach_status\n vol['availability_zone'] = 'fake_zone'\n vol['volume_type_id'] = fake.VOLUME_TYPE_ID\n return db.volume_create(context.get_admin_context(), vol)['id']",
"def execute(self,\n context: context.RequestContext,\n optional_args: dict,\n **kwargs) -> dict[str, Any]:\n\n src_volid = kwargs.get('source_volid')\n src_vol = None\n if src_volid is not None:\n src_vol = objects.Volume.get_by_id(context, src_volid)\n bootable = False\n if src_vol is not None:\n bootable = src_vol.bootable\n elif kwargs.get('snapshot_id'):\n snapshot = objects.Snapshot.get_by_id(context,\n kwargs.get('snapshot_id'))\n volume_id = snapshot.volume_id\n snp_vol = objects.Volume.get_by_id(context, volume_id)\n if snp_vol is not None:\n bootable = snp_vol.bootable\n availability_zones = kwargs.pop('availability_zones')\n volume_properties = {\n 'size': kwargs.pop('size'),\n 'user_id': context.user_id,\n 'project_id': context.project_id,\n 'status': 'creating',\n 'attach_status': fields.VolumeAttachStatus.DETACHED,\n 'encryption_key_id': kwargs.pop('encryption_key_id'),\n # Rename these to the internal name.\n 'display_description': kwargs.pop('description'),\n 'display_name': kwargs.pop('name'),\n 'multiattach': kwargs.pop('multiattach'),\n 'bootable': bootable,\n }\n if len(availability_zones) == 1:\n volume_properties['availability_zone'] = availability_zones[0]\n\n # Merge in the other required arguments which should provide the rest\n # of the volume property fields (if applicable).\n volume_properties.update(kwargs)\n volume = objects.Volume(context=context, **volume_properties)\n volume.create()\n\n # FIXME(dulek): We're passing this volume_properties dict through RPC\n # in request_spec. This shouldn't be needed, most data is replicated\n # in both volume and other places. We should make Newton read data\n # from just one correct place and leave just compatibility code.\n #\n # Right now - let's move it to versioned objects to be able to make\n # non-backward compatible changes.\n\n volume_properties = objects.VolumeProperties(**volume_properties)\n\n return {\n 'volume_id': volume['id'],\n 'volume_properties': volume_properties,\n # NOTE(harlowja): it appears like further usage of this volume\n # result actually depend on it being a sqlalchemy object and not\n # just a plain dictionary so that's why we are storing this here.\n #\n # In the future where this task results can be serialized and\n # restored automatically for continued running we will need to\n # resolve the serialization & recreation of this object since raw\n # sqlalchemy objects can't be serialized.\n 'volume': volume,\n }",
"def create_volume(self, volume):\n # Generate App Instance, Storage Instance and Volume\n # Volume ID will be used as the App Instance Name\n # Storage Instance and Volumes will have standard names\n policies = self._get_policies_for_resource(volume)\n num_replicas = int(policies['replica_count'])\n storage_name = policies['default_storage_name']\n volume_name = policies['default_volume_name']\n\n app_params = (\n {\n 'create_mode': \"openstack\",\n 'uuid': str(volume['id']),\n 'name': _get_name(volume['id']),\n 'access_control_mode': 'deny_all',\n 'storage_instances': {\n storage_name: {\n 'name': storage_name,\n 'volumes': {\n volume_name: {\n 'name': volume_name,\n 'size': volume['size'],\n 'replica_count': num_replicas,\n 'snapshot_policies': {\n }\n }\n }\n }\n }\n })\n self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params)",
"def create_volume(self, size=1, name=None, description=None,\n image=None, check=True):\n metadata = '{0}={1}'.format(config.STEPLER_PREFIX,\n config.STEPLER_PREFIX)\n cmd = 'cinder create ' + str(size) + ' --metadata ' + metadata\n if image:\n cmd += ' --image ' + image\n if name:\n cmd += ' --name ' + moves.shlex_quote(name)\n if description is not None:\n cmd += ' --description ' + moves.shlex_quote(description)\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_AVAILABLE_TIMEOUT, check=check)\n volume_table = output_parser.table(stdout)\n volume = {key: value for key, value in volume_table['values']}\n return volume",
"def assign_volume(VolumeId=None, InstanceId=None):\n pass",
"def add_file_or_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt # type: Optional[Text]\n ):\n if not volume.resolved.startswith(\"_:\"):\n self._add_volume_binding(volume.resolved, volume.target) # this one defaults to read_only",
"def AttachVolume(self, request, global_params=None):\n config = self.GetMethodConfig('AttachVolume')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def volumes(self):",
"def _add(args):\n\n fs = disdat.fs.DisdatFS()\n\n if not fs.in_context():\n _logger.warning('Not in a data context')\n return\n\n _ = api.add(fs._curr_context.get_local_name(),\n args.bundle,\n args.path_name,\n tags=common.parse_args_tags(args.tag))\n\n return",
"def add_volume(self, oid, volume_id):\n data = {\n \"volumeAttachment\": {\n \"volumeId\": volume_id,\n }\n }\n path = '/servers/%s/os-volume_attachments' % oid\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Add volume %s to server %s: %s' % \n (volume_id, oid, truncate(res)))\n return res[0]['volumeAttachment']"
] | [
"0.64701086",
"0.6464822",
"0.6329343",
"0.6256644",
"0.6083061",
"0.60799366",
"0.60161775",
"0.6015726",
"0.5991257",
"0.59508777",
"0.5932107",
"0.592002",
"0.5906367",
"0.58913493",
"0.5878204",
"0.5808978",
"0.57532054",
"0.5737604",
"0.5717718",
"0.5694623",
"0.5679895",
"0.56731355",
"0.56414425",
"0.56261474",
"0.5607825",
"0.55968463",
"0.55968046",
"0.5592309",
"0.5571626",
"0.5567146"
] | 0.6710524 | 0 |
Get storage volume attachments. [Arguments] | def fusion_api_get_storage_volume_attachments(self, uri=None, param='', api=None, headers=None):
return self.volume_attachment.get(uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_volumes(self, oid):\n path = '/servers/%s/os-volume_attachments' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List volumes for server %s: %s' % \n (oid, truncate(res)))\n return res[0]['volumeAttachments']",
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def volume_attachments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volume_attachments\")",
"def _attach_volume(self):\n return []",
"def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)",
"def items(self) -> typing.List[\"VolumeAttachment\"]:\n return typing.cast(\n typing.List[\"VolumeAttachment\"],\n self._properties.get(\"items\"),\n )",
"def getStorageVolumeData(self,node,storage,volume):\n data = self.connect('get','nodes/%s/storage/%s/content/%s' % (node,storage,volume),None)\n return data",
"def Get_Attachments(service, userId, msg_id, store_dir):\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)",
"def listDeviceAttachments(*args, attribute: AnyStr=\"\", axis: AnyStr=\"\", clutch: AnyStr=\"\",\n device: AnyStr=\"\", file: AnyStr=\"\", selection: bool=True, write:\n bool=True, **kwargs)->AnyStr:\n pass",
"def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})",
"def bootable_volume(volumes):\n for volume in volumes:\n if '/dev/vda' in volume['attachments']:\n return volume",
"def fusion_api_patch_storage_volume_attachments(self, body, param='', api=None, headers=None):\n return self.volume_attachment.patch(body=body, param=param, api=api, headers=headers)",
"def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass",
"def get_attachments(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_ATTACHMENTS.format(expense_id))",
"def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list",
"def volume_get(context, volume_id):\n return _volume_get(context, volume_id)",
"def l10n_mx_edi_retrieve_attachments(self):\n self.ensure_one()\n if not self.l10n_mx_edi_cfdi_name:\n return []\n domain = [\n ('res_id', '=', self.id),\n ('res_model', '=', self._name),\n ('name', '=', self.l10n_mx_edi_cfdi_name )]\n return self.env['ir.attachment'].search(domain)",
"def _get_binary_filesystem(self, cr, uid, ids, name, arg, context=None):\n res = {}\n attachment_obj = self.pool.get('ir.attachment')\n\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = False\n attachment_ids = attachment_obj.search(cr, uid, [('res_model','=',self._name),('res_id','=',record.id),('binary_field','=',name)], context=context)\n import logging\n #_logger = logging.getLogger(__name__)\n #_logger.info('res %s', attachment_ids)\n if attachment_ids:\n img = attachment_obj.browse(cr, uid, attachment_ids, context=context)[0].datas\n #_logger.info('res %s', img)\n res[record.id] = img\n return res",
"def attachments_get(self,\r\n document_id,\r\n attachment_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id,\r\n attachment_id=attachment_id)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments/{attachmentId}'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id,\r\n 'attachmentId': attachment_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, AttachmentResponse.from_dictionary)",
"def attachments_list(self,\r\n document_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, AttachmentListItem.from_dictionary)",
"def attachments(self):\n return self.properties.get('attachments',\n AttachmentCollection(self.context, ResourcePath(\"attachments\", self.resource_path)))",
"def fusion_api_get_sas_logical_jbod_attachments(self, uri=None, param='', api=None, headers=None):\n return self.sas_logical_jbod_attachments.get(uri=uri, api=api, headers=headers, param=param)",
"def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols",
"def volumes(self):",
"def get_attachments_for(parser, token):\n def next_bit_for(bits, key, if_none=None):\n try:\n return bits[bits.index(key)+1]\n except ValueError:\n return if_none\n\n bits = token.contents.split()\n args = {\n 'obj': next_bit_for(bits, 'get_attachments_for'),\n 'var_name': next_bit_for(bits, 'as', '\"attachments\"'),\n }\n return AttachmentsForObjectNode(**args)",
"def get_attachments(service, user_id, msg_id, save_path):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n if 'parts' not in message['payload']:\n if message['payload']['body']['size'] > 0:\n print(\"Downloading single-part attachment...\")\n file_data = base64.urlsafe_b64decode(message['payload']['body']['data'].encode('UTF-8'))\n path = ''.join([save_path, sanitize_string(message['snippet'][0:70])])\n write_file_to_location(file_data, path)\n elif 'parts' in message['payload']:\n for part in message['payload']['parts']:\n print(\"Downloading multi-part attachment...\")\n if part['filename']:\n data = get_data_from_part(service, user_id, msg_id, part)\n file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))\n path = ''.join([save_path, part['filename']])\n write_file_to_location(file_data, path)\n # Nothing to download\n else:\n return None\n\n except errors.HttpError as error:\n print(f\"An error occurred: {error}\")\n\n return msg_id",
"def getPostAttachment(self,id,filename):\n # GET /posts/$id/attachments/$filename\n pass",
"def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()",
"def attachments(self):\n return self._attachments",
"def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)"
] | [
"0.7300456",
"0.7208481",
"0.6899568",
"0.6569004",
"0.6527376",
"0.63582116",
"0.631158",
"0.6246561",
"0.61307013",
"0.6094036",
"0.60406727",
"0.60042816",
"0.59712315",
"0.5970954",
"0.59668416",
"0.59643215",
"0.5960853",
"0.59218377",
"0.58457655",
"0.579255",
"0.5768678",
"0.5762479",
"0.57462233",
"0.5727898",
"0.57275236",
"0.5693915",
"0.56849015",
"0.5644415",
"0.5611357",
"0.55841315"
] | 0.82864267 | 0 |