| Column          | Type     | Lengths / Values       |
|-----------------|----------|------------------------|
| query           | string   | 9 to 9.05k chars       |
| document        | string   | 10 to 222k chars       |
| metadata        | dict     | per-row objective spec |
| negatives       | sequence | 30 items               |
| negative_scores | sequence | 30 items               |
| document_score  | string   | 4 to 10 chars          |
| document_rank   | string   | 2 classes              |
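Each row below pairs a natural-language query describing an API call with the positive code document that implements it, plus 30 mined hard-negative snippets, one similarity score per negative, the positive document's own score, and its rank. As a minimal sketch of loading and inspecting such a row with the Hugging Face `datasets` library (the dataset ID below is a placeholder, not the real repository name):

```python
from datasets import load_dataset

# Hypothetical dataset ID -- substitute the actual Hub repository name.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language description of the API call
print(row["document"])             # the positive (matching) function definition
print(len(row["negatives"]))       # 30 hard-negative code snippets
print(row["negative_scores"][:3])  # one similarity score per negative
print(row["document_score"], row["document_rank"])
```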
Adds an enclosure to the appliance [Arguments]
def fusion_api_add_enclosure(self, body, api=None, headers=None): return self.enclosure.add(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addEnclosure(self, enclosure):\n self.enclosures.append(enclosure)", "def addAnimalToEnclosure(self, animal, enclosureID):\n for e in self.enclosures:\n if(enclosureID == e.getID()):\n e.addAnimal(animal)", "def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def enclosure_id(self, enclosure_id):\n\n self._enclosure_id = enclosure_id", "def fusion_api_create_enclosure_group(self, body, api=None, headers=None):\n return self.enclosure_group.create(body, api, headers)", "def InvocationAddEncAlgo(builder, encAlgo):\n return AddEncAlgo(builder, encAlgo)", "def fusion_api_edit_enclosure(self, body, uri, api=None, headers=None):\n return self.enclosure.update(body, uri, api, headers)", "def add_handout(self, asset_name):\r\n self._handouts.append(asset_name)", "def enclosure_disks(self, enclosure_disks):\n\n self._enclosure_disks = enclosure_disks", "def add_single_enclosure(self, track):\n pin = self.convert_track_to_pin(track)\n (ll,ur) = pin.rect\n self.cell.add_rect(layer=self.get_layer(track.z),\n offset=ll,\n width=ur.x-ll.x,\n height=ur.y-ll.y)", "def InvocationAddEncKey(builder, encKey):\n return AddEncKey(builder, encKey)", "def add(self, arguments):\n url = arguments['<location>']\n if url:\n name = arguments['<name>']\n else:\n url = arguments['<name>']\n name = None\n version = arguments['--box-version']\n force = arguments['--force']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n utils.add_box(url, name=name, version=version, force=force, requests_kwargs=requests_kwargs)", "def bevelPlus(*args, bevelInside: bool=False, capSides: Union[int, bool]=4,\n constructionHistory: bool=True, innerStyle: Union[int, bool]=0, joinSurfaces:\n bool=True, name: AnyStr=\"\", normalsOutwards: bool=True, numberOfSides: Union[int,\n bool]=4, outerStyle: Union[int, bool]=0, polygon: int=0, range: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def addAssociation(self, *args):\n return _libsbml.FbcAnd_addAssociation(self, *args)", "def GroundExcelAddEnemyArmorType(builder, EnemyArmorType):\n return AddEnemyArmorType(builder, EnemyArmorType)", "def fusion_api_edit_enclosure_group(self, body, uri, api=None, headers=None):\n return self.enclosure_group.update(body, uri, api, headers)", "def fusion_api_put_drive_enclosure(self, body, uri, param='', api=None, headers=None):\n return self.drive_enclosure.put(body=body, uri=uri, param=param, api=api, headers=headers)", "def attach_pwn(args):\n container_name = _read_container_name()\n\n # FIXME Is it better that we just exec it with given name?\n conts = container.list(filters={'name':container_name})\n if len(conts) != 1:\n raise InstallationError('Installation seems to be run. 
There are more than one image called ancypwn')\n _attach_interactive(conts[0].name)", "def do_add(self, args):\n argument_list = args.split()\n if len(argument_list) < 1:\n self.__bad_arguments(\"add\")\n else:\n print \"Added \" + args + \".\"\n AssassinsManager.add_assassin(self.assassins_manager, args.split()[0])", "def add(isamAppliance, username, federation_id, aliases, type=None, partner_id=None, check_mode=False, force=False):\n warnings = [\"Idempotency has not been coded for this function.\"]\n if force is True or _check(isamAppliance, username) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True, warnings=warnings)\n else:\n fed_id = federation_id\n if partner_id is not None:\n fed_id = fed_id + \"|\" + partner_id\n json_data = {\n \"username\": username,\n \"federation_id\": fed_id,\n \"aliases\": aliases\n }\n if type is not None:\n json_data['type'] = type\n return isamAppliance.invoke_post(\n \"Create an alias association\", uri, json_data, warnings=warnings,\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()", "def addAssociation(self, *args):\n return _libsbml.FbcOr_addAssociation(self, *args)", "def fusion_api_create_logical_enclosure_payload(self,\n name,\n api=None,\n enclosureGroupUri=None,\n enclosureUris=[],\n firmwareBaselineUri=\"\",\n forceInstallFirmware=0):\n return (self.logical_enclosure.make_body(name=name,\n api=api,\n enclosureGroupUri=enclosureGroupUri,\n enclosureUris=enclosureUris,\n firmwareBaselineUri=firmwareBaselineUri,\n forceInstallFirmware=forceInstallFirmware))", "def addAssociation(self, *args):\n return _libsbml.Association_addAssociation(self, *args)", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def fusion_api_create_enclosure_group_payload(self, body, lig_map=None, api=None):\n return self.enclosure_group.make_body(api, body, lig_map)", "def add_envelope(self) -> reapy.Envelope:\r\n ...", "def _add_consume(self, name):\n self._consumes.append(\"- {name}\\n\")\n pass", "def addCompartmentGlyph(self, *args):\n return _libsbml.Layout_addCompartmentGlyph(self, *args)" ]
[ "0.70115876", "0.56879956", "0.5439087", "0.5382266", "0.53373885", "0.5330535", "0.52169186", "0.5206843", "0.5172596", "0.51174074", "0.5097141", "0.5072619", "0.50204265", "0.50086886", "0.49856225", "0.49476922", "0.49233353", "0.48971295", "0.48884445", "0.48786825", "0.48719966", "0.48291206", "0.48260894", "0.48254725", "0.48254725", "0.48254725", "0.4825432", "0.47848567", "0.47469398", "0.47378787" ]
0.62107855
1
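The metadata field on every row declares a single triplet objective over (query, document, negatives), i.e. each row is meant to be consumed as contrastive training triplets. Purely as an illustration, assuming a generic embedding model that is not specified anywhere in this dataset, one such group could feed an InfoNCE-style loss:

```python
import torch
import torch.nn.functional as F

def info_nce_loss(query_emb, pos_emb, neg_embs, temperature=0.05):
    """InfoNCE over one (query, document, negatives) group.

    query_emb: (d,)   embedding of the query
    pos_emb:   (d,)   embedding of the positive document
    neg_embs:  (n, d) embeddings of the 30 negatives
    """
    # Candidate list with the positive at index 0, negatives after it.
    candidates = torch.cat([pos_emb.unsqueeze(0), neg_embs], dim=0)  # (n + 1, d)
    sims = F.cosine_similarity(query_emb.unsqueeze(0), candidates)   # (n + 1,)
    logits = (sims / temperature).unsqueeze(0)                       # (1, n + 1)
    target = torch.zeros(1, dtype=torch.long)  # the positive sits at index 0
    return F.cross_entropy(logits, target)
```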
Update an enclosure. Currently the only attribute that can be updated is the name. [Arguments]
def fusion_api_edit_enclosure(self, body, uri, api=None, headers=None): return self.enclosure.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_edit_enclosure_group(self, body, uri, api=None, headers=None):\n return self.enclosure_group.update(body, uri, api, headers)", "def enclosure_id(self, enclosure_id):\n\n self._enclosure_id = enclosure_id", "def fusion_api_patch_enclosure(self, body, uri, api=None, headers=None, etag=None):\n return self.enclosure.patch(body, uri, api, headers, etag)", "def addEnclosure(self, enclosure):\n self.enclosures.append(enclosure)", "def update_object(self, name: str) -> None:", "def fusion_api_update_logical_enclosure(self, body, uri, param='', api=None, headers=None, etag=None):\n return self.logical_enclosure.put(body, uri, param, api, headers, etag)", "def __setitem__(self, name, value):\n self.gattrs[name] = value", "def fusion_api_patch_drive_enclosure(self, body, uri, api=None, headers=None):\n return self.drive_enclosure.patch(body, uri, api, headers)", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)", "def update_E(self):", "def update(self, *args, **kwargs):\n assign = ('id', 'width', 'height', 'x', 'y')\n if args:\n for key, idx in zip(assign, range(len(args))):\n exec('self.{} = {}'.format(key, args[idx]))\n else:\n for key, val in kwargs.items():\n if key in ('id', 'width', 'height', 'x', 'y'):\n exec('self.{} = {}'.format(key, val))", "def updateName(self,name):\n self.name = name", "def update(self, *args, **kwargs):", "def addAnimalToEnclosure(self, animal, enclosureID):\n for e in self.enclosures:\n if(enclosureID == e.getID()):\n e.addAnimal(animal)", "def update(self, *args, **kwargs):\n if len(args) != 0:\n i = 0\n attr = ['id', 'width', 'height', 'x', 'y']\n for arg in args:\n setattr(self, attr[i], args[i])\n i += 1\n else:\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __setitem__(self, name, value) -> None:\n self.__setattr__(name, value)", "def fusion_api_put_drive_enclosure(self, body, uri, param='', api=None, headers=None):\n return self.drive_enclosure.put(body=body, uri=uri, param=param, api=api, headers=headers)", "def update(self, *args, **kwargs):\n attrs = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n\n if args:\n for i in range(len(args)):\n setattr(self, attrs[i], args[i])\n elif kwargs is not None:\n for key, value in kwargs.items():\n if key in attrs:\n setattr(self, key, value)", "def update(self, attrs):\n if attrs.get('name'):\n self.name = string.capwords(attrs.get('name'))\n if attrs.get('description'):\n self.description = attrs.get('description')\n if attrs.get('author'):\n self.author = attrs.get('author')\n\n try:\n db.session.add(self)\n db.session.commit()\n except IntegrityError as err:\n if isinstance(err.orig, UniqueViolation):\n raise Conflict(\"Name already used by another exercise.\")\n raise UnexpectedError(DATABASE_ERROR_MSG)\n except DBAPIError as err:\n raise UnexpectedError(DATABASE_ERROR_MSG)", "def __setitem__(self, name, attribs):\n \n assert(type(attribs) is list)\n \n self.register(Command(*([name] + attribs)))", "def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", 
\"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)", "def update(self, *args, **kwargs):\n if args and len(args) > 0:\n keys = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n for i, v in enumerate(args):\n setattr(self, keys[i], v)\n else:\n for k, v in kwargs.items():\n setattr(self, k, v)", "def update(self, *args, **kw):\n pass", "def update(self, *args, **kwargs):\n attributes = [\"id\", \"size\", \"x\", \"y\"]\n if len(args) > 0:\n for i in range(len(args)):\n setattr(self, attributes[i], args[i])\n else:\n self.id = kwargs.get(\"id\", self.id)\n self.size = kwargs.get(\"size\", self.size)\n self.x = kwargs.get(\"x\", self.x)\n self.y = kwargs.get(\"y\", self.y)" ]
[ "0.6253273", "0.58676934", "0.5731988", "0.56564224", "0.5643177", "0.55411965", "0.5414261", "0.5191689", "0.5146749", "0.5146749", "0.5146749", "0.5146749", "0.5146749", "0.5146749", "0.51454026", "0.5128274", "0.50855374", "0.50823885", "0.507266", "0.50626427", "0.504875", "0.5031071", "0.50251466", "0.50236636", "0.502321", "0.49948296", "0.49947867", "0.49935082", "0.4977326", "0.49720424" ]
0.6698176
0
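Note how document_score and document_rank relate across the two rows above: the first positive scores 0.62107855 while one of its negatives scores 0.70115876, and the row carries rank 1; the second positive scores 0.6698176, above every one of its negatives, and carries rank 0. This suggests, as an inference from the visible rows rather than documented behavior, that the rank counts how many negatives outscore the positive:

```python
def infer_document_rank(document_score: str, negative_scores: list[str]) -> int:
    # Inferred rule, checked only against the rows shown here: the rank is the
    # number of negatives whose score exceeds the positive document's score.
    # Scores are stored as strings in this dataset, so convert before comparing.
    pos = float(document_score)
    return sum(1 for s in negative_scores if float(s) > pos)
```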
Issues a PATCH request to an enclosure. See RESTAPI docs for valid request bodies [Arguments]
def fusion_api_patch_enclosure(self, body, uri, api=None, headers=None, etag=None): return self.enclosure.patch(body, uri, api, headers, etag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def patch(self, *args, **kwargs):\n self.request(\"patch\", *args, **kwargs)", "def patch(self, url, body=None, headers=None):\n return self._request('PATCH', url, body, headers)", "def httpPatch(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('PATCH', url, data, params, headers)", "def patch(self, url_or_path, *args, **kwargs):\n return self.request.patch(url_or_path, *args, **kwargs).json()", "def patch(resource, data, **kwargs):\n\tresp = requests.patch(\n\t\t_endpoint(resource, 'PATCH'),\n\t\tparams=_jsonify_dict_values(kwargs),\n\t\tdata=json.dumps(data),\n\t\theaders=PAYLOAD_HEADERS,\n\t\tverify=SERVER_CERT\n\t)\n\tresp.raise_for_status()\n\treturn resp.json()", "def handle_patch(self, api, command):\n return self._make_request_from_command('PATCH', command)", "def client_patch(self, path, data=None, content_type=client.MULTIPART_CONTENT, follow=False, **extra):\r\n\r\n data = data or {}\r\n response = super(client.Client, self).patch(path, data=data, content_type=content_type, **extra)\r\n if follow:\r\n response = self._handle_redirects(response, **extra)\r\n return response", "def _patch(self, url, data=None):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='PATCH',\n url=url,\n json=data,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()", "def patch(self, endpoint, params=None, data=None):\n params = params or dict()\n data = data or dict()\n return self.request(verb=requests.patch, address=self.project_address + endpoint,\n params=params, data=data)", "def patch(url, data=None, **kwargs):\n\n return request('patch', url, data=data, **kwargs)", "def patch(self, obj):\r\n self.require_item()\r\n request = http.Request('PATCH', self.get_url(), self.wrap_object(obj))\r\n\r\n return request, parsers.parse_json", "def patch(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'patch', api_path, *args, **kwargs)", "def fusion_api_patch_interconnect(self, body, uri, param='', api=None, headers=None):\n return self.ic.patch(body=body, uri=uri, api=api, headers=headers, param=param)", "def fusion_api_patch_fabric(self, uri, body, api=None, headers=None):\n return self.fabric.patch(uri, body, api, headers)", "def patch(self, path, body):\n url = urljoin(self.api_endpoint, path)\n response = requests.patch(url, json=body, headers=self.headers)\n return self._check_response(response)", "def patch(self, *args, **kwargs):\n return self.handle_patch_request()", "def patch(self, url, params='', headers=None, extra_environ=None,\n status=None, upload_files=None, expect_errors=False,\n content_type=None):\n return self._gen_request(RequestMethods.PATCH,\n url, params=params, headers=headers,\n extra_environ=extra_environ, status=status,\n upload_files=upload_files,\n expect_errors=expect_errors,\n content_type=content_type)", "def patch(self, endpoint, content=None, params=None):\n\t\treturn self._call(\"PATCH\", endpoint, content, params)", "def simulate_patch(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PATCH', path, **kwargs)", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def patch(self, uri, format='json', data=None, authentication=None, **kwargs):\r\n content_type = self.get_content_type(format)\r\n kwargs['content_type'] = content_type\r\n\r\n 
if data is not None:\r\n kwargs['data'] = self.serializer.serialize(data, format=content_type)\r\n\r\n if authentication is not None:\r\n kwargs['HTTP_AUTHORIZATION'] = authentication\r\n\r\n # This hurts because Django doesn't support PATCH natively.\r\n parsed = urlparse(uri)\r\n r = {\r\n 'CONTENT_LENGTH': len(kwargs['data']),\r\n 'CONTENT_TYPE': content_type,\r\n 'PATH_INFO': self.client._get_path(parsed),\r\n 'QUERY_STRING': parsed[4],\r\n 'REQUEST_METHOD': 'PATCH',\r\n 'wsgi.input': FakePayload(kwargs['data']),\r\n }\r\n r.update(kwargs)\r\n return self.client.request(**r)", "def fusion_api_generic_patch(self, body, uri, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers\n uri = 'https://%s%s' % (self.fusion_client._host, uri)\n return self.fusion_client.patch(uri=uri, headers=headers, body=json.dumps(body))", "def requestfactory_patch(self, path, data=None, content_type=client.MULTIPART_CONTENT, **extra):\r\n\r\n data = data or {}\r\n patch_data = self._encode_data(data, content_type)\r\n\r\n parsed = urlparse.urlparse(path)\r\n request = {\r\n 'CONTENT_LENGTH': len(patch_data),\r\n 'CONTENT_TYPE': content_type,\r\n 'PATH_INFO': self._get_path(parsed),\r\n 'QUERY_STRING': parsed[4],\r\n 'REQUEST_METHOD': 'PATCH',\r\n 'wsgi.input': client.FakePayload(patch_data),\r\n }\r\n request.update(extra)\r\n return self.request(**request)", "def simulate_patch(self, path='/', **kwargs):\n return self.simulate_request('PATCH', path, **kwargs)", "def test_client_can_do_patch_request(self):\n response = self.httpbin_4.test_requests_patch_method()\n self.assertEqual(response.request.method, 'PATCH')\n self.assertEqual(response.status_code, 200)", "def patch(self, request , pk=None):\n return Response({'message':'PATCH'})", "def patch(*args, **kwargs):\n return update(*args, patch=True, **kwargs)", "def patch(self , request , pk = None ):\r\n return Response({'method':'patch'})", "def patch(self, request, pk=None):\n\n return Response({'method': 'patch'})", "def _patch(self, path=None, version=None, params=None,\n data=None, json=None, header=None):\n return self.client.patch(module='mam', path=path, version=version,\n params=params, data=data,\n json=json, header=header)" ]
[ "0.7322979", "0.68914396", "0.68123513", "0.6810135", "0.67109245", "0.6618471", "0.660938", "0.65981245", "0.65977496", "0.6591905", "0.6591386", "0.6590417", "0.65834326", "0.6566933", "0.6551695", "0.6551422", "0.652787", "0.6520931", "0.65115", "0.6510823", "0.6449295", "0.6434416", "0.64131975", "0.63904715", "0.6387297", "0.63822615", "0.6381012", "0.635175", "0.6349887", "0.6338351" ]
0.71574557
1
Removes an enclosure from the appliance based on name OR uri [Arguments]
def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None): return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_enclosure_group(self, name=None, uri=None, api=None, headers=None):\n return self.enclosure_group.delete(name, uri, api, headers)", "def remove(name):", "def fusion_api_delete_logical_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.logical_enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)", "def removeAnimalFromEnclosure(self, enclosureID, animalID):\n animalID = int(animalID)\n enclosureID = int(enclosureID)\n for i in range(0, len(self.enclosures)):\n if(self.enclosures[i].getID() == enclosureID):\n self.enclosures[i].removeAnimal(animalID)\n\n for employee in self.employees:\n if(employee.getRole() == \"Veterinarian\" and employee.getAssignmentID() == animalID):\n employee.removeAssignment()", "def removeEnclosure(self, enclosureID):\n index = -1\n for i,enclosure in enumerate(self.enclosures):\n if int(enclosure.getID()) == int(enclosureID):\n index = i\n if(index != -1):\n for employee in self.employees:\n if(employee.getRole() == \"Zookeeper\"):\n if(int(employee.getAssignmentID()) == int(enclosureID)):\n employee.removeAssignment()\n del self.enclosures[index]", "def remove(self, egg):", "def removeItem(*args):", "def removeItem(*args):", "def remove_hero(apps, schema_editor):\n pass", "def remove_asset(self, name):\n if name in self.assets:\n del self.assets[name]", "def __delitem__(self, name):\n name = name.lower()\n del self._items[name]\n self._names.remove(name)", "def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)", "def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None):\n return self.driver.delete(name, uri, api, headers)", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def bdev_uring_delete(client, name):\n params = {'name': name}\n return client.call('bdev_uring_delete', params)", "def cleanup(name, client=None):\n credential_specs_path = _get_path(client)\n path = os.path.join(credential_specs_path, name + '.json')\n fs.rm_safe(path)", "def del_image(self, name):\r\n if self.images is None or name not in self.images:\r\n return\r\n l = self.images\r\n self.images = None\r\n l.setdefault('/empties/', [])\r\n # push the number on the empties list\r\n l['/empties/'].append(l[name])\r\n del l[name]\r\n self.images = l", "def deleteInstrumentFromName(self, name):\n matching_instruments = list(filter(lambda x: x.name == name,\n self.instruments))\n assert len(matching_instruments) == 1\n del self.instruments[name]", "def remove_from_hand(self):\n pass", "def fusion_api_remove_switch(self, name=None, uri=None, api=None, headers=None):\n return self.switch.delete(name, uri, api, headers)", "def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain 
{0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()", "def remove_asset(self, short_name):\n del self._assets[short_name]", "def remove(self, attributeIndexOrName) -> None:\n ...", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def uninstall(self, provider):\n pass # pragma: no cover", "def fusion_api_remove_license(self, uri=None, api=None, headers=None):\n return self.license.delete(uri=uri, api=api, headers=headers)", "def fusion_api_delete_fabric(self, name=None, uri=None, api=None, headers=None):\n return self.fabric.delete(name, uri, api, headers)", "def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)", "def remove_mix(self, name: str) -> None:\n self.remove(name)" ]
[ "0.59871954", "0.59854525", "0.5881928", "0.5877077", "0.56499386", "0.5634551", "0.54864186", "0.5345892", "0.5345892", "0.52494717", "0.5209986", "0.51710707", "0.51697314", "0.51480526", "0.51217175", "0.5116495", "0.50978315", "0.50975955", "0.5095221", "0.509381", "0.50551784", "0.5044714", "0.50252897", "0.50252646", "0.50183976", "0.49833077", "0.49636298", "0.4941764", "0.4933424", "0.49276438" ]
0.68903214
0
Gets a default or paginated collection of Enclosures [Arguments]
def fusion_api_get_enclosures(self, uri=None, param='', api=None, headers=None): return self.enclosure.get(uri=uri, param=param, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEnclosures(self):\n return self.enclosures", "def get_enclosures(self, controller='all'):\n output = self.run_json('/c{}/eall show all'.format(controller))\n try:\n controllers = output['Controllers']\n except KeyError:\n raise StorcliException('Output is missing Controllers segment')\n\n return [c['Response Data'] for c in controllers]", "def collectionContainer(): # This name cannot be changed\n return [sampleMethodObject(), sampleMethodObject()]", "def fusion_api_get_enclosure_groups(self, uri=None, param='', api=None, headers=None):\n return self.enclosure_group.get(uri=uri, api=api, headers=headers, param=param)", "def get_collections(self): # real signature unknown; restored from __doc__\n return []", "def fusion_api_get_sas_li_logical_drive_enclosures(self, uri=None, param='', api=None, headers=None):\n param = \"/logical-drive-enclosures%s\" % param\n return self.sasli.get(uri=uri, param=param, api=api, headers=headers)", "def get_all(self, **kwargs):\n context = pecan.request.context\n policy.enforce(context, \"container:get_all\",\n action=\"container:get_all\")\n return self._get_containers_collection(**kwargs)", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def paginated_call(self) -> global___Snippet.ClientCall:", "async def get_keys(self, collection):\n raise NotImplementedError", "def get_full_container_list(container_name, **kwargs):\n limit = 10000\n kwargs['limit'] = limit\n page = []\n seed = []\n _, page = get_conn().get_container(container_name, **kwargs)\n seed.extend(page)\n\n while len(page) == limit:\n # keep getting pages..\n kwargs['marker'] = seed[-1]['name']\n _, page = get_conn().get_container(container_name, **kwargs)\n seed.extend(page)\n\n return seed", "def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])", "def paginated_retrieval(methodname, itemtype):\n return compose(\n reusable,\n basic_interaction,\n map_yield(partial(_params_as_get, methodname)),\n )", "def keys(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return itertools.imap(ITEMGETTER_1,\n self.pairs(args, lo, hi, reverse, max, include, txn))", "def get_many(self, keys, default=None):\n raise NotImplementedError()", "def get_all_books() -> List[Dict]:\n pass", "def get_default_scopes(self, application=None, request=None, *args, **kwargs):\n # at the moment we assume that the default scopes are all those availables\n return list(ProtectedCapability.objects.filter(default=True).values_list('slug', flat=True))", "def call(self) -> List[Dict]:", "def get_all(self, name):\n\t\tpass", "def __call__(self):\n return self.get_items()", "async def get_keys(self, collection):\n _LOGGER.debug(\"Getting %s from memory.\", collection)\n results = []\n for database in self.databases:\n results.append(await database.get_keys(collection))\n return results[0]", "def get_cards(query_param):\n return _query_scryfall(query_param)", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n pass", "def get_paginator(operation_name=None):\n 
pass" ]
[ "0.605165", "0.54892236", "0.5281014", "0.521672", "0.51407933", "0.51326174", "0.5096011", "0.5005433", "0.50014704", "0.4951049", "0.4901131", "0.4885012", "0.48817846", "0.48252463", "0.4802431", "0.47987792", "0.47889432", "0.47860327", "0.47808155", "0.47803268", "0.4768455", "0.4767061", "0.47594762", "0.47594762", "0.47594762", "0.47594762", "0.47594762", "0.47594762", "0.47594762", "0.47594762" ]
0.6208389
0
Refreshes a specified Enclosure URI [Arguments]
def fusion_api_refresh_enclosure(self, body={"refreshState": "RefreshPending"}, uri=None, api=None, headers=None): return self.enclosure.put(body, uri=uri, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self, url, args, cancellationSignal):\n pass", "def Refresh(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"refresh\", payload=payload, response_object=None)", "def fusion_api_edit_enclosure(self, body, uri, api=None, headers=None):\n return self.enclosure.update(body, uri, api, headers)", "def update(self, uri, values, where, selectionArgs):\n pass", "def command_refresh_repo(self):\n repoinit.refresh(*self.args())", "def uri(self, uri):\n self._uri = uri", "def uri(self, uri):\n self._uri = uri", "def _urlfetch(**kwargs):\n return ndb.get_context().urlfetch(**kwargs)", "def uri(self, uri):\n\n self._uri = uri", "def uri(self, uri):\n\n self._uri = uri", "def refresh():\n return __apf_cmd(\"-e\")", "def refresh(self, url=None):\n if url is None:\n if 'url' in self._values:\n url = self._values['url']\n else:\n ex = ValueError(\"Can't refresh {object} without a URL\".format(object=type(self).__name__))\n raise ex\n new_item = self._endpoint._get(url) # luckily this object has the URL we can refresh from\n self._values.clear()\n self._values.update(new_item._values)", "def fusion_api_refresh_switch(self, uri, api=None, headers=None):\n return self.switch.refresh(uri, api, headers)", "def update_H(self, curl_E):", "def cli(since, _input, digests):\n if \"://\" in _input:\n coro = make_digest([_input])\n else:\n coro = make_digests_from_config(_input, digests or None, since=since)\n\n main_run(coro)", "def set_uri(self, uri):\r\n self.uri = uri", "def Reload(self):\n self._inspector_backend.Navigate(self.url, None, 10)", "def call(self, uri, method, arg, extras):\n pass", "def set_hashes(self, url, hashes):", "def refresh(self, new_content):\n pass", "def fusion_api_fabric_manager_refresh(self, body, uri, api=None, headers=None):\n param = '/snapshot/'\n return self.fabricmanager.put(body=body, uri=uri, param=param, api=api, headers=headers)", "def refresh(dataset, client):\n pass", "def set_uri(self, uri):\n self.__uri = uri", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def fusion_api_patch_enclosure(self, body, uri, api=None, headers=None, etag=None):\n return self.enclosure.patch(body, uri, api, headers, etag)", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def refresh_view():\n pass", "def RefreshLearnedInformation(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('refreshLearnedInformation', payload=payload, response_object=None)", "def refresh_urls(environ, start_response):\n store = environ['tiddlyweb.store']\n config = environ['tiddlyweb.config']\n \n register_urls(store, config)\n \n start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])\n return 'All URLs have been updated'", "def patch(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError" ]
[ "0.67384696", "0.5728506", "0.53511006", "0.5258038", "0.5185052", "0.5173393", "0.5173393", "0.5118185", "0.5077461", "0.5077461", "0.5072432", "0.50466037", "0.49766743", "0.49741447", "0.49734172", "0.49493074", "0.4925644", "0.49079332", "0.48920047", "0.48747337", "0.48434016", "0.48350692", "0.48325092", "0.47971517", "0.47793442", "0.47763798", "0.47514248", "0.47467208", "0.47232887", "0.47018394" ]
0.5801159
1
Import server hardware type for a specified Enclosure [Arguments]
def fusion_api_import_server_hardware_type_for_enclosure(self, body, uri, api=None, headers=None): return self.enclosure.post(body, uri, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(args):\n print(\"[INFO] args:\", json.dumps(args))\n\n token = args['_parameters'].get('token')\n device_type = args['_parameters'].get('device_type')\n\n if not token:\n print(\"[ERROR] Ubidots token not specified\")\n return {\"status\":\"error\"}\n\n elif not device_type and token:\n print(\"[INFO] device type not specified\")\n device_type = \"\"\n\n if device_type != \"\":\n device_type_data = set_device_type(device_type)\n try:\n res = create_device_type(device_type_data, token)\n print(res)\n if res.status_code == 409:\n print(\"[INFO] A device type with this name already exists.\")\n elif res.status_code == 201:\n print(\"[INFO] Device type created successfully.\")\n except Exception as e:\n print(\"[INFO] Setup function ran, but could not create a device type.\")\n print(e)\n else:\n print({\"[INFO] No device type created\"})\n\n return {\"status\":\"finished\"}", "def hardware(*args, brdType: bool=True, cpuType: bool=True, graphicsType: bool=True, megaHertz:\n bool=True, numProcessors: bool=True, **kwargs)->AnyStr:\n pass", "def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.add(args)", "def _import_bh_(self):", "def run_import(input_path: str, output_path: str, typ: str) -> str:\n cmd = ''\n if typ.startswith(\"FeatureTable\"):\n if not input_path.endswith('biom'):\n cur_biom = '%s.biom' % splitext(input_path)[0]\n cmd += 'biom convert \\\\\\n'\n cmd += ' -i %s \\\\\\n' % input_path\n cmd += ' -o %s \\\\\\n' % cur_biom\n cmd += ' --table-type=\"OTU table\" \\\\\\n'\n cmd += ' --to-hdf5\\n\\n'\n cmd += 'qiime tools import \\\\\\n'\n cmd += ' --input-path %s \\\\\\n' % cur_biom\n cmd += ' --output-path %s \\\\\\n' % output_path\n cmd += ' --type \"FeatureTable[Frequency]\"\\n'\n else:\n cmd += 'qiime tools import \\\\\\n'\n cmd += ' --input-path %s \\\\\\n' % input_path\n cmd += ' --output-path %s \\\\\\n' % output_path\n cmd += ' --type \"FeatureTable[Frequency]\"\\n'\n else:\n cmd += 'qiime tools import \\\\\\n'\n cmd += ' --input-path %s \\\\\\n' % input_path\n cmd += ' --output-path %s \\\\\\n' % output_path\n cmd += ' --type \"%s\"\\n' % typ\n return cmd", "def hw_from_req(req):\n return req.app['com.opentrons.hardware']", "def _import_elmo():\n\n elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz',\n trainable=False) # news\n # elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-twitter_2013-01_2018-04_600k_steps.tar.gz',\n # trainable=False) # twitter\n print('❤️ ❤️ ❤️ DONE (re)importing Tensorflow hub.Module ')\n print('Tensorflow version is', tf.__version__)\n\n return elmo", "def main(args): \n if args.type == 'FILEGDB':\n create_filegdb(args.name, args.path)\n elif args.type == 'ST_GEOMETRY' or args.type == 'SPATIALITE':\n create_sqlitedb(args.name, args.type, args.path)", "def hdevtools_type(filename, line, column, cabal = None):\n return call_hdevtools_and_wait(['type', filename, str(line), str(column)], filename = filename, cabal = cabal)", "def importer():\n pass", "def main():\n\n obj = PowerStoreNfsExport()\n obj.perform_module_operation()", "def load_device():", "def fusion_api_add_server_hardware(self, body, api=None, headers=None, param=''):\n return self.sh.post(body, api, headers, param)", "def cmd_type(args):", "def init_from_entity(self, entity):\r\n\r\n if entity.type.lower() == '1_static_mesh':\r\n return UnrealImporter(entity, StaticImportTaskStrategy(),\r\n AssetExecuteTaskStrategy())\r\n\r\n elif entity.type.lower() == '2_skeletal_mesh':\r\n return 
UnrealImporter(entity, SkeletalImportTaskStrategy(),\r\n AssetExecuteTaskStrategy())\r\n\r\n else:\r\n raise NotImplementedError('No implementation for the \"{}\" file type'.format(entity.type))", "def import_data(nb, tickers, types, First = True):\n pa = os.getcwd().replace(\"\\\\\",\"/\") + \"/sec-edgar-filings\"\n for i in types: # looping over different types\n exec(nb,tickers,pa,i,First)", "def import_module(self, location, name):", "def fusion_api_edit_server_hardware_types(self, body, uri, api=None, headers=None):\n return self.types.update(body, uri, api, headers)", "def server_type(self):\n ...", "def add_machine(args):\n session = Session()\n # the following is used to help with code completion\n env = Environment(name=args.environment)\n try:\n env = session.query(Environment).filter_by(name=args.environment).one()\n except NoResultFound:\n print \"ERROR: couldn't find environment %s\" % args.environment\n sys.exit(1)\n machine = PoolMachine(name=args.name, hostname=args.hostname, environment=env, online=True)\n session.add(machine)\n session.commit()\n print repr(machine)", "def do_poortego_import(self, arg, opt):\n poortego_import(self.my_interface, arg, opt)", "def _handle_load_module(self, name: str, module_type: Type[Module]) -> ModuleType:\n if module_type not in [FeatureModule, ProtocolModule]:\n raise TypeError(f\"Invalid type '{module_type}'\")\n type_str = \"feature\" if module_type is FeatureModule else \"protocol\"\n try:\n module = module_type(f\"ZeroBot.{type_str}.{name}\")\n except ModuleNotFoundError as ex:\n raise NoSuchModule(f\"Could not find {type_str} module '{name}': {ex}\", mod_id=name, exc=ex) from None\n except Exception as ex:\n raise ModuleLoadError(f\"Failed to load {type_str} module '{name}'\", mod_id=name) from ex\n self.logger.debug(f\"Imported {type_str} module {module!r}\")\n return module", "def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)", "def main():\n\n # create an auth ticket for APIC-EM\n\n global APIC_EM_TICKET # make the ticket a global variable in this module\n APIC_EM_TICKET = get_service_ticket()\n\n # build a list with all device id's\n switch_id_list = get_switch_ids()\n switches_info = collect_switch_info(switch_id_list)\n\n # ask user for filename input and save file\n filename = get_input_file()\n output_file = open(filename, 'w', newline='')\n output_writer = csv.writer(output_file)\n for lists in switches_info:\n output_writer.writerow(lists)\n output_file.close()\n # pprint(switches_info) # print for data validation", "def fusion_api_get_server_hardware_types(self, uri=None, param='', api=None, headers=None):\n return self.types.get(uri=uri, api=api, headers=headers, param=param)", "def new_entity_type(name, client=default):\n data = {\"name\": name}\n return raw.create(\"entity-types\", data, client=client)", "def main(args):\n # server on other machine needs to connect to this machines IP\n client = WheelchairClientProtocol(args)\n client.listen('0.0.0.0', 9999)\n\n # server on this machines needs to connect to other machines IP\n server = WheelchairServerProtocol(args)\n server.connect('192.168.1.106', 9999)", "def server_type_name(self):\n ...", "def run_pytype(*, filename: 
str, python_version: str, python_exe: str, typeshed_location: str) -> Optional[str]:\n options = pytype_config.Options.create(\n filename,\n module_name=_get_module_name(filename),\n parse_pyi=True,\n python_version=python_version,\n python_exe=python_exe)\n old_typeshed_home = os.environ.get(TYPESHED_HOME, UNSET)\n os.environ[TYPESHED_HOME] = typeshed_location\n try:\n pytype_io.parse_pyi(options)\n except Exception:\n stderr = traceback.format_exc()\n else:\n stderr = None\n if old_typeshed_home is UNSET:\n del os.environ[TYPESHED_HOME]\n else:\n os.environ[TYPESHED_HOME] = old_typeshed_home\n return stderr", "def load(self, eng):\n eng.eval(\"load_system('simulink_househeat')\", nargout=0)" ]
[ "0.52070135", "0.51823914", "0.5084796", "0.505941", "0.4996679", "0.49802524", "0.49404544", "0.49346665", "0.48633078", "0.48547235", "0.48510465", "0.48264375", "0.48194587", "0.4804756", "0.47828266", "0.4745861", "0.47377896", "0.47187185", "0.46815425", "0.46599594", "0.46400997", "0.46265373", "0.46263483", "0.4625469", "0.46216714", "0.46119204", "0.46084034", "0.46080348", "0.4565203", "0.4559136" ]
0.63638186
0
Creates the payload required to create an enclosure group [Arguments]
def fusion_api_create_enclosure_group_payload(self, body, lig_map=None, api=None): return self.enclosure_group.make_body(api, body, lig_map)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_create_enclosure_group(self, body, api=None, headers=None):\n return self.enclosure_group.create(body, api, headers)", "def post_security_group_create(self, resource_dict):\n pass", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}", "def test_create_resource_group(self):\n pass", "def format_payload(enc, **kwargs):\n payload = {\"enc\": enc}\n load = {}\n for key in kwargs:\n load[key] = kwargs[key]\n payload[\"load\"] = load\n return package(payload)", "def CreateGroupPostData(input, collection, grouping, item, groupname):\n root = etree.Element(collection)\n name = etree.SubElement(root, 'name')\n name.text = groupname\n is_smart = etree.SubElement(root, 'is_smart')\n is_smart.text = 'false'\n itemlist = etree.SubElement(root, grouping)\n \n for i in input:\n add_element = etree.SubElement(itemlist, item)\n add_element_id = etree.SubElement(add_element, 'id')\n add_element_id.text = i\n \n return etree.tostring(root)", "def pre_security_group_create(self, resource_dict):\n pass", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def GroupPayload(self):\n\t\tif self.Group:\n\t\t\treturn \"<group-id>%s</group-id>\" % (self.Group)\n\t\telse:\n\t\t\treturn None", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del 
params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def build_payload(self, **kwargs):\n\n return None", "def post(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n logging.info(\"[UWEB] add group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n cid = data.cid\n name = data.name\n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n group_info = dict(cid=cid,\n name=name,\n type=UWEB.GROUP_TYPE.NEW)\n gid = add_group(group_info, self.db, self.redis)\n # NOTE: wspush to client\n tid = self.current_user.tid\n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status,\n dict_=dict(gid=gid,\n cid=cid,\n name=name))\n\n except Exception as e:\n logging.exception(\"[UWEB] Create group failed. uid: %s, Exception: %s\",\n self.current_user.uid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)", "def create_security_group(self, body=None):\r\n return self.post(self.security_groups_path, body=body)", "def post_groups(\n data: PostGroupIn, tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.create_group\")\n grp = Group(\n description=data.description,\n members=[tkn.owner],\n group_name=data.group_name,\n owner=tkn.owner,\n ).save()\n logging.debug(\n \"Created group %s (%s) owned by %s\",\n data.group_name,\n str(grp.pk),\n tkn.owner.character_name,\n )\n return GetGroupOut.from_record(grp)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def assignmentCreate(groupId):\n postData = request.json\n new_channel = db.channels.insert_one(\n {\n \"name\": postData.get(\"name\"),\n \"dis\": postData.get(\"dis\"),\n \"category\": \"assignments\",\n \"groupId\": groupId,\n }\n )\n insertAssignment = db.Assignment.insert_one(\n {\n \"name\": postData.get(\"name\"),\n \"dis\": postData.get(\"dis\"),\n \"maxGrade\": postData.get(\"maxGrade\"),\n \"dueDate\": postData.get(\"dueDate\"),\n \"startDate\": postData.get(\"startDate\"),\n \"url\": postData.get(\"url\"),\n \"channelId\": new_channel.inserted_id\n }\n )\n\n getId = insertAssignment.inserted_id\n assignment = db.Assignment.find_one({\"_id\": ObjectId(getId)})\n print(f\"Assignment {assignment}\")\n\n group = db.Group.find_one({\"_id\": ObjectId(groupId)})\n print(f\"Group from EOF: {group}\")\n group[\"assignmentIds\"].append(assignment[\"_id\"])\n print(f\"Group assignmentIds after append: {group['assignmentIds']}\")\n return jsonify({\"msg\": \"Your assignment has been created.\"}), 200", "def make_EnclosureGroupV200(associatedLIGs, name,\n powerMode='RedundantPowerSupply'):\n ligUri = associatedLIGs['uri']\n icms = associatedLIGs['interconnectMapTemplate']['interconnectMapEntryTemplates']\n ligs = []\n # With the 200 API, the LIG uri can only be assigned if the LIG contains a\n # definition of the interconnect bay. I.E. if the LIG only has ICM 1 and 2\n # defined then 3 - 8 must be set to None. 
I.E:\n # 'interconnectBayMappings': [{'interconnectBay': 1,\n # 'logicalInterconnectGroupUri':\n # '/rest/logical-interconnect-groups/f8371e33-6d07-4477-9b63-cf8400242059'},\n # {'interconnectBay': 2,\n # 'logicalInterconnectGroupUri':\n # '/rest/logical-interconnect-groups/f8371e33-6d07-4477-9b63-cf8400242059'}]}\n # {'interconnectBay': 3,\n # 'logicalInterconnectGroupUri': None},\n # {'interconnectBay': 4,\n # 'logicalInterconnectGroupUri': None},\n # ...\n for N in range(1, 9):\n if N > len(icms):\n ligs.append({'interconnectBay': N,\n 'logicalInterconnectGroupUri': None})\n else:\n ligs.append({'interconnectBay': N,\n 'logicalInterconnectGroupUri': ligUri})\n return {\n 'name': name,\n 'type': 'EnclosureGroupV200',\n 'stackingMode': 'Enclosure',\n 'powerMode': powerMode,\n 'enclosureCount': 1,\n 'enclosureTypeUri': \"/rest/enclosure-types/c7000\",\n 'interconnectBayMappingCount': 8,\n 'interconnectBayMappings': ligs\n }", "def make_payload(self):\n return Payload(names=self.names)", "def package(payload):\n return salt.utils.msgpack.dumps(payload)", "async def create(self, payload):\n\n return await self.creator.write(payload)" ]
[ "0.6800435", "0.6043101", "0.58894837", "0.5621423", "0.5621423", "0.5619241", "0.5539789", "0.5523746", "0.5501674", "0.5499325", "0.54890144", "0.5478458", "0.5457122", "0.5455454", "0.5379649", "0.53306776", "0.5327032", "0.53155494", "0.53141737", "0.5246716", "0.52463305", "0.5244351", "0.52310836", "0.521864", "0.521213", "0.5208201", "0.51919276", "0.51589054", "0.513531", "0.5113192" ]
0.73352313
0
Creates an enclosure group [Arguments]
def fusion_api_create_enclosure_group(self, body, api=None, headers=None): return self.enclosure_group.create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_create_enclosure_group_payload(self, body, lig_map=None, api=None):\n return self.enclosure_group.make_body(api, body, lig_map)", "def group(self, *args, **kwargs):\n def decorator(f):\n cmd = group( *args, **kwargs )( f )\n self.add_command(cmd)\n return cmd\n return decorator", "def add_group(self, *args, **kwds):\n title = kwds.pop('title', None)\n description = kwds.pop('description', None)\n if kwds:\n raise Exception('unknown keyword arguments: %s' % kwds)\n\n # set title, description if args[0] is string\n if isinstance(args[0], string_types):\n title = args[0]\n args = args[1:]\n if isinstance(args[0], string_types):\n description = args[0]\n args = args[1:]\n\n assert all(isinstance(arg, Command) for arg in args), 'all args should be instance of Command'\n self._arg_stack.append(('group', args, {'title': title, 'description': description}))\n return self", "def group(*args, show: bool = True, parent: str = \"\", before: str = \"\", width: int = 0, pos=[],\n horizontal: bool = False, horizontal_spacing: float = -1.0, id:str='', indent=-1):\n try:\n widget = internal_dpg.add_group(*args, show=show, parent=parent, before=before, width=width,\n horizontal=horizontal, horizontal_spacing=horizontal_spacing, id=id,\n indent=indent, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def __make_group_by_atom(self, group_name, name_list):\r\n pass", "def command_group(group_name):\n def wrapper(func):\n func.group_name = group_name\n return func\n return wrapper", "def keyingGroup(*args, activator: Union[name, bool]=None, addElement: name=None, afterFilters:\n bool=True, category: Union[AnyStr, bool]=\"\", clear: name=None, color: Union[int,\n bool]=0, copy: name=None, edges: bool=True, editPoints: bool=True, empty:\n bool=True, excludeDynamic: bool=True, excludeRotate: bool=True, excludeScale:\n bool=True, excludeTranslate: bool=True, excludeVisibility: bool=True, facets:\n bool=True, flatten: name=None, forceElement: name=None, include: name=None,\n intersection: name=None, isIntersecting: name=None, isMember: name=None, layer:\n bool=True, minimizeRotation: bool=True, name: AnyStr=\"\", noSurfaceShader:\n bool=True, noWarnings: bool=True, nodesOnly: bool=True, remove: name=None,\n removeActivator: name=None, renderable: bool=True, setActiveFilter:\n Union[AnyStr, bool]=\"\", size: bool=True, split: name=None, subtract: name=None,\n text: Union[AnyStr, bool]=\"\", union: name=None, vertices: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def __init__(self, *args):\n _snap.GroupStmt_swiginit(self, _snap.new_GroupStmt(*args))", "def enter_group():\n logline(\"\\\\\", indent=False)\n global group_length\n group_length = group_length + 1", "def _generateExpandedEOCs(self, obj, **args):\n return []", "def __init__(self, *args):\n this = _libsbml.new_GroupsExtension(*args)\n try: self.this.append(this)\n except: self.this = this", "def group(description, *funcs, **kwargs):\n def _argument_group(parser):\n if kwargs.get('mutually_exclusive'):\n kwargs.pop('mutually_exclusive')\n g = 
parser.add_mutually_exclusive_group(**kwargs)\n elif kwargs:\n raise UserWarning(\n \"Unrecognized kwargs: %s\" % str(list(kwargs.keys())))\n else:\n g = parser.add_argument_group(description)\n for f in funcs:\n f(g)\n return _argument_group", "def create_command_group(\n self, name: str, *, aliases: Sequence[str] = (), help_text: str = None\n ) -> \"CommandGroup\":\n kwargs = {\"aliases\": aliases}\n if help_text:\n kwargs[\"help\"] = help_text\n group = CommandGroup(\n self._sub_parsers.add_parser(name, aliases=aliases, help=help_text),\n f\"{self._prefix}:{name}\" if self._prefix else name,\n self._handlers,\n )\n self._add_handler(group.dispatch_handler, name, aliases)\n\n return group", "def gen_group(group_name=None, group_vars={}):\n group = Group(name=group_name)\n for key, value in group_vars.iteritems():\n group.set_variable(key, value)\n return group", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def koie_group():\n return GroupFactory(name=\"Koiene\")", "def make_EnclosureGroupV200(associatedLIGs, name,\n powerMode='RedundantPowerSupply'):\n ligUri = associatedLIGs['uri']\n icms = associatedLIGs['interconnectMapTemplate']['interconnectMapEntryTemplates']\n ligs = []\n # With the 200 API, the LIG uri can only be assigned if the LIG contains a\n # definition of the interconnect bay. I.E. if the LIG only has ICM 1 and 2\n # defined then 3 - 8 must be set to None. I.E:\n # 'interconnectBayMappings': [{'interconnectBay': 1,\n # 'logicalInterconnectGroupUri':\n # '/rest/logical-interconnect-groups/f8371e33-6d07-4477-9b63-cf8400242059'},\n # {'interconnectBay': 2,\n # 'logicalInterconnectGroupUri':\n # '/rest/logical-interconnect-groups/f8371e33-6d07-4477-9b63-cf8400242059'}]}\n # {'interconnectBay': 3,\n # 'logicalInterconnectGroupUri': None},\n # {'interconnectBay': 4,\n # 'logicalInterconnectGroupUri': None},\n # ...\n for N in range(1, 9):\n if N > len(icms):\n ligs.append({'interconnectBay': N,\n 'logicalInterconnectGroupUri': None})\n else:\n ligs.append({'interconnectBay': N,\n 'logicalInterconnectGroupUri': ligUri})\n return {\n 'name': name,\n 'type': 'EnclosureGroupV200',\n 'stackingMode': 'Enclosure',\n 'powerMode': powerMode,\n 'enclosureCount': 1,\n 'enclosureTypeUri': \"/rest/enclosure-types/c7000\",\n 'interconnectBayMappingCount': 8,\n 'interconnectBayMappings': ligs\n }", "def group_by(self, *args):\n self._group_by.extend(args)\n return self", "def fusion_api_get_enclosure_groups(self, uri=None, param='', api=None, headers=None):\n return self.enclosure_group.get(uri=uri, api=api, headers=headers, param=param)", "def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n return\n console.print(f\"::group::[bright_blue]{title}[/]\")\n yield\n console.print(\"::endgroup::\")", "def add_bu_group(self, **kwargs):\n _stringify_kw(kwargs)\n \n bu_group = ElementTree.Element(xml_strings['backup_group'], **kwargs)\n self._root.append(bu_group)\n return XMLGroupOverlay(bu_group, self._root)", "def sub_command_group(self, name=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND_GROUP\r\n \r\n new_func = SubCommandGroup(func, name=name, **kwargs)\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n 
return decorator", "def group(*args, absolute: bool=True, empty: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\",\n relative: bool=True, useAsGroup: AnyStr=\"\", world: bool=True, **kwargs)->AnyStr:\n pass", "def add_tools(self, name, *args):\n # Take stretch out\n stretch = self._left.children()[-1]\n stretch.parent(None)\n \n # Add group of widgets\n panel = Panel(title=name, parent=self._left, flex=0)\n vbox = VBox(parent=panel)\n for widget in args:\n widget.parent(vbox)\n \n # Put stretch back in\n stretch.parent(self._left)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "def argument_group(self, *, title: str = None, description: str = None):\n return self.parser.add_argument_group(title, description)", "def fusion_api_edit_enclosure_group(self, body, uri, api=None, headers=None):\n return self.enclosure_group.update(body, uri, api, headers)" ]
[ "0.6563358", "0.6013126", "0.58348554", "0.5824765", "0.5731047", "0.5633604", "0.55728775", "0.54996365", "0.5480335", "0.5450153", "0.5426098", "0.539384", "0.5359584", "0.5350015", "0.5280982", "0.5273882", "0.5273159", "0.52682054", "0.52434725", "0.52317363", "0.51879567", "0.51777524", "0.51731896", "0.51723963", "0.5161461", "0.5147011", "0.5134496", "0.5105613", "0.5102315", "0.5085923" ]
0.6921773
0
Update an enclosure group. Currently the only attribute that can be updated is the name. [Arguments]
def fusion_api_edit_enclosure_group(self, body, uri, api=None, headers=None): return self.enclosure_group.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_update_logical_enclosure_from_group(self, uri=None, api=None, headers=None):\n param = '/updateFromGroup'\n return self.logical_enclosure.put(body=None, uri=uri, param=param, api=api, headers=headers)", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response", "def update_eip_group(self, id, name, client_token=None, config=None):\n path = utils.append_uri(self._get_path(), id)\n if client_token is None:\n client_token = generate_client_token()\n params = {\n b'update': None,\n b'clientToken': client_token\n }\n body = {\n 'name': name\n }\n return self._send_request(http_methods.PUT,\n path, body=json.dumps(body),\n params=params, config=config)", "def test_update_entry_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "def test_update_group(self):\n pass", "def update(ctx, name, description, tags):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n update_dict = {}\n\n if name:\n update_dict['name'] = name\n\n if description:\n update_dict['description'] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict['tags'] = tags\n\n if not update_dict:\n Printer.print_warning('No argument was provided to update the experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.update_experiment_group(\n user, project_name, _group, update_dict)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not update experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n Printer.print_success(\"Experiment group updated.\")\n get_group_details(response)", "def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "def 
fusion_api_create_enclosure_group(self, body, api=None, headers=None):\n return self.enclosure_group.create(body, api, headers)", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def edit_group_command(self):\n self.switch_frame(\"Edit Group\")\n id = self.parent.get_frame_id(\"Edit Group\")\n self.parent.frames[id].display_group(self.user.active_group)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setName(self, *args):\n return _libsbml.Group_setName(self, *args)", "def fusion_api_delete_enclosure_group(self, name=None, uri=None, api=None, headers=None):\n return self.enclosure_group.delete(name, uri, api, headers)", "def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()", "def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = 
self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def fusion_api_edit_enclosure(self, body, uri, api=None, headers=None):\n return self.enclosure.update(body, uri, api, headers)", "def append_group_attr(self, analyte: str, group_name: str, key: str, value: Any):\n group_path = \"/\".join([\"\", ASSAYS, analyte, group_name])\n if group_path not in self.__file:\n raise ValueError(\"{} missing\".format(group_path))\n group = self.__file[group_path]\n if key in group.keys():\n key_path = group_path + \"/\" + key\n data = self.__file[key_path]\n normalized = normalize_attr_values(value)\n data[...] = normalized\n else:\n self.__write_value(group, key, value)", "def update_group(self, group_id, new_description):\n url = self.groups_url + \"/\" + group_id\n new_data = json.dumps({\"description\": new_description})\n\n return requests.put(url, new_data, headers=self.headers)", "def set_group_name(self, name):\n self.groupname = name", "def patch_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n 
query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def setKind(self, *args):\n return _libsbml.Group_setKind(self, *args)", "def async_update_group_state(self) -> None:", "def fusion_api_create_enclosure_group_payload(self, body, lig_map=None, api=None):\n return self.enclosure_group.make_body(api, body, lig_map)", "def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)" ]
[ "0.66474724", "0.6280426", "0.6084134", "0.58900374", "0.5876445", "0.57525283", "0.5751663", "0.5745694", "0.5724241", "0.5721095", "0.57171476", "0.56950307", "0.5646967", "0.5571251", "0.5569868", "0.5559737", "0.5516183", "0.55124354", "0.5489764", "0.5470734", "0.54474366", "0.5446788", "0.5437649", "0.5436419", "0.5426245", "0.5414456", "0.5394142", "0.53304094", "0.532462", "0.53103447" ]
0.75451136
0
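Usage sketch (illustrative, not part of the record above): it assumes `fusion` is an instance of the keyword-library class that defines fusion_api_edit_enclosure_group, and the URI and name below are placeholders.

def rename_enclosure_group(fusion, eg_uri, new_name):
    # Name is the only attribute this endpoint currently allows updating.
    body = {"name": new_name}
    return fusion.fusion_api_edit_enclosure_group(body=body, uri=eg_uri)

# Hypothetical call with a placeholder URI:
# rename_enclosure_group(fusion, "/rest/enclosure-groups/<id>", "EG-Renamed")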
Deletes an enclosure group from the appliance based on name OR uri. [Arguments]
def fusion_api_delete_enclosure_group(self, name=None, uri=None, api=None, headers=None): return self.enclosure_group.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_del_group(dbsync, group):\n pass", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def delete_entry_group(self, name):\n self.__datacatalog.delete_entry_group(name=name)", "def remove_group(args):\n\n # check config file is valid first\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"OIDC config file not valid, please use the verify function to debug\")\n return 1 \n\n result_remove_config_file = remove_group_from_json(args)\n result_remove_from_config = remove_group_config_file(args)\n\n if result_remove_config_file != 0 and result_remove_from_config != 0:\n print(\"Error. Group {} does not exist in DynaFed\".format(args.group))\n return 1\n\n if result_remove_config_file != 0 or result_remove_from_config != 0:\n print(\"Error while removing config for {}. Check {} is missing group and {}.conf is missing to ensure full removal.\".format(args.group, args.file, args.group))\n return 1\n return 0", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_delete_collection_group(self):\n pass", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def fusion_api_delete_logical_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.logical_enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_delete_group_reparent_hosts(self, inventoryloader):\n inventoryloader.del_group('glance_api', reparent_hosts=True)\n assert inventoryloader.groups['glance_all'].has_host('localhost')\n assert inventoryloader.hosts['localhost'].has_group('glance_all')", "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def 
test_delete_resource_group(self):\n pass", "def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')", "def bdev_uring_delete(client, name):\n params = {'name': name}\n return client.call('bdev_uring_delete', params)", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def remove_from_group(self, org, contact, group):\n pass", "def delete_group(user):\n return 'do some magic!'", "def fusion_api_delete_fabric(self, name=None, uri=None, api=None, headers=None):\n return self.fabric.delete(name, uri, api, headers)", "def product_group_delete(obj, name):\n client = get_client(obj)\n\n with Action('Deleting product_group: {}'.format(name), nl=True):\n pgs = client.product_group_list(name)\n\n client.product_group_delete(pgs[0]['uri'])", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def test_groups_group_ref_delete(self):\n pass", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete(self, oid):\n path = '%s/security-groups/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group: %s' % truncate(res))\n return res[0]" ]
[ "0.6614238", "0.6519363", "0.64859056", "0.6396397", "0.63878655", "0.63591623", "0.6289345", "0.6154435", "0.61432517", "0.61174744", "0.60918987", "0.60578215", "0.60578215", "0.6043282", "0.6029336", "0.60041845", "0.5983331", "0.5981932", "0.59472394", "0.5928828", "0.59131545", "0.5907542", "0.5879032", "0.5873776", "0.58161515", "0.57906914", "0.57883096", "0.5743788", "0.5733646", "0.5729736" ]
0.80254894
0
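Usage sketch (illustrative, not from the source): the keyword accepts either a name or a URI to identify the group; `fusion` is again an assumed keyword-library instance.

def delete_enclosure_group(fusion, name=None, uri=None):
    # Exactly one of name/uri should identify the enclosure group.
    if name is None and uri is None:
        raise ValueError("provide a name or a uri")
    return fusion.fusion_api_delete_enclosure_group(name=name, uri=uri)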
Gets a compatibility report. [Arguments]
def fusion_api_get_compatibility_report(self, uri, param='', api=None, headers=None): return self.migratableVcDomain.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n reportSample = CompatibilityReportSample()\n reportSample.run()", "def fusion_api_get_security_compatibility_report(self, uri=None, api=None, headers=None, param='/compatibility-report'):\n return self.security_standards.get(uri=uri, api=api, headers=headers, param=param)", "def get_reports(self):\n return ['auditree/compliance_config.md']", "def get_report(self) -> str:\n return self.diagnostics.get_report()", "def getConflictReport(self,srcInstaller,mode):\n data = self.data\n srcOrder = srcInstaller.order\n conflictsMode = (mode == 'OVER')\n if conflictsMode:\n #mismatched = srcInstaller.mismatchedFiles | srcInstaller.missingFiles\n mismatched = set(srcInstaller.data_sizeCrc)\n else:\n mismatched = srcInstaller.underrides\n showInactive = conflictsMode and settings['bash.installers.conflictsReport.showInactive']\n showLower = conflictsMode and settings['bash.installers.conflictsReport.showLower']\n if not mismatched: return ''\n src_sizeCrc = srcInstaller.data_sizeCrc\n packConflicts = []\n getArchiveOrder = lambda x: data[x].order\n for package in sorted(self.data,key=getArchiveOrder):\n installer = data[package]\n if installer.order == srcOrder: continue\n if not showInactive and not installer.isActive: continue\n if not showLower and installer.order < srcOrder: continue\n curConflicts = Installer.sortFiles([x.s for x,y in installer.data_sizeCrc.iteritems() \n if x in mismatched and y != src_sizeCrc[x]])\n if curConflicts: packConflicts.append((installer.order,package.s,curConflicts))\n #--Unknowns\n isHigher = -1\n buff = cStringIO.StringIO()\n for order,package,files in packConflicts:\n if showLower and (order > srcOrder) != isHigher:\n isHigher = (order > srcOrder)\n buff.write('= %s %s\\n' % ((_('Lower'),_('Higher'))[isHigher],'='*40))\n buff.write('==%d== %s\\n'% (order,package))\n for file in files:\n buff.write(file)\n buff.write('\\n')\n buff.write('\\n')\n report = buff.getvalue()\n if not conflictsMode and not report and not srcInstaller.isActive:\n report = _(\"No Underrides. 
Mod is not completely un-installed.\")\n return report", "def reports_cli():", "def get_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'date_start': self.date_start,\n 'date_end': self.date_end,\n },\n }\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `_get_report_values()` and pass `data` automatically.\n return self.env.ref('base_enh.recap_report').report_action(self, data=data)", "def report(self) -> Any:", "def register_reports(self):\n from ckanext.qa import reports\n return [reports.openness_report_info]", "def get_reports_command(\n client: Client, args: Dict[str, Any]\n) -> Union[str, Dict[str, Any]]:\n # Validate arguments\n params = get_reports_params(args)\n\n # Preparing header\n headers = {\n 'X-FeApi-Token': client.get_api_token(),\n 'Accept': CONTENT_TYPE_JSON,\n }\n\n # API call\n resp: Response = client.http_request(\n method='GET',\n url_suffix=URL_SUFFIX['GET_REPORTS'],\n params=params,\n headers=headers,\n )\n\n # Create file from Content\n if int(resp.headers.get('Content-Length', '')) > 0:\n file_entry = fileResult(\n filename=generate_report_file_name(args),\n data=resp.content,\n file_type=EntryType.ENTRY_INFO_FILE,\n )\n return file_entry\n else:\n return MESSAGES['NO_RECORDS_FOUND'].format('report contents')", "def reporting(self):\r\n return reporting.Reporting(self)", "def __report(arguments, _):\n ignored_packages, other_packages, invalid_packages, skips = __gather_package_data(\n arguments\n )\n\n packages, invalids = worker.report(\n other_packages,\n maximum_repositories=arguments.maximum_repositories,\n maximum_rez_packages=arguments.maximum_rez_packages,\n )\n\n invalids.extend(invalid_packages)\n\n _print_ignored(ignored_packages)\n print(\"\\n\")\n _print_skips(skips, arguments.verbose)\n print(\"\\n\")\n _print_invalids(invalids, arguments.verbose)\n print(\"\\n\")\n _print_missing(packages, arguments.verbose)\n\n sys.exit(0)", "def _lookup_report(self, name):\n join = os.path.join\n\n # First lookup in the deprecated place, because if the report definition\n # has not been updated, it is more likely the correct definition is there.\n # Only reports with custom parser sepcified in Python are still there.\n if 'report.' + name in odoo.report.interface.report_int._reports:\n return odoo.report.interface.report_int._reports['report.' 
+ name]\n\n self._cr.execute(\"SELECT * FROM ir_act_report_xml WHERE report_name=%s\", (name,))\n row = self._cr.dictfetchone()\n if not row:\n raise Exception(\"Required report does not exist: %s\" % name)\n\n if row['report_type'] in ('qweb-pdf', 'qweb-html'):\n return row['report_name']\n elif row['report_rml'] or row['report_rml_content_data']:\n kwargs = {}\n if row['parser']:\n kwargs['parser'] = getattr(odoo.addons, row['parser'])\n return report_sxw('report.'+row['report_name'], row['model'],\n join('addons', row['report_rml'] or '/'),\n header=row['header'], register=False, **kwargs)\n elif row['report_xsl'] and row['report_xml']:\n return report_rml('report.'+row['report_name'], row['model'],\n join('addons', row['report_xml']),\n row['report_xsl'] and join('addons', row['report_xsl']),\n register=False)\n else:\n raise Exception(\"Unhandled report type: %s\" % row)", "def _generate_report(self):\n raise NotImplementedError", "def get_report(self):\n raise NotImplementedError('Agent is an abstract base class')", "def compliance(self) -> pulumi.Output['outputs.ComplianceNoteResponse']:\n return pulumi.get(self, \"compliance\")", "def report(self):\n\n def _format(versions):\n return pprint.pformat(dict(versions), indent=4)\n\n debug_dict = {'pushed_versions': _format(self._versions),\n 'consumer_versions': _format(self._versions_by_consumer)}\n if self.last_report != debug_dict:\n self.last_report = debug_dict\n LOG.debug('Tracked resource versions report:\\n'\n 'pushed versions:\\n%(pushed_versions)s\\n\\n'\n 'consumer versions:\\n%(consumer_versions)s\\n',\n debug_dict)", "def reportinfo(self):\n return self.fspath, 0, f\"usecase: {self.name}\"", "def report(*packages):\n accepted_commands = ['python','conda']\n for package in packages:\n loc = \"not installed in this environment\"\n ver = \"unknown\"\n\n try:\n module = importlib.import_module(package)\n loc = os.path.dirname(module.__file__)\n\n try:\n ver = str(module.__version__)\n except Exception:\n pass\n \n except (ImportError, ModuleNotFoundError):\n if package in accepted_commands:\n try:\n # See if there is a command by that name and check its --version if so\n try:\n loc = subprocess.check_output(['command','-v', package]).decode().splitlines()[0].strip()\n except:\n # .exe in case powershell (otherwise wouldn't need it)\n loc = subprocess.check_output(['where.exe', package]).decode().splitlines()[0].strip() \n out = \"\"\n try:\n out = subprocess.check_output([package, '--version'], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n out = e.output\n\n # Assume first word in output with a period and digits is the version\n for s in out.decode().split():\n if '.' 
in s and str.isdigit(s[0]) and sum(str.isdigit(c) for c in s)>=2:\n ver=s.strip()\n break\n except:\n pass\n elif package == 'system':\n try:\n ver = platform.platform(terse=True)\n loc = \"OS: \" + platform.platform()\n except Exception:\n pass\n else:\n pass\n \n print(\"{0:30} # {1}\".format(package + \"=\" + ver,loc))", "def report_full(*args, **kwargs): # real signature unknown\n pass", "def display_reports(self, layout): # pylint: disable=arguments-differ", "def report():\n pass", "def name(self):\n return 'Report'", "def do_diff_report():\n diff_report = render_diff_report()\n nori.core.email_loggers['report'].info(\n diff_report + '\\n\\n\\n' + ('#' * 76)\n )\n # use the output logger for the report files (for now)\n nori.core.output_logger.info('\\n\\n' + diff_report + '\\n\\n')", "def getRenderDependencies(*args, **kwargs)->AnyStr:\n pass", "def get_sellability_report(melons):", "def call_link_reports(args) ->None:\n\n if not args['no_cmd']:\n print_link_reports(args['report-id'])\n if args['yaml']:\n yaml_file(args['report-id'])\n if args['csv']:\n csv_file(args['report-id'])\n if args['json']:\n json_file(args['report-id']) \n\n config.logger.info(\"Link Report generated according to the format chosen by user\")", "def GenerateReport(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('generateReport', payload=payload, response_object=None)", "def report(ctx, input_file):\n if input_file is None:\n click.echo(report.get_help(ctx))\n return\n\n diff_report(input_file, ctx.obj)", "def reports_public(self):\r\n return reports.ReportsPublic(self)" ]
[ "0.6441463", "0.6036515", "0.59583783", "0.58950293", "0.57708544", "0.5478609", "0.5393911", "0.53844786", "0.53566957", "0.5342869", "0.5335296", "0.53022224", "0.5268538", "0.5234701", "0.51971084", "0.5164213", "0.5138171", "0.5127779", "0.50975144", "0.50963753", "0.5091238", "0.50862783", "0.50761926", "0.50707704", "0.5006639", "0.5006213", "0.5002266", "0.49950597", "0.49941677", "0.4976587" ]
0.6377854
1
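Usage sketch (illustrative): fetches a compatibility report through the migratable-VC-domain resource; the URI is a placeholder and `fusion` is an assumed keyword-library instance.

def fetch_compatibility_report(fusion, report_uri, extra_param=""):
    # `param` is passed through as an optional query/filter string.
    return fusion.fusion_api_get_compatibility_report(uri=report_uri, param=extra_param)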
Creates an ethernet network. [Arguments]
def fusion_api_create_ethernet_network(self, body, api=None, headers=None): return self.ethernet_network.create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is require if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print \"response: %s\" % result[0].response.status\n print \"response: %s\" % result[0].response.reason\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def make_ethernet_networkV3(name, description=None, ethernetNetworkType=None,\n purpose='General', privateNetwork=False,\n smartLink=True, vlanId=0):\n return {\n 'name': name,\n 'type': 'ethernet-networkV3',\n 'purpose': purpose,\n 'connectionTemplateUri': None,\n 'vlanId': vlanId,\n 'smartLink': smartLink,\n 'ethernetNetworkType': ethernetNetworkType,\n 'privateNetwork': privateNetwork}", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = 
docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def create_enet_network(self, name, description=None,\n ethernetNetworkType=None, purpose='General',\n privateNetwork=False, smartLink=True, vlanId=0,\n typicalBandwidth=2500, maximumBandwidth=10000,\n blocking=True, verbose=False):\n bw = make_Bandwidth(typicalBandwidth, maximumBandwidth)\n xnet = make_ethernet_networkV3(name=name,\n ethernetNetworkType=ethernetNetworkType,\n purpose=purpose,\n privateNetwork=privateNetwork,\n smartLink=smartLink,\n vlanId=vlanId)\n task, entity = self.create_network(uri['enet'], xnet, bw, verbose)\n if blocking is True:\n task = self._activity.wait4task(task, tout=60, verbose=verbose)\n return entity", "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def create_platform_network(enode, category, config):\n # Check if this category has a defined netns\n netns = config.get('netns', None)\n if netns is None:\n return\n\n # Create the given network namespace\n enode._docker_exec('ip netns add {}'.format(netns))\n\n # lo should always be up\n enode._docker_exec('ip netns exec {} ip link set dev lo up'.format(netns))", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.get_router(router[\"id\"])", "def create_net(self, net_name, shared=\"false\"):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _net_info = {\"network\":\n {\"name\": net_name,\n \"shared\": shared,\n \"admin_state_up\": True}}\n _body = json.dumps(_net_info)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response 
from Server while creating network.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Creation of network Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Network is created successfully. Details : %s \" %\n output['network'])\n\n return output['network']['id']", "def test_create_network():\n _network = Network()", "def CreateAdHocNetwork(self, essid, channel, ip, enctype, key, encused,\n ics):\n self.wifi.CreateAdHocNetwork(essid, channel, ip, enctype, key, encused,\n ics)", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def _create_network_vm(args):\n #\n # maximum length of network name is 14 chars, longer names will result in\n # a failure 'numerical result out of range' when creating the bridge.\n if len(args.network_name) > 14:\n _logger.error('Network name %s to long, max is 14 characters.', args.network_name)\n return 1\n # check network name unicity\n conn = libvirt.openReadOnly(None)\n _vnets = []\n if conn:\n _vnets = [n.name() for n in conn.listAllNetworks() if n.name() == args.network_name]\n conn.close()\n else:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n if len(_vnets) != 0:\n print(\"Network with name [%s] already exists\" % args.network_name, file=sys.stderr)\n return 1\n\n return oci_utils.kvm.virt.create_virtual_network(network=args.net,\n network_name=args.network_name,\n ip_bridge=args.ip_bridge,\n ip_prefix=args.ip_prefix,\n ip_start=args.ip_start,\n ip_end=args.ip_end)", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for e in range(subnets_per_network):\n 
router = net_topo[\"routers\"][e]\n subnet = net_topo[\"subnets\"][e]\n self.neutron.remove_interface_from_router(subnet_id=subnet[\"id\"],\n router_id=router[\"id\"])\n self.neutron.delete_router(router[\"id\"])", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True", "def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)", "def create_network(self, context, network):\n\n LOG.debug(_(\"QuantumRestProxyV2: create_network() called\"))\n\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context, network[\"network\"])\n net_name = network[\"network\"][\"name\"]\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\"), net_name)\n\n # create in DB\n new_net = super(QuantumRestProxyV2, self).create_network(context,\n network)\n\n # create on networl ctrl\n try:\n resource = NET_RESOURCE_PATH % tenant_id\n data = {\n \"network\": {\n \"id\": new_net[\"id\"],\n \"name\": new_net[\"name\"],\n }\n }\n ret = self.servers.post(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to create remote \"\n \"network: %s\"), e.message)\n super(QuantumRestProxyV2, self).delete_network(context,\n new_net['id'])\n raise\n\n # return created network\n return new_net", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)" ]
[ "0.7072378", "0.706751", "0.68852335", "0.68721867", "0.6857736", "0.68266195", "0.6801317", "0.67727095", "0.670559", "0.66997755", "0.6598081", "0.65401465", "0.65227395", "0.6479247", "0.647173", "0.64481103", "0.6424945", "0.6411141", "0.6399511", "0.6328404", "0.63178134", "0.6303408", "0.626854", "0.6263146", "0.6253915", "0.6192031", "0.61855596", "0.6163094", "0.6128438", "0.61257577" ]
0.73980606
0
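Usage sketch (illustrative): the body fields mirror the ethernet-networkV3 payload shown in the make_ethernet_networkV3 negative above; exact required fields depend on the API version, so treat this payload as an assumption.

def create_vlan_network(fusion, name, vlan_id):
    body = {
        "name": name,
        "type": "ethernet-networkV3",  # assumed API version
        "vlanId": vlan_id,
        "purpose": "General",
        "smartLink": True,
        "privateNetwork": False,
    }
    return fusion.fusion_api_create_ethernet_network(body=body)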
Creates ethernet networks in bulk based on a VLAN ID range. [Arguments]
def fusion_api_create_ethernet_bulk_networks(self, body, api=None, headers=None): return self.ethernet_network.bulk_create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for e in range(subnets_per_network):\n router = net_topo[\"routers\"][e]\n subnet = net_topo[\"subnets\"][e]\n self.neutron.remove_interface_from_router(subnet_id=subnet[\"id\"],\n router_id=router[\"id\"])\n self.neutron.delete_router(router[\"id\"])", "def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()", "def create_network_bulk(self, tenant_id, network_list, sync=False):", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.get_router(router[\"id\"])", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n 
network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def create_and_trunk_vlan(self, host, vlan_id, vlan_name, intf_type, interface):\n\n dbg_str = self._dbg_str(host, \"create and enable\", vlan_id,\n vlan_name=vlan_name, interface=interface, intf_type=intf_type)\n LOG.debug(dbg_str)\n\n conn = self._connect(host)\n try:\n if_name = self._get_ifname(intf_type, interface)\n self._create_vlan(conn, vlan_id, vlan_name)\n self._add_intf_to_vlan(conn, vlan_id, if_name)\n except Exception as e:\n raise cexc.NOSConfigFailed(config=dbg_str, exc=e)\n conn.close()", "def make_tenant_vlan(name, ip, vid, interface):\n\n script = '\\n'.join([\n 'name={}',\n 'ip={}',\n 'vid={}',\n 'interface={}',\n '',\n '#',\n '# Binding br_ext to $interface',\n '#',\n 'sudo brctl addbr br_ext',\n 'sudo ip link set dev br_ext up',\n 'sudo brctl addif br_ext $interface',\n '',\n '#',\n '# Creating a namespace with $name with $ip',\n '# ',\n '',\n 'sudo ip netns add $name',\n 'sudo brctl addbr br_$name',\n 'sudo ip link set dev br_$name up',\n 'sudo ip link add veth0 type veth peer name veth0_$name ',\n 'sudo ip link set veth0 netns $name',\n 'sudo ip netns exec $name ip link set dev veth0 up',\n 'sudo ip netns exec $name ifconfig veth0 $ip netmask 255.255.255.0 up',\n 'sudo ip link set dev veth0_$name up',\n '',\n '#',\n '# Binding VID $vid to br_$name',\n '# Binding veth0_$name to br_$name',\n '#',\n 'sudo ip link add link br_ext br_ext.$vid type vlan id $vid',\n 'sudo ip link set dev br_ext.$vid up',\n 'sudo brctl addif br_$name veth0_$name',\n 'sudo brctl addif br_$name br_ext.$vid',\n ]).format(name, ip, vid, interface)\n return run_script(script)", "def create_vlan(module, switch, vlan_id, untagged_ports=None):\n global CHANGED_FLAG\n output = ''\n new_vlan = False\n\n cli = pn_cli(module)\n cli += ' vlan-show format id no-show-headers '\n existing_vlans = run_cli(module, cli)\n\n if existing_vlans is not None:\n existing_vlans = existing_vlans.split()\n if vlan_id not in existing_vlans:\n new_vlan = True\n\n if new_vlan or existing_vlans is None:\n cli = pn_cli(module)\n cli += ' vlan-create id %s scope fabric ' % vlan_id\n\n if untagged_ports is not None:\n cli += ' untagged-ports %s ' % untagged_ports\n\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created vlan with id %s\\n' % (switch, vlan_id)\n\n return output", "def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n self.neutron.create_port(network[\"id\"], **(port_create_args or {}))\n\n self.neutron.list_ports()", "def test_ipam_vlan_groups_create(self):\n pass", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def create_vlan(self, vlan_name, vlan_id, nexus_host, nexus_user,\n nexus_password, nexus_ports, nexus_ssh_port):\n with self.nxos_connect(nexus_host, int(nexus_ssh_port), nexus_user,\n nexus_password) as man:\n self.enable_vlan(man, vlan_id, vlan_name)\n vlan_ids = self.build_vlans_cmd()\n LOG.debug(\"NexusDriver VLAN IDs: %s\" % vlan_ids)\n for ports in nexus_ports:\n self.enable_vlan_on_trunk_int(man, ports, vlan_ids)", "def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or 
{}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n\n self.neutron.delete_port(port[\"id\"])", "def create_vlan_ports(self, ports=None, vlans=None, tagged='Tagged'):\n pass", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def add_subnets(self, router_name, netname):\n for subnet in self.router_data['properties']['networks'].keys():\n resource = str(router_name + '_' + subnet)\n subnet_resource = OrderedDict({ \n resource: {\n 'type': 'OS::Neutron::Subnet',\n 'properties': {\n 'name': resource,\n 'network_id': { \n 'get_resource': netname, \n },\n 'cidr': { \n 'get_param': resource + '_net_cidr'\n },\n 'gateway_ip': { \n 'get_param': resource + '_net_gateway'\n },\n 'allocation_pools': [{\n 'start': { 'get_param': resource + '_net_pool_start' },\n 'end': { 'get_param': resource + '_net_pool_end' }\n }],\n }\n }\n })\n self.template['resources'].update(subnet_resource)\n cidr = self.set_cidr(subnet)\n gw = self.set_gatewayIP(subnet, cidr)\n self.template['parameters'].update(OrderedDict({\n resource + '_net_cidr': {\n 'type': 'string',\n 'default': cidr\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_gateway': {\n 'type': 'string',\n 'default': gw\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_pool_start': {\n 'type': 'string',\n 'default': self.set_dhcp_pools(cidr)[0]\n }}))\n self.template['parameters'].update(OrderedDict({\n resource + '_net_pool_end': {\n 'type': 'string',\n 'default': self.set_dhcp_pools(cidr)[1]\n }}))", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def subnet_create(request, network_id, **kwargs):\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)", "def createLotsNetworks(proxy_url, sessiontoken,network_number):\n myHeader = {\"Content-Type\": \"application/json\",\"Accept\": \"application/json\", 'csp-auth-token': sessiontoken}\n for x in range(0,network_number):\n display_name = \"network-name\"+str(x)\n myURL = (proxy_url + \"/policy/api/v1/infra/tier-1s/cgw/segments/\" + display_name)\n # '/tier-1s/cgw' might only be applicable for multi tier-1s architecture. 
To be confirmed.\n # print(myURL)\n json_data = {\n \"subnets\":[{\"gateway_address\":\"10.200.\"+str(x)+\".1/24\"}],\n \"type\":\"ROUTED\",\n \"display_name\":display_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"id\":\"network-test\"+str(x)\n }\n response = requests.put(myURL, headers=myHeader, json=json_data)\n json_response_status_code = response.status_code", "def _create_vlan(self, conn, vlan_id, vlan_name):\n\n req_js = {}\n req_js['vlan_id'] = vlan_id\n req_js['vlan_name'] = vlan_name\n req_js['admin_state'] = 'up'\n\n resp = conn.post(self.VLAN_REST_OBJ, req_js)\n self._check_process_resp(resp)", "def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n\n self.neutron.get_port(port[\"id\"])", "def create_delete_ip_n_times_nova_vlan(self):\n self.env.revert_snapshot(\"ready_with_3_slaves\")\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=hlp_date.DEPLOYMENT_MODE\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['compute']\n }\n )\n self.fuel_web.update_vlan_network_fixed(\n cluster_id, amount=8, network_size=32)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf_repeatably(cluster_id)\n\n self.env.make_snapshot(\"create_delete_ip_n_times_nova_vlan\")", "def _create_network_resources(self, tenant_id):\n logger.info(\"Creating network resources...\")\n net_name = \"ostf-autoscaling-test-service-net\"\n net_body = {\n \"network\": {\n \"name\": net_name,\n \"tenant_id\": tenant_id\n }\n }\n ext_net = None\n net = None\n for network in self.neutron_cli.list_networks()[\"networks\"]:\n if not net and network[\"name\"] == net_name:\n net = network\n if not ext_net and network[\"router:external\"]:\n ext_net = network\n if not net:\n net = self.neutron_cli.create_network(net_body)[\"network\"]\n subnet = self.helpers.os_conn.create_subnet(\n \"sub\" + net_name, net[\"id\"], \"10.1.7.0/24\", tenant_id=tenant_id\n )\n router_name = 'ostf-autoscaling-test-service-router'\n router = self.helpers.os_conn.create_router(\n router_name, self.helpers.os_conn.get_tenant(\"admin\"))\n self.neutron_cli.add_interface_router(\n router[\"id\"], {\"subnet_id\": subnet[\"id\"]})\n return net[\"id\"]", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def create_instance_bulk(self, context, tenant_id, neutron_ports, vms,\n port_profiles, sync=False):", "def set_vlan(self, vlan, name=None, state='active', mode=None, shutstate=None, vdc=None):\n\n assert isinstance(vlan, str)\n assert isinstance(vdc, list)\n\n self.logger.debug(\"Creating vlan {} on {}\".format(vlan, self.host))\n\n vlan = _vlanexpand(vlan)\n vlanlist = vlan.split(',')\n failed_vlans = None\n\n for vdcname in vdc:\n self.switchto_vdc(vdcname)\n for v in vlanlist:\n self.logger.debug(\"Creating vlan {} in vdc {} on {}\".format(v, self.current_vdc, self.host))\n commands = [\"config t ; vlan {}\".format(v)]\n if name is not None:\n commands.append(\"name {}\".format(name))\n if state != 'active':\n commands.append(\"state {}\".format(state))\n if mode is not None:\n commands.append(\"mode {}\".format(mode))\n if shutstate is not None:\n if shutstate:\n commands.append(\"shutdown\")\n else:\n commands.append(\"no 
shutdown\")\n\n try:\n self._send_xml_cli(commands)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n stacktrace = traceback.extract_tb(exc_traceback)\n self.logger.error(\"VLAN configuration for vlan {} on {} failed\".format(vlan, self.host))\n self.logger.debug(sys.exc_info())\n self.logger.debug(stacktrace)\n self.get_vlans_detail(vdc=vdcname)", "def dvs_vcenter_networks(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n subnets = []\n networks = []\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n for net in self.net_data:\n logger.info('Create network {}'.format(net.keys()[0]))\n netw = os_conn.create_network(network_name=net.keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(net.keys()[0]))\n subnet = os_conn.create_subnet(subnet_name=net.keys()[0],\n network_id=netw['id'],\n cidr=net[net.keys()[0]],\n ip_version=4)\n\n subnets.append(subnet)\n networks.append(netw)\n\n self.show_step(3)\n for net in networks:\n assert_true(os_conn.get_network(net['name'])['id'] == net['id'])\n\n self.show_step(4)\n logger.info('Delete network net_1')\n os_conn.neutron.delete_subnet(subnets[0]['id'])\n os_conn.neutron.delete_network(networks[0]['id'])\n\n self.show_step(5)\n assert_true(os_conn.get_network(networks[0]) is None)\n\n self.show_step(6)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))\n # subnet\n os_conn.create_subnet(\n subnet_name=self.net_data[0].keys()[0],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n logger.info('Networks net_1 and net_2 are present.')" ]
[ "0.6673296", "0.6300114", "0.61748874", "0.6071634", "0.6015685", "0.60141206", "0.59842396", "0.59668964", "0.5962218", "0.5863182", "0.58117044", "0.5795901", "0.57938695", "0.5750605", "0.57131773", "0.56995904", "0.5686561", "0.56644434", "0.56219095", "0.55827266", "0.5571162", "0.55518854", "0.55334365", "0.5517596", "0.5473704", "0.5469129", "0.5454929", "0.54451406", "0.5438509", "0.5438231" ]
0.6486627
1
Updates an ethernet network. [Arguments]
def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None): return self.ethernet_network.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def update_net(self) -> None:\n self.units.update_net()", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def fusion_api_update_li_ethernet_settings(self, body=None, uri=None, api=None, headers=None):\n param = '/ethernetSettings'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def modify_network(self, username, machine_name, new_network, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n vmware.update_network(username, machine_name, new_network)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n logger.info('Task complete')\n return resp", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)", "def fusion_api_edit_fcoe_network(self, body=None, uri=None, api=None, headers=None):\n return self.fcoe_network.update(body, uri, api, headers)", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network 
{0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def update_networks(self, agent, force_hard=False):\n\n if self.update_type == \"soft\" and not force_hard:\n self._soft_update(agent.actor, agent.actor_target)\n self._soft_update(agent.critic, agent.critic_target)\n elif self.t_step % self.C == 0 or force_hard:\n self._hard_update(agent.actor, agent.actor_target)\n self._hard_update(agent.critic, agent.critic_target)", "def fusion_api_update_li_internal_networks(self, body=None, uri=None, api=None, headers=None):\n param = '/internalNetworks'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def test_networking_project_network_update(self):\n pass", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def setNetwork(self, network):\n # type: (str)->None\n\n self._validator.validate_one(\n 'network', VALID_OPTS['network'], network)\n self._ifAttributes['network'] = network", "def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def fusion_api_create_ethernet_network(self, body, api=None, headers=None):\n return self.ethernet_network.create(body, api, headers)", "def command_update(arguments):\n global current_name\n tag = arguments[0]\n if (len(arguments) == 2):\n old_target, new_target = (...), arguments[1]\n else:\n 
old_target, new_target = arguments[1:]\n\n to_replace = network[current_name, tag, old_target]\n if not len(to_replace):\n return '\"' + tag + ': ' + old_target + '\" - no such link for this entity'\n if len(to_replace) > 1:\n return 'Sorry, tag \"' + tag + '\" is ambiguous.'\n inverse_tag = to_replace[0].inverse_tag\n to_replace.unlink()\n network.addlink(current_name, tag, new_target, inverse_tag)\n\n return 'Updated link from \"' + tag + ': ' + old_target + '\" to \"' + tag + ': ' + new_target + '\"'", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def set_network(self, path, ip=\"\", netmask=\"255.255.255.0\", gateway=\"\"):\n\n with open(os.path.join(path, 'etc', 'network', 'interfaces'), 'w') \\\n as f:\n f.write(\"auto lo\\niface lo inet loopback\\n\\n\")\n\n if len(ip) <= 0:\n f.write(\"auto eth0\\niface eth0 inet dhcp\\n\")\n else:\n f.write(\"auto eth0\\niface eth0 inet static\\n\")\n f.write(\"\\taddress {0}\\n\\tnetmask {1}\\n\\tgateway {2}\\n\".\\\n format(ip, netmask, gateway))", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)" ]
[ "0.6428635", "0.6402031", "0.6374999", "0.62623984", "0.62599623", "0.6118094", "0.6089692", "0.60716885", "0.60629207", "0.60322446", "0.59647477", "0.5903338", "0.58663356", "0.5807716", "0.58008593", "0.5792203", "0.5781904", "0.57744235", "0.57427895", "0.57142216", "0.56874603", "0.5669434", "0.5665263", "0.56125885", "0.5579299", "0.5556263", "0.55523705", "0.5523674", "0.54938906", "0.54908884" ]
0.7585772
0
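
A minimal usage sketch for the record above, assuming `client` is an instance of the library class that defines fusion_api_edit_ethernet_network; the body and URI values are illustrative placeholders, not real resources.

body = {"name": "net-renamed", "vlanId": 100}  # hypothetical update payload
uri = "/rest/ethernet-networks/<id>"  # placeholder URI of an existing network
resp = client.fusion_api_edit_ethernet_network(body, uri)  # PUTs the body to the URI via ethernet_network.update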
Deletes ethernet networks in bulk based on name OR uri. [Arguments]
def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None): return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_network_bulk(self, tenant_id, network_id_list, sync=False):", "def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def delete_networks(self):\n logging.debug(\"cleanup called\")\n # for network in self.networks.key():\n # self.networks[network].delete()\n for network in self.networks.values():\n logging.warn(\"Deleting network '%s'\" % network)\n print \"Deleting network '%s'\" % network\n # print self.networks[network]\n network.delete()\n self.networks = {}", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)", "def cleanup_networks(self):\n for network in self.networks:\n try:\n network.remove()\n network.client.api.close()\n network.client.close()\n self.log_message(\n f'{dateutils.get_current_time()} '\n f'destroying docker network {network}'\n )\n except Exception:\n self.log_message(\n f'{dateutils.get_current_time()} ERROR: Could not remove docker '\n f'network {network}'\n )\n self.networks.clear()", "def fusion_api_create_ethernet_bulk_networks(self, body, api=None, headers=None):\n return self.ethernet_network.bulk_create(body, api, headers)", "def delete_network(self, network_o):\n tenant_mo = self.moDir.lookupByDn(network_o.group)\n\n # Filters the tenant children in memory looking for the ones that belongs to the Ap class with an specific name\n ap_list = filter(lambda x: type(x).__name__ == 'Ap' and x.name == AP_NAME,\n self.query_child_objects(str(tenant_mo.dn)))\n if len(ap_list) > 0:\n network_ap = ap_list[0]\n # Filters the tenant children in memory looking for the ones that belongs to the AEPg\n # class with an specific name\n network_epgs = filter(lambda x: type(x).__name__ == 'AEPg' and x.name == network_o.name + VLAN_SUFIX +\n str(network_o.encapsulation),\n self.query_child_objects(str(network_ap.dn)))\n # Removes EPG\n if len(network_epgs) > 0:\n network_epgs[0].delete()\n self.commit(network_epgs[0])\n\n # Filters the tenant children in memory looking for the ones that belongs to the BD class and with an specific\n # name\n bd_list = filter(lambda x: type(x).__name__ == 'BD' and x.name == VLAN + str(network_o.encapsulation),\n 
self.query_child_objects(str(tenant_mo.dn)))\n if len(bd_list) > 0:\n # Removes bridge domain\n bd_list[0].delete()\n self.commit(bd_list[0])", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete_networks(self, skip_list=None):\n skip_list = skip_list or []\n for account in self.accounts:\n rc, rsp = self.cal.get_virtual_link_list(account)\n\n for vlink in rsp.virtual_link_info_list:\n if vlink.name in skip_list:\n continue\n if self.user not in vlink.name:\n continue\n logger.info(\"Deleting Network: {}\".format(vlink.name))\n if self.dry_run:\n continue\n self.cal.delete_virtual_link(\n account,\n vlink.virtual_link_id)", "def test_delete_network(self):\n pass", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def remove_network(self, name_of_vm):\n try:\n # vmachine = self.get_vm_by_name(name_of_vm)\n vmachine = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n network = None\n devices = vmachine.config.hardware.device\n networks = []\n for device in devices:\n if isinstance(device, vim.vm.device.VirtualEthernetCard):\n networks.append(device)\n status = 'error'\n if not networks:\n log.info(\"INFO: No network adapters connected to the VM to remove\")\n status = 'success'\n else:\n for network in networks:\n name = network.deviceInfo.label\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network\n remove_nic = vim.vm.ConfigSpec()\n remove_nic.deviceChange = [nic_spec]\n task = WaitForTask(vmachine.ReconfigVM_Task(spec=remove_nic))\n\n if task == 'success':\n log.info(\"removed '{}' network adapter : {}\".format(name, name_of_vm))\n else:\n log.info(\"Could not '{}' Remove Network adapter: {}\".format(name, name_of_vm))\n status = 'success'\n return status\n except Exception as error:\n log.info(\"Error in 'remove_nic' keyword... 
{} \\n {}\".format(error, error.message))", "def delete_network(self, tenant_id, network_id, network_segments):\n self.delete_network_segments(tenant_id, network_segments)\n self.delete_network_bulk(tenant_id, [network_id])", "def fusion_api_delete_fcoe_network(self, name=None, uri=None, api=None, headers=None):\n return self.fcoe_network.delete(name, uri, api, headers)", "def delete_network_segments(self, tenant_id, network_segments):", "def rm_network(c):\n print('Stopping local test network and removing containers')\n with c.cd('images'):\n c.run('sudo docker-compose down -v', hide='stderr')\n\n c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')\n c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')\n c.run('sudo rm -rf volumes/stellar-core/tmp')", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def test_networking_project_network_delete(self):\n pass", "def test_delete_collection_host_subnet(self):\n pass", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('host', kwargs)", "def test_delete_host_subnet(self):\n pass", "def network_cleanup(self, args):\n pass", "def delete_network_postcommit(self, context):\n for _switch in self.switches:\n self._remove_from_switch(_switch, context)", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass" ]
[ "0.7098266", "0.63764846", "0.6231341", "0.621161", "0.610695", "0.5970672", "0.59668976", "0.5963505", "0.59593266", "0.5945155", "0.5904437", "0.5896795", "0.5861827", "0.58478886", "0.58101887", "0.57695425", "0.57567275", "0.5752265", "0.5718329", "0.57131994", "0.5688621", "0.5684943", "0.5643181", "0.5641179", "0.56240535", "0.5622364", "0.5619443", "0.56023145", "0.55916405", "0.55835074" ]
0.73143464
0
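
The positive document accepts either a name OR a URI; a hedged sketch under the same `client` assumption, with placeholder values.

resp = client.fusion_api_delete_ethernet_network(name="net-renamed")  # delete by name
resp = client.fusion_api_delete_ethernet_network(uri="/rest/ethernet-networks/<id>")  # or delete by URI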
Gets a default or paginated collection of Ethernet networks. [Arguments]
def fusion_api_get_ethernet_networks(self, uri=None, param='', api=None, headers=None): return self.ethernet_network.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]", "def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']", "def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()", "def show_networks():\n return get_networks()", "def GetNetworks(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n networks = self._SendRequest(HTTP_GET, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return networks\n else:\n return [n[\"name\"] for n in networks]", "def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def getNets(self):\n\t\treturn NetLoader.listNetworks()", "def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def list_networks(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('networks', self.networks_path, retrieve_all,\r\n **_params)", "def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]]:\n return pulumi.get(self, \"networks\")", "def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")", "def collectNet(self):\n network = self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks 
configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)", "def enumerate_network(arg):\n\n network = ip_network(arg, strict=False)\n data = list(map(str, network.hosts()))\n data.insert(0, str(network.network_address))\n if network.prefixlen != network.max_prefixlen:\n data.append(str(network.broadcast_address))\n return data", "def networks(self) -> Sequence['outputs.NetworkConfigResponse']:\n return pulumi.get(self, \"networks\")", "def list_networks():\n return __sets.keys()", "def test_get_networks(self):\n pass", "def do_network_list(cs, args):\n opts = {}\n opts['container'] = args.container\n opts = zun_utils.remove_null_parms(**opts)\n networks = cs.containers.network_list(**opts)\n zun_utils.list_container_networks(networks)", "def get_network_list(network = None, include_details = True):\n \n if network == None: \n json_obj = requests.get(api_base_url + 'networks')\n return json.loads(json_obj.content)['networks']\n rq_url = api_base_url + '{}/sites'.format(network)\n json_obj = requests.get(rq_url)\n sites_list = json.loads(json_obj.content)\n d = OrderedDict(zip([x.pop('network_siteid') for x in sites_list['sites']], \n sites_list['sites']))\n if include_details: return d\n return d.keys()", "def __get_scanning_range(self):\n if self.__network is not None:\n return [self.__network]\n networks = []\n interfaces = netifaces.interfaces()\n for data in interfaces:\n ips = netifaces.ifaddresses(data)\n for key, interface_data in ips.items():\n for item in interface_data:\n if item.get(\"netmask\", None) is not None and \\\n item.get(\"addr\", None) is not None and \\\n self.is_legal_ip(item[\"netmask\"]):\n if item.get(\"addr\") not in [\"127.0.0.1\", \"0.0.0.0\"]:\n network = \"{ip}/{cird}\".format(ip=item[\"addr\"],\n cird=IPAddress(item[\"netmask\"]).netmask_bits())\n if network not in networks:\n networks.append(network)\n return networks", "def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % (node),None)\n return data", "def netlist(self):\n return self._netlist", "def networks(self) -> dict:\n return self.data[\"networks\"]", "def list_networks(self, filters=None):\n # If the cloud is running nova-network, just return an empty list.\n if not self.has_service('network'):\n return []\n\n # Translate None from search interface to empty {} for kwargs below\n if not filters:\n filters = {}\n return list(self.network.networks(**filters))", "def networks(self): # type: () -> t.Optional[t.Dict[str, t.Dict[str, t.Any]]]\n return self.network_settings.get('Networks')", "def network_list(request):\n flatpage = get_flatpage_or_none(request)\n network_list = Network.objects.filter(user_id=0)\n\n return {\n 'flatpage': flatpage,\n 'network_list': network_list,\n }", "def __call__(self) -> list:\n return self.network", "def _useful_network(self):\n\n networks = self._compile_networks()\n\n network = []\n for n in 
networks:\n if len(n) >= self.min_network_size:\n network += list(n)\n\n return network", "def fusion_api_get_fc_networks(self, uri=None, param='', api=None, headers=None):\n return self.fc_network.get(uri=uri, api=api, headers=headers, param=param)", "def get_nets_other(self, response):\n\n nets = []\n\n # Iterate through all of the networks found, storing the CIDR value\n # and the start and end positions.\n for match in re.finditer(\n r'^(inetnum|inet6num|route):[^\\S\\n]+((.+?)[^\\S\\n]-[^\\S\\n](.+)|'\n '.+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n net = copy.deepcopy(BASE_NET)\n net_range = match.group(2).strip()\n\n try:\n\n net['range'] = net['range'] = '{0} - {1}'.format(\n ip_network(net_range)[0].__str__(),\n ip_network(net_range)[-1].__str__()\n ) if '/' in net_range else net_range\n\n except ValueError: # pragma: no cover\n\n net['range'] = net_range\n\n if match.group(3) and match.group(4):\n\n addrs = []\n addrs.extend(summarize_address_range(\n ip_address(match.group(3).strip()),\n ip_address(match.group(4).strip())))\n\n cidr = ', '.join(\n [i.__str__() for i in collapse_addresses(addrs)]\n )\n\n else:\n\n cidr = ip_network(net_range).__str__()\n\n net['cidr'] = cidr\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n\n except (ValueError, TypeError):\n\n pass\n\n return nets" ]
[ "0.70807594", "0.7058442", "0.6820558", "0.67844266", "0.6764509", "0.6748837", "0.672733", "0.6679583", "0.6646697", "0.66390085", "0.6600337", "0.64806616", "0.6420778", "0.63964254", "0.6385439", "0.6341666", "0.6318547", "0.6262977", "0.62547904", "0.6197826", "0.61162174", "0.60995144", "0.6084764", "0.6079074", "0.60400486", "0.6037184", "0.6025845", "0.6011626", "0.59902453", "0.5946447" ]
0.70728964
1
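
Since `param` is forwarded to the GET, pagination can be expressed as a query string; a sketch with illustrative values (the exact query parameters are an assumption, not confirmed by the record).

page = client.fusion_api_get_ethernet_networks(param="?start=0&count=25")  # paginated collection
one = client.fusion_api_get_ethernet_networks(uri="/rest/ethernet-networks/<id>")  # single resource by URI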
Gets the reserved vlan ID range for the fabric. [Arguments]
def fusion_api_get_fabric_reserved_vlan_range(self, uri=None, param='', api=None, headers=None): param = "/reserved-vlan-range%s" % param return self.fabric.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_range(self, channel):\n\n pr = self.device.query(f':PRANGE{channel}?')\n return pr", "def current_capacity_range(self):\n done, data = self._request('GC')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def get_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> List[JSON]:\n return _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_get_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def get_block_range(self, min=0, max=0):\n return self.call('blockchain', [min, max])", "def getRange(self, epRange):\n epRange = list(map(int, epRange.split('-')))\n if len(epRange) > 1:\n return list(range(epRange[0], epRange[1]+1))\n else:\n return epRange", "def get_available_networks(desired_cidr, reserved_networks):\n # If there are no reserved networks, then return that all 'desired_cidr' (Network Object) range is available\n if not reserved_networks:\n # Since there are no reserved network, the lower, and upper boundary of the 'desired_cidr' can be used\n return [PyVPCBlock(network=desired_cidr, block_available=True)]\n\n # in order to find/calculate available networks, reduce list of networks to only overlapping networks\n overlapping_networks = []\n for reserved_net in reserved_networks:\n if desired_cidr.overlaps(reserved_net.get_network()):\n # need to figure out how the reserved network is 'blocking' the desired cidr\n overlapping_networks.append(reserved_net)\n\n # If overlapping_networks is empty, then there where reserved networks, but did not overlapped\n if not overlapping_networks:\n return [PyVPCBlock(network=desired_cidr, block_available=True)]\n\n # Sort PyVPCBlock objects (overlapping networks) by the 'network' field, so it will be easier to calculate\n overlapping_networks = sorted(overlapping_networks, key=lambda x: x.network, reverse=False)\n\n networks_result = []\n range_head = desired_cidr[0] # Mark the start of calculation at the HEAD (view details above) point\n range_tail = desired_cidr[-1] # Mark the end of calculation at the TAIL (view details above) point\n\n # Iterate over the overlapping networks\n for reserved_net in overlapping_networks:\n # If the lower boundary of current range_head is smaller than the lower boundary of reserved_net\n # It means the 'reserved_net' network is necessarily from 'the right' of range_head, and its available\n if range_head < reserved_net.get_start_address():\n networks_result.append(PyVPCBlock(start_address=range_head,\n end_address=reserved_net.get_start_address() - 1,\n block_available=True,\n resource_type='available block'))\n\n # Append the overlapping network as NOT available\n networks_result.append(PyVPCBlock(network=reserved_net.get_network(), resource_id=reserved_net.get_id(),\n name=reserved_net.get_name()))\n\n # If the most upper address of current reserved_net (that is overlapping the 
desired_cidr),\n # is larger/equal than the most upper address of desired_cidr, then there is no point perform calculations\n if reserved_net.get_end_address() >= range_tail:\n break\n else: # Else there might be other overlapping networks,\n # head should always point to the next lower available address\n # so only if current head is \"from the left\" of most upper overlapping network, set it as new head,\n # As there might be a case of an inner network, see reserved_net (2) for details\n if range_head < reserved_net.get_end_address():\n # Set the new range_head value, to one ip address above the upper boundary of reserved_net\n range_head = reserved_net.get_end_address() + 1\n\n # If last iteration (here are no more overlapping networks, until the 'range_tail' address)\n if overlapping_networks.index(reserved_net) == len(overlapping_networks) - 1:\n networks_result.append(PyVPCBlock(start_address=range_head,\n end_address=range_tail,\n block_available=True))\n return networks_result", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def calculate_overlap_ranges(network, reserved_network):\n if network.overlaps(reserved_network):\n ranges = []\n\n # If the lower boundary of current head is smaller than the lower boundary of reserved_network\n # It means the 'reserved_network' network is necessarily from 'the right' of head, and its available\n if network[0] < reserved_network[0]:\n ranges.append({'lower_ip': network[0], 'upper_ip': reserved_network[0] - 1, 'available': True})\n\n # Append the overlapping network as NOT available\n ranges.append({'lower_ip': reserved_network[0], 'upper_ip': reserved_network[-1], 'available': False})\n\n if reserved_network[-1] < network[-1]:\n ranges.append({'lower_ip': reserved_network[-1] + 1, 'upper_ip': network[-1], 'available': True})\n return ranges\n else:\n return [{'lower_ip': network[0], 'upper_ip': network[-1], 'available': True}]", "def ReserveIds(self, request, global_params=None):\n config = self.GetMethodConfig('ReserveIds')\n return self._RunMethod(\n config, request, global_params=global_params)", "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def getRange(self):\n return self.range", "def range_partitioning(self) -> 'outputs.RangePartitioningResponse':\n return pulumi.get(self, \"range_partitioning\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def deployment_range(self):\n return self._packet.get('deployment-range', 25000)", "def run_id_range(job, context, graph_id, graph_name, chrom):\n work_dir = job.fileStore.getLocalTempDir()\n\n # download graph\n graph_filename = os.path.join(work_dir, graph_name)\n job.fileStore.readGlobalFile(graph_id, graph_filename)\n\n #run vg stats\n #expect result of form node-id-range <tab> 
first:last\n command = ['vg', 'stats', '--node-id-range', os.path.basename(graph_filename)]\n stats_out = context.runner.call(job, command, work_dir=work_dir, check_output = True).strip().split()\n assert stats_out[0].decode('ascii') == 'node-id-range'\n first, last = stats_out[1].split(b':')\n \n if isinstance(chrom, set):\n chrom = ','.join(sorted(chrom))\n \n return chrom, first, last", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_get_gain_range(self, *args)", "def getRange(self, c, name):\n self.validateChannel( name )\n limits = self.d[name].limits\n return limits", "def get_vlan_tag(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetVlanTag', self.handle)", "def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)", "def range_(self):\n return self.bset.range_", "def _build_range(self) -> str:\n return build_sequence(filter(None, (self.uids, self.sequence)))" ]
[ "0.5596663", "0.5426265", "0.54124767", "0.54124767", "0.54124767", "0.54124767", "0.52774036", "0.5215836", "0.5128084", "0.51148784", "0.51071477", "0.5103045", "0.50768065", "0.5070782", "0.5069289", "0.50628775", "0.505875", "0.5030819", "0.5030819", "0.5030819", "0.5030819", "0.50265247", "0.4992764", "0.49901906", "0.49885648", "0.49866945", "0.4972945", "0.49551678", "0.4925401", "0.49050584" ]
0.76662236
0
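
Note the wrapper builds param = "/reserved-vlan-range" + param before delegating, so the caller passes only the fabric URI; a sketch with a placeholder URI under the same `client` assumption.

resp = client.fusion_api_get_fabric_reserved_vlan_range(uri="/rest/fabrics/<id>")
# effective request path: /rest/fabrics/<id>/reserved-vlan-range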
Deletes a fabric based on name OR uri provided [Arguments]
def fusion_api_delete_fabric(self, name=None, uri=None, api=None, headers=None): return self.fabric.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_fabric_manager(self, name, uri=None, api=None, headers=None):\n return self.fabricmanager.delete(name=name, uri=uri, api=api, headers=headers)", "def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))", "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "def delete(self, uri, where, selectionArgs):\n pass", "def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)", "def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)", "def rm(args):\n args.delete = True\n return remove(args)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])", "def delete_from_provider(self, builder, provider, credentials, target, parameters):", "def delete(self, uri, body=None, headers=None, auth=False):\n return self.send_request('DELETE', uri, body, headers, auth)", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def delete_machine(args):\n session = Session()\n # the following is used to help with code completion\n \"\"\"session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\"\"\"\n machine = session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).first()\n if machine is not None:\n print \"Deleting machine with hostname: \" + machine.hostname + \" and with id: \" + str(machine.id)\n session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\n else:\n print \"No machine was found!\"", "def delete_fleet(Name=None):\n pass", "def delete(self, name):\n\n pass", "def delete_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n if hasattr(backend_class, 'delete'):\n return backend_class.delete(parsed_uri, **kwargs)", "def delete(self, host, file):", "def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)", "def delete(self, uri, **kwargs):\n return self.session.delete(uri, **kwargs)", "def delete():", "def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)", "def catalog_delete(self, args):\n headers = DEFAULT_HEADERS.copy()\n headers.update(args.headers)\n try:\n catalog = self.server.connect_ermrest(args.id)\n catalog.delete(args.path, headers)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def 
fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)", "def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}", "def do_remove(self, arg):\n jail_destroy('remove', arg)", "def delete(self, hostname):\n self.not_supported()", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def delete(cls, uri):\n return cls._perform_request(uri, 'DELETE')", "def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)" ]
[ "0.7209234", "0.644572", "0.6335949", "0.6312352", "0.63057125", "0.6299138", "0.62790596", "0.62148005", "0.6160461", "0.6147814", "0.61051947", "0.6067064", "0.60601664", "0.60313445", "0.60162103", "0.5998666", "0.59927475", "0.5990456", "0.5982862", "0.59563273", "0.595187", "0.5949206", "0.593989", "0.59335613", "0.59092605", "0.5907928", "0.58975005", "0.5886751", "0.5884571", "0.5854234" ]
0.7503989
0
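
A sketch of both deletion modes named in the query, under the same `client` assumption, with placeholder values.

resp = client.fusion_api_delete_fabric(name="Fabric-1")  # resolve by name
resp = client.fusion_api_delete_fabric(uri="/rest/fabrics/<id>")  # or delete by URI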
Patch a fabric based on uri provided [Arguments]
def fusion_api_patch_fabric(self, uri, body, api=None, headers=None): return self.fabric.patch(uri, body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_edit_fabric_manager(self, body, uri, api=None, headers=None):\n return self.fabricmanager.put(body=body, uri=uri, api=api, headers=headers)", "def patch(self, *args, **kwargs):\n self.request(\"patch\", *args, **kwargs)", "def patch(self, uri, data=None, **kwargs):\n return self.session.patch(uri, data=data, **kwargs)", "def fusion_api_fabric_manager_refresh(self, body, uri, api=None, headers=None):\n param = '/snapshot/'\n return self.fabricmanager.put(body=body, uri=uri, param=param, api=api, headers=headers)", "def patch(self, url, body=None, headers=None):\n return self._request('PATCH', url, body, headers)", "def fusion_api_patch_repository(self, uri, body=None, api=None, headers=None):\n\n return self.repository.patch(uri=uri, body=body, api=api, headers=headers)", "def test_patch(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.PATCH, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.patch(rest_url)", "def simulate_patch(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PATCH', path, **kwargs)", "def access_gemini_url_patch_method(context, endpoint):\n url = urljoin(context.gemini_api_url, endpoint)\n context.response = requests.patch(url)", "def patch(url, data=None, **kwargs):\n\n return request('patch', url, data=data, **kwargs)", "def fusion_api_patch_rack_manager(self, body, uri, api=None, headers=None):\n return self.rackmanager.patch(body=body, uri=uri, api=api, headers=headers)", "def fusion_api_patch_interconnect(self, body, uri, param='', api=None, headers=None):\n return self.ic.patch(body=body, uri=uri, api=api, headers=headers, param=param)", "def fusion_api_fabric_manager_remediate(self, body, uri, api=None, headers=None): # pylint: disable=unused-argument\n param = '/compliance/'\n\n return self.fabricmanager.put(body=body, uri=uri, api=None, headers=None, param=param)", "def _patch(self, url, json=None, **kwargs):\n kwargs = Connection._prepare_json_payload(json, **(kwargs or {}))\n return self._http.patch(self.cluster + url, timeout=self.timeout, **(kwargs or {}))", "def patch(url, to_error=_default_to_error, data=None, **kwargs):\n\n return request('patch', url, to_error=to_error, data=data, **kwargs)", "def patch(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError", "def patch(self, url_pattern):\n return self.route(url_pattern, methods=['PATCH'])", "def fusion_api_patch_enclosure(self, body, uri, api=None, headers=None, etag=None):\n return self.enclosure.patch(body, uri, api, headers, etag)", "def fusion_api_generic_patch(self, body, uri, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers\n uri = 'https://%s%s' % (self.fusion_client._host, uri)\n return self.fusion_client.patch(uri=uri, headers=headers, body=json.dumps(body))", "def patch(self, url_or_path, *args, **kwargs):\n return self.request.patch(url_or_path, *args, **kwargs).json()", "def fusion_api_edit_rack(self, body, uri, api=None, headers=None):\n return self.rack.update(body, uri, api, headers)", "def simulate_patch(self, path='/', **kwargs):\n return self.simulate_request('PATCH', path, **kwargs)", "def _reloadFabric(self, fabric):\n\n # Execute command to poweroff/on\n self.device.configure(\n 'poweroff xbar {}\\nno poweroff xbar {}'.format(fabric, fabric))", "def 
patch(self, endpoint, params=None, data=None):\n params = params or dict()\n data = data or dict()\n return self.request(verb=requests.patch, address=self.project_address + endpoint,\n params=params, data=data)", "def fusion_api_patch_li(self, body=None, uri=None, api=None, headers=None):\n return self.li.patch(body, uri, api, headers)", "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)", "def patch(self, url, params='', headers=None, extra_environ=None,\n status=None, upload_files=None, expect_errors=False,\n content_type=None):\n return self._gen_request(RequestMethods.PATCH,\n url, params=params, headers=headers,\n extra_environ=extra_environ, status=status,\n upload_files=upload_files,\n expect_errors=expect_errors,\n content_type=content_type)", "def patch(self, controller_fs_uuid, patch):\n raise exception.OperationNotPermitted", "def simulate_patch(self, path='/', **kwargs) -> _ResultBase:\n return self.simulate_request('PATCH', path, **kwargs)", "def patch(*args, **kwargs):\n return update(*args, patch=True, **kwargs)" ]
[ "0.6105052", "0.59244967", "0.5895854", "0.568877", "0.56826943", "0.56228423", "0.5620909", "0.55593264", "0.55578506", "0.5538882", "0.55009985", "0.54826397", "0.54781485", "0.54612875", "0.5444421", "0.54081976", "0.5402439", "0.5346956", "0.5307811", "0.53018963", "0.5295525", "0.5247928", "0.5240963", "0.5234355", "0.5222191", "0.51757544", "0.5141716", "0.51415986", "0.5138305", "0.51333725" ]
0.70708567
0
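
The record fixes the signature (uri, body) but not the payload shape; a JSON-Patch-style body is assumed here purely for illustration.

patch_body = [{"op": "replace", "path": "/name", "value": "fabric-renamed"}]  # assumed payload shape
resp = client.fusion_api_patch_fabric("/rest/fabrics/<id>", patch_body)  # positional: uri first, then body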
Create support dump for the specified fabric uri [Arguments]
def fusion_api_create_fabric_support_dump(self, uri, body, api=None, headers=None): params = '/support-dumps' return self.fabric.post(uri, body, api, headers, params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_create_support_dump(self, body, api=None, headers=None):\n return self.dump.create(body=body, api=api, headers=headers)", "def fusion_api_download_fabric_support_dump(self, uri, localfile, api=None, headers=None):\n return self.fabric.get_file(uri=uri, localfile=localfile, api=api, headers=headers)", "def fusion_api_download_support_dump(self, uri, localfile, api=None, headers=None):\n return self.dump.get(uri=uri, localfile=localfile, api=api, headers=headers)", "def create_dump(self) -> Dict[str, str]:\n return self.http.post(self.config.paths.dumps)", "def Create(ctx,\n name,\n attributes = None):\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n if(attributes is not None):\n kwargsDict = simplejson.loads(attributes)\n attributes = dict(**kwargsDict)\n\n ctx.logger.info(\"\"\"name = \"\"\"+str(name)+\"\"\";\"\"\"+\"\"\"attributes = \"\"\"+str(attributes)+\"\"\";\"\"\"+\"\")\n try:\n CreateBackupTargetResult = ctx.element.create_backup_target(name=name, attributes=attributes)\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(CreateBackupTargetResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def dump(config, args):\n timestamp = args.project.dump(args.node)\n print \"Dump ready at %s:%s\" % (args.node.hostname, \n args.project.dump_path(timestamp))", "def fusion_api_generate_li_forwarding_information_base_dump_file(self, uri, api=None, headers=None):\n param = '/forwarding-information-base'\n return self.li.post(uri=uri, api=api, headers=headers, param=param)", "def create_backup(ServerName=None, Description=None):\n pass", "def task_dump(self, localfile, withAttachments=True):\n with settings(user=self.serviceUser):\n with utils.tempfile() as temp:\n postgres.dumpToPath('trac', temp)\n\n files = {\n 'db.dump': temp,\n }\n\n if withAttachments is True:\n #files['attachments'] = 'attachments'\n files['trac-attachments'] = 'config/trac-env/files/attachments'\n\n archive.dump(files, localfile)", "def dump(args):\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()", "def test_bcftools_cli_dump(self):\n runner = CliRunner()\n result = runner.invoke(cli.main, [\"dump\"])\n assert result.exit_code == 0\n assert os.path.isfile(os.path.join(BASE_DIR, \"hmtnote_dump.pkl\"))", "def makepacks(config, history, comm, collection, database, host, port, years):\n des_db = database if database else 'ccsdm'\n des_tbl = collection if collection else 'booking_dump'\n CleanBookingDump(history, years, comm, des_tbl, des_db, host=host, port=port).execute()\n return", "def download_fabric_factory():\n local('hg clone http://bitbucket.org/yml/fabric_factory/')", "def runDump(self, listDump):\n\n if isinstance(listDump, list) is False:\n raise KeyError(\"listDump must be a list\")\n\n logger.debug(\"listDump: %s\", listDump)\n\n commandService = Command()\n\n\n for dump in listDump:\n \n try:\n logger.info(\"Dumping %s/%s in %s\" % (dump['service']['stack']['name'], dump['service']['name'], dump['target_dir']))\n environments = \"\"\n for env in dump['environments']:\n environments += \" -e '%s'\" % env.replace(':', '=')\n \n \n if 'entrypoint' in dump:\n entrypoint = 
\"--entrypoint='%s'\" % dump['entrypoint']\n else:\n entrypoint = ''\n \n # Check if folder to receive dump exist, else create it\n if os.path.isdir(dump['target_dir']) is False:\n os.makedirs(dump['target_dir'])\n logger.debug(\"Create directory '%s'\", dump['target_dir'])\n else:\n logger.debug(\"Directory '%s' already exist\", dump['target_dir'])\n \n commandService.runCmd(\"docker pull %s\" % dump['image'])\n \n for command in dump['commands']:\n dockerCmd = \"docker run --rm %s -v %s:%s %s %s %s\" % (entrypoint, dump['target_dir'], dump['target_dir'], environments, dump['image'], command)\n commandService.runCmd(dockerCmd)\n logger.info(\"Dump %s/%s is finished\" % (dump['service']['stack']['name'], dump['service']['name']))\n \n except Exception as e:\n logger.error(\"Error appear when dump '%s/%s', skip : %s\" % (dump['service']['stack']['name'], dump['service']['name'], e.message))\n # Don't beack backup if somethink wrong\n pass", "def dumpRancherDatabase(self, backupPath, listDatabaseSettings):\n\n if backupPath is None or backupPath == \"\":\n raise KeyError(\"backupPath must be provided\")\n if isinstance(listDatabaseSettings, dict) is False:\n raise KeyError(\"listDatabaseSettings must be provided\")\n\n if \"type\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database type\")\n if \"host\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database host\")\n if \"port\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database port\")\n if \"user\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database user\")\n if \"password\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database password\")\n if \"name\" not in listDatabaseSettings:\n raise KeyError(\"You must provide the database name\")\n\n commandService = Command()\n target_dir = \"%s/database\" % (backupPath)\n image = \"mysql:latest\"\n logger.info(\"Dumping the Rancher database '%s' in '%s'\", listDatabaseSettings['name'], target_dir)\n\n if os.path.isdir(target_dir) is False:\n os.makedirs(target_dir)\n logger.debug(\"Create directory '%s'\", target_dir)\n else:\n logger.debug(\"Directory '%s' already exist\", target_dir)\n\n commandService.runCmd(\"docker pull %s\" % image)\n command = \"sh -c 'mysqldump -h %s -P %s -u %s %s > %s/%s.dump'\" % (listDatabaseSettings['host'], listDatabaseSettings['port'], listDatabaseSettings['user'], listDatabaseSettings['name'], target_dir, listDatabaseSettings['name'])\n dockerCmd = \"docker run --rm -v %s:%s -e 'MYSQL_PWD=%s' %s %s\" % (target_dir, target_dir, listDatabaseSettings['password'], image, command)\n commandService.runCmd(dockerCmd)\n logger.info(\"Dump Rancher database is finished\")", "def main():\n snap_name = 'REST_Snap_' + strftime('%d%m%Y%H%M%S')\n ru.replication.create_storagegroup_snap(sg_id, snap_name)\n print('Check the Gui now or REST Client to see if snapshot '\n '{snap_name} was created for Storge Group {sg_id}'\n .format(snap_name=snap_name, sg_id=sg_id))", "async def database_create_backup(self, target: Union[str, Path] = None):\n bcfg = self.config[\"Database\"][\"Backup\"]\n backup_dir = Path(bcfg.get(\"BackupDir\", f\"{self._data_dir}/backup\")).expanduser()\n if not backup_dir.is_absolute():\n backup_dir = self._data_dir / backup_dir\n backup_dir.mkdir(parents=True, exist_ok=True)\n if target is None:\n fmt = bcfg.get(\"Format\", \"%FT%H%M%S_zerobot.sqlite\")\n now = datetime.datetime.now()\n target = backup_dir / now.strftime(fmt)\n else:\n if 
not isinstance(target, Path):\n target = Path(target)\n if not target.is_absolute():\n target = backup_dir / target\n # TODO: MaxBackups\n await zbdb.create_backup(self.database, target, self.eventloop)", "def mysqldump():\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db.sql\")", "def hexdump(args=None):\n args = parser.parse_args(args)\n with LogSetup(args):\n contents = args.file.read()\n args.file.close()\n dump(contents, width=args.width)", "def fRenderTargetBackupTab():\n node = nuke.thisNode()\n # create tab an button\n tab = nuke.Tab_Knob(\"fRenderTargetBackup_tab\",\"Backup Renders\")\n button = nuke.PyScript_Knob('backup')\n button.setCommand('import dmptools.utils.nukeCommands as nc;nc.fRenderTargetBackup()')\n button.setName('backup renders')\n button.setLabel('backup!')\n button.setTooltip('backup renders to a directory in /tmp/fRenderTarget/<current time>')\n \n # create checkbox\n checkBox = nuke.Boolean_Knob(\"userCustomPath\",\"Use custom path\")\n checkBox.setValue(False)\n # add output textfield\n output = nuke.File_Knob('output', 'backup path')\n output.setValue('/tmp/fRenderTarget/')\n\n # add knobs to the node\n node.addKnob(tab)\n node.addKnob(button)\n node.addKnob(checkBox)\n node.addKnob(output)", "def Run(self, args):\n project = properties.VALUES.core.project.Get(required=True)\n zone = {}\n zone['dnsName'] = args.dns_name\n zone['name'] = args.zone\n zone['description'] = args.description\n\n really = console_io.PromptContinue('Creating %s in %s' % (zone, project))\n if not really:\n return\n\n dns = self.context['dns']\n request = dns.managedZones().create(project=project, body=zone)\n try:\n result = request.execute()\n return result\n except errors.HttpError as error:\n raise exceptions.HttpException(util.GetError(error))\n except errors.Error as error:\n raise exceptions.ToolException(error)", "def create_zfs_snapshot(self, name, source_zv_name, zfs_type='default'):\n pname=self.poolname + '/' + source_zv_name + '@' + name\n cmdstr=['zfs','snapshot',pname]\n\n try:\n self._execute(*cmdstr,root_helper=self.r_helper,run_as_root=True) \n except putils.ProcessExecutionError as err:\n LOG.error(_('Cmd :%s') % err.cmd)\n LOG.error(_('StdOut :%s') % err.stdout)\n LOG.error(_('StdErr :%s') % err.stderr)\n raise NameError('Error:failed to create snapshot for zfs volume:%s' % source_zv_name)", "def run_backup():\n host = re.search(\"([\\w.-]+)[:]?\", env.host).group()\n date = time.strftime('%Y%m%d%H%M%S')\n fname = '%(host)s-backup-%(date)s.gz' % {'date': date, 'host': host}\n green(\"Ingrese la contraseña de la clave privada local.\")\n sudo(\"pg_dump kine | gzip > /tmp/%s\" % fname, user=\"postgres\")\n get(\"/tmp/%s\" % fname, os.path.join(backup_dir, fname))\n sudo(\"rm /tmp/%s\" % fname, user=\"postgres\")", "def createBackupScript(self, wrapper):\n content = textwrap.dedent(\"\"\"\\\n #!/bin/sh\n umask 077\n %(bin)s/pg_dump \\\\\n --host=%(pgdata-directory)s \\\\\n --username postgres \\\\\n --format=custom \\\\\n --file=%(backup-directory)s/database.dump \\\\\n %(dbname)s\n \"\"\" % self.options)\n self.createExecutable(wrapper, content=content)", "def exportBulletFile(*argv):", "def main(args: Optional[Sequence[str]] = None):\n\n setup_logging()\n args = parse_args(args)\n now = datetime.utcnow()\n\n with doing(\"Parsing remote configuration\"):\n wp_config = parse_wp_config(args.source)\n\n with TemporaryDirectory() as d:\n work_location = parse_location(d, args.compression_mode)\n\n with doing(\"Saving settings\"):\n 
dump_settings(args, wp_config, now, join(d, \"settings.json\"))\n\n if args.maintenance_mode is True:\n with doing(\"Activate maintenance mode\"):\n activate_maintenance_mode(args.source)\n\n try:\n with doing(\"Copying database\"):\n db = create_from_source(wp_config, args.source, args.db_host)\n db.dump_to_file(join(d, \"dump.sql\"))\n\n with doing(\"Copying files\"):\n copy_files(args.source, work_location.child(\"wordpress\"), args.exclude, args.exclude_tag_all)\n\n finally:\n if args.maintenance_mode is True:\n with doing(\"Deactivate maintenance mode\"):\n deactivate_maintenance_mode(args.source)\n\n with doing(\"Writing archive\"):\n args.backup_dir.ensure_exists_as_dir()\n archive_location = make_dump_file_name(args, wp_config, now)\n\n archive_location.archive_local_dir(d, doing)\n doing.logger.info(\"Wrote archive %s\", archive_location)\n\n return archive_location", "def main(args): \n if args.type == 'FILEGDB':\n create_filegdb(args.name, args.path)\n elif args.type == 'ST_GEOMETRY' or args.type == 'SPATIALITE':\n create_sqlitedb(args.name, args.type, args.path)", "def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def snapshot( self, **kw ):\n try:\n# host = '127.0.0.1'\n# port = '1312'\n host_url = request.headers.get( 'Host' )\n positions = kw.get( 'positions', '' )\n url = kw.get( \"snapshot_url\" )\n jobno = kw.get( \"jobno\", '' ).strip()\n files = kw.get( \"file_exts\", '' )\n# log_id = int(kw.get(\"id\", 0))\n# qty = int(kw.get(\"qty\", 0))\n if url and positions and files:\n http_url = 'http://%s/sample/%s' % ( host_url, url )\n phantomjs = os.path.join( config.get( 'public_dir' ), 'phantomjs', 'phantomjs.exe' )\n snapshotjs = os.path.join( config.get( 'public_dir' ), 'phantomjs', 'snapshot.js' )\n img_dir = os.path.join( config.get( 'public_dir' ), 'upload', 'snapshot' )\n if not os.path.exists( img_dir ):\n os.makedirs( img_dir )\n # Usage: phantomjs.exe snapshot.js URL positions img_dir\n sp = subprocess.Popen( \"%s %s %s %s %s %s\" % ( phantomjs, snapshotjs, http_url, positions, img_dir, files ),\n stdout = subprocess.PIPE, stderr = subprocess.STDOUT )\n file_list = []\n dlzipFile = os.path.join( img_dir, \"%s_%s%d.zip\" % ( jobno, dt.now().strftime( \"%Y%m%d%H%M%S\" ),\n random.randint( 1, 1000 ) ) )\n while 1:\n if sp.poll() is not None:\n # print 'exec command completed.'\n break\n else:\n line = sp.stdout.readline().strip()\n # print line\n if line.endswith( '.pdf' ) or line.endswith( '.png' ):\n # zip to download\n file_list.append( line )\n if file_list:\n dlzip = zipfile.ZipFile( dlzipFile, \"w\", zlib.DEFLATED )\n for fl in file_list:\n dlzip.write( os.path.abspath( str( fl ) ), os.path.basename( str( fl ) ) )\n dlzip.close()\n try:\n for fl in file_list:\n os.remove( fl )\n except:\n pass\n return serveFile( unicode( dlzipFile ) )\n else:\n raise Exception( 'No file generated!' )\n except Exception, e:\n log.exception( str( e ) )\n flash( \"Error occor on the server side!\", 'warn' )\n redirect( \"/sample/%s\" % url )" ]
[ "0.5737798", "0.57283634", "0.55196244", "0.5374423", "0.53324497", "0.5213368", "0.5187489", "0.5109578", "0.5102413", "0.5096001", "0.5061058", "0.49354082", "0.4877158", "0.4850295", "0.4795383", "0.47656068", "0.47244245", "0.46866015", "0.46810403", "0.46444815", "0.46431068", "0.4634327", "0.46238425", "0.46200988", "0.4618152", "0.46143663", "0.46033734", "0.45909908", "0.4582321", "0.45779392" ]
0.77192104
0
Updates an fc network. [Arguments]
def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None): return self.fc_network.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_edit_fcoe_network(self, body=None, uri=None, api=None, headers=None):\n return self.fcoe_network.update(body, uri, api, headers)", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def fusion_api_patch_fcoe_network(self, body=None, uri=None, api=None, headers=None):\n return self.fcoe_network.patch(body, uri, api, headers)", "def update_net(self) -> None:\n self.units.update_net()", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def _update_nn(self, bad_feats, good_feats, rate):\n self.nn.update(bad_feats, good_feats, rate)", "def update_networks(self, agent, force_hard=False):\n\n if self.update_type == \"soft\" and not force_hard:\n self._soft_update(agent.actor, agent.actor_target)\n self._soft_update(agent.critic, agent.critic_target)\n elif self.t_step % self.C == 0 or force_hard:\n self._hard_update(agent.actor, agent.actor_target)\n self._hard_update(agent.critic, agent.critic_target)", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def alter_network(self, add=[], remove=[]):\n\n # make the required changes\n # NOTE: remove existing edges *before* adding new ones. \n # if edge e is in `add`, `remove` and `self.network`, \n # it should exist in the new network. 
(the add and remove cancel out.\n self.network.edges.remove_many(remove)\n self.network.edges.add_many(add) \n\n # check whether changes lead to valid DAG (raise error if they don't)\n affected_nodes = set(unzip(add, 1))\n if affected_nodes and not self.network.is_acyclic(affected_nodes):\n self.network.edges.remove_many(add)\n self.network.edges.add_many(remove)\n raise CyclicNetworkError()\n \n \n # accept changes: \n # 1) determine dirtynodes\n # 2) backup state\n # 3) score network (but only rescore dirtynodes)\n self.dirtynodes.update(set(unzip(add+remove, 1)))\n self._backup_state(add, remove)\n self.score = self._score_network_core()\n #print\"calculated score = \" + str(self.score)\n return self.score", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def update(self, args):\n pass", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def update():", "def update():", "def update(self):\n self.brain.update()", "def update(*args):", "def update(self):\r\n self.g = self.create_graph()", "def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)", "def update_edge(self, _id, source=None, target=None, name=None, data={}):\n return self.make_request(\"PUT\", \"nodes/\"+_id, { \"id\" : name, \"source\" : source, \"target\" : target, \"data\" : data })", "def main():\n parser = ArgumentParser(description=\"Update FCOE device udev persisted \"\n \"ordering.\")\n parser.add_argument(\"--prefix\", \"-p\", default=\"/target\",\n help=\"System files will be accessed under this \"\n \"prefix\")\n parser.add_argument(\"--sys-prefix\", \"-s\", default=\"/\",\n help=\"The /sys file system files will be accessed \"\n \"under this prefix\")\n args = parser.parse_args()\n NetworkDeviceManager(args.prefix, args.sys_prefix).process_system()" ]
[ "0.70301336", "0.66117173", "0.63020337", "0.62582415", "0.6241698", "0.6082638", "0.6021473", "0.59408474", "0.5880256", "0.57745713", "0.57460856", "0.56439745", "0.56274563", "0.5614248", "0.5601011", "0.5583813", "0.5581535", "0.55612636", "0.5538296", "0.54920584", "0.5464681", "0.5450278", "0.5444612", "0.5444612", "0.543678", "0.54290366", "0.54262763", "0.54087436", "0.53991485", "0.53952885" ]
0.7451471
0
Deletes an fc network from the appliance based on name OR uri [Arguments]
def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None): return self.fc_network.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_fcoe_network(self, name=None, uri=None, api=None, headers=None):\n return self.fcoe_network.delete(name, uri, api, headers)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)", "def delete_overlay_network(self, name=NETWORK_NAME):\n try:\n # An overlay network is usually created in host belonging to a swarm\n self.leave_swarm()\n network = self.docker_client.networks.get(name)\n network.remove()\n except docker.errors.NotFound as nf:\n print(\"Network \"+name+\" not found\")\n except docker.errors.APIError as de:\n print(\"Error deleting overlay network\")\n print de\n exit(1)\n return", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def fusion_api_delete_fabric(self, name=None, uri=None, api=None, headers=None):\n return self.fabric.delete(name, uri, api, headers)", "def test_delete_network(self):\n pass", "def delete(self): \n params = 
{'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def hfp_delete(handle, org_dn, name):\r\n\r\n dn = org_dn + \"/fw-host-pack-\" + name\r\n mo = handle.query_dn(dn)\r\n if mo is None:\r\n raise ValueError(\"HFP '%s' does not exist\" % dn)\r\n\r\n handle.remove_mo(mo)\r\n handle.commit()", "def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 
201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def remove_network(self, name_of_vm):\n try:\n # vmachine = self.get_vm_by_name(name_of_vm)\n vmachine = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n network = None\n devices = vmachine.config.hardware.device\n networks = []\n for device in devices:\n if isinstance(device, vim.vm.device.VirtualEthernetCard):\n networks.append(device)\n status = 'error'\n if not networks:\n log.info(\"INFO: No network adapters connected to the VM to remove\")\n status = 'success'\n else:\n for network in networks:\n name = network.deviceInfo.label\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network\n remove_nic = vim.vm.ConfigSpec()\n remove_nic.deviceChange = [nic_spec]\n task = WaitForTask(vmachine.ReconfigVM_Task(spec=remove_nic))\n\n if task == 'success':\n log.info(\"removed '{}' network adapter : {}\".format(name, name_of_vm))\n else:\n log.info(\"Could not '{}' Remove Network adapter: {}\".format(name, name_of_vm))\n status = 'success'\n return status\n except Exception as error:\n log.info(\"Error in 'remove_nic' keyword... 
{} \\n {}\".format(error, error.message))", "def delete_network_profile(arn=None):\n pass", "def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data", "def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)", "def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def fusion_api_delete_lig(self, name=None, uri=None, api=None, headers=None, etag=None):\n return self.lig.delete(name=name, uri=uri, api=api, headers=headers, etag=etag)" ]
[ "0.73524505", "0.718665", "0.71359557", "0.6984148", "0.684205", "0.68380195", "0.6440958", "0.6291581", "0.62819016", "0.6266252", "0.6256593", "0.62496793", "0.6151301", "0.61411", "0.61384004", "0.6127418", "0.6126441", "0.6062192", "0.60439926", "0.6012743", "0.60037327", "0.6002046", "0.59939516", "0.5980625", "0.5944177", "0.59402025", "0.5938199", "0.5892026", "0.5881957", "0.5874272" ]
0.7994204
0
Gets a default or paginated collection of FC networks. [Arguments]
def fusion_api_get_fc_networks(self, uri=None, param='', api=None, headers=None): return self.fc_network.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_networks():\n return get_networks()", "def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]", "def getNets(self):\n\t\treturn NetLoader.listNetworks()", "def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def network_list(request):\n flatpage = get_flatpage_or_none(request)\n network_list = Network.objects.filter(user_id=0)\n\n return {\n 'flatpage': flatpage,\n 'network_list': network_list,\n }", "def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()", "def list_networks(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('networks', self.networks_path, retrieve_all,\r\n **_params)", "def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']", "def fusion_api_get_fcoe_networks(self, uri=None, param='', api=None, headers=None):\n return self.fcoe_network.get(uri=uri, api=api, headers=headers, param=param)", "def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def do_network_list(cs, args):\n opts = {}\n opts['container'] = args.container\n opts = zun_utils.remove_null_parms(**opts)\n networks = cs.containers.network_list(**opts)\n zun_utils.list_container_networks(networks)", "def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]]:\n return pulumi.get(self, \"networks\")", "def list_networks():\n return __sets.keys()", "def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def GetNetworks(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n networks = self._SendRequest(HTTP_GET, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return networks\n else:\n return [n[\"name\"] for n in networks]", "def collectNet(self):\n network 
= self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)", "def list_networks(session):\n # type: (Session) -> List[Dict[str, Any]]\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n return _get_list(session, url_tail)", "def fusion_api_get_network_set(self, uri=None, param='', api=None, headers=None):\n return self.network_set.get(uri=uri, api=api, headers=headers, param=param)", "def __call__(self) -> list:\n return self.network", "def list_networks(self, filters=None):\n # If the cloud is running nova-network, just return an empty list.\n if not self.has_service('network'):\n return []\n\n # Translate None from search interface to empty {} for kwargs below\n if not filters:\n filters = {}\n return list(self.network.networks(**filters))", "def _useful_network(self):\n\n networks = self._compile_networks()\n\n network = []\n for n in networks:\n if len(n) >= self.min_network_size:\n network += list(n)\n\n return network", "def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")", "def netlist(self):\n return self._netlist", "def networks(self) -> Sequence['outputs.NetworkConfigResponse']:\n return pulumi.get(self, \"networks\")", "def get_network_names(self): # type: () -> t.Optional[t.List[str]]\n if self.networks is None:\n return None\n\n return sorted(self.networks)", "def networks(self) -> dict:\n return self.data[\"networks\"]", "def get_network_list(network = None, include_details = True):\n \n if network == None: \n json_obj = requests.get(api_base_url + 'networks')\n return json.loads(json_obj.content)['networks']\n rq_url = api_base_url + '{}/sites'.format(network)\n json_obj = requests.get(rq_url)\n sites_list = json.loads(json_obj.content)\n d = OrderedDict(zip([x.pop('network_siteid') for x in sites_list['sites']], \n sites_list['sites']))\n if include_details: return d\n return d.keys()", "def test_get_networks(self):\n pass", "def getSDDCnetworks(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n json_response = get_cgw_segments_json(proxy, sessiontoken)\n if json_response != False:\n sddc_networks = json_response['results']\n table = PrettyTable(['Name', 'id', 'Type', 'Network', 'Default Gateway'])\n table_extended = PrettyTable(['Name', 'id','Tunnel ID'])\n for i in sddc_networks:\n if ( i['type'] == \"EXTENDED\"):\n 
table_extended.add_row([i['display_name'], i['id'], i['l2_extension']['tunnel_id']])\n elif ( i['type'] == \"DISCONNECTED\"):\n table.add_row([i['display_name'], i['id'], i['type'],\"-\", \"-\"])\n else:\n table.add_row([i['display_name'], i['id'], i['type'], i['subnets'][0]['network'], i['subnets'][0]['gateway_address']])\n print(\"Routed Networks:\")\n print(table)\n print(\"Extended Networks:\")\n print(table_extended)\n else:\n print(\"Something went wrong, please try again.\")\n sys.exit(1)", "def getnetnodes(self):\n # GetNetNodes2_bn is not listed in the API manual, but GetNetNodes_bn\n # is. Looks like an update to the API that is undocumented.\n\n # (const net_bn* net, const char options[])\n zerochar_type = c_char * 0\n cnetica.GetNetNodes2_bn.argtypes = [c_void_p, zerochar_type]\n cnetica.GetNetNodes2_bn.restype = c_void_p\n return cnetica.GetNetNodes2_bn(self.net, zerochar_type()) # nl_p" ]
[ "0.67147046", "0.6619213", "0.6521911", "0.6514606", "0.6473162", "0.6462291", "0.638875", "0.6371116", "0.6314734", "0.62708455", "0.62574273", "0.6198671", "0.6173483", "0.6166899", "0.6103448", "0.60939974", "0.6054709", "0.60487527", "0.60389477", "0.5981799", "0.597224", "0.59341264", "0.59216094", "0.58819926", "0.58569986", "0.5781619", "0.57499236", "0.5722716", "0.56939656", "0.5663704" ]
0.7045729
0
Updates an FCoE network. [Arguments]
def fusion_api_edit_fcoe_network(self, body=None, uri=None, api=None, headers=None): return self.fcoe_network.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)", "def fusion_api_patch_fcoe_network(self, body=None, uri=None, api=None, headers=None):\n return self.fcoe_network.patch(body, uri, api, headers)", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)", "def update_net(self) -> None:\n self.units.update_net()", "def update_networks(self, agent, force_hard=False):\n\n if self.update_type == \"soft\" and not force_hard:\n self._soft_update(agent.actor, agent.actor_target)\n self._soft_update(agent.critic, agent.critic_target)\n elif self.t_step % self.C == 0 or force_hard:\n self._hard_update(agent.actor, agent.actor_target)\n self._hard_update(agent.critic, agent.critic_target)", "def main():\n parser = ArgumentParser(description=\"Update FCOE device udev persisted \"\n \"ordering.\")\n parser.add_argument(\"--prefix\", \"-p\", default=\"/target\",\n help=\"System files will be accessed under this \"\n \"prefix\")\n parser.add_argument(\"--sys-prefix\", \"-s\", default=\"/\",\n help=\"The /sys file system files will be accessed \"\n \"under this prefix\")\n args = parser.parse_args()\n NetworkDeviceManager(args.prefix, args.sys_prefix).process_system()", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def command_update(arguments):\n global current_name\n tag = arguments[0]\n if (len(arguments) == 2):\n old_target, new_target = (...), arguments[1]\n else:\n old_target, new_target = arguments[1:]\n\n to_replace = network[current_name, tag, old_target]\n if not len(to_replace):\n return '\"' + tag + ': ' + old_target + '\" - no such link for this entity'\n if len(to_replace) > 1:\n return 'Sorry, tag \"' + tag + '\" is ambiguous.'\n inverse_tag = to_replace[0].inverse_tag\n to_replace.unlink()\n network.addlink(current_name, tag, new_target, inverse_tag)\n\n return 'Updated link from \"' + tag + ': ' + old_target + '\" to \"' + tag + ': ' + new_target + '\"'", "def test_networking_project_network_update(self):\n pass", "def execute(self, nodenet, nodes, netapi):\n pass # pragma: no cover", "def update(*args):", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def update(self, args):\n pass", "def network_node_changed(self, node=None, value=None, args=None):\n if node and node.node_id != self.node_id:\n return\n if args is not None and \"nodeId\" in args and args[\"nodeId\"] != self.node_id:\n return\n\n # Process central scene activation\n if value is not None and value.command_class == COMMAND_CLASS_CENTRAL_SCENE:\n self.central_scene_activated(value.index, value.data)\n\n self.maybe_update_application_version(value)\n\n self.node_changed()", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def modify_network(self, username, machine_name, new_network, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n vmware.update_network(username, machine_name, new_network)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n logger.info('Task complete')\n return resp", "def update_target_net(self, sess):\n sess.run(self.update_target_net_op)", "def alter_network(self, 
add=[], remove=[]):\n\n # make the required changes\n # NOTE: remove existing edges *before* adding new ones. \n # if edge e is in `add`, `remove` and `self.network`, \n # it should exist in the new network. (the add and remove cancel out.\n self.network.edges.remove_many(remove)\n self.network.edges.add_many(add) \n\n # check whether changes lead to valid DAG (raise error if they don't)\n affected_nodes = set(unzip(add, 1))\n if affected_nodes and not self.network.is_acyclic(affected_nodes):\n self.network.edges.remove_many(add)\n self.network.edges.add_many(remove)\n raise CyclicNetworkError()\n \n \n # accept changes: \n # 1) determine dirtynodes\n # 2) backup state\n # 3) score network (but only rescore dirtynodes)\n self.dirtynodes.update(set(unzip(add+remove, 1)))\n self._backup_state(add, remove)\n self.score = self._score_network_core()\n #print\"calculated score = \" + str(self.score)\n return self.score", "def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)", "def update():", "def update():", "def update_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.update_network(network)\n except:\n pass", "def _update_nn(self, bad_feats, good_feats, rate):\n self.nn.update(bad_feats, good_feats, rate)" ]
[ "0.6986288", "0.6290593", "0.62284696", "0.60617834", "0.5997263", "0.59471333", "0.5898901", "0.5857858", "0.56189364", "0.56040245", "0.5580722", "0.54976", "0.54807156", "0.5430855", "0.540189", "0.5400122", "0.53872204", "0.5369302", "0.5361647", "0.53307605", "0.5322316", "0.5294867", "0.52825636", "0.5279019", "0.5253027", "0.52436435", "0.524313", "0.524313", "0.523623", "0.52309" ]
0.7037386
0
Gets a default or paginated collection of FCoE networks. [Arguments]
def fusion_api_get_fcoe_networks(self, uri=None, param='', api=None, headers=None): return self.fcoe_network.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_fc_networks(self, uri=None, param='', api=None, headers=None):\n return self.fc_network.get(uri=uri, api=api, headers=headers, param=param)", "def show_networks():\n return get_networks()", "def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]", "def getNets(self):\n\t\treturn NetLoader.listNetworks()", "def network_list(request):\n flatpage = get_flatpage_or_none(request)\n network_list = Network.objects.filter(user_id=0)\n\n return {\n 'flatpage': flatpage,\n 'network_list': network_list,\n }", "def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()", "def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']", "def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def __call__(self) -> list:\n return self.network", "def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]]:\n return pulumi.get(self, \"networks\")", "def do_network_list(cs, args):\n opts = {}\n opts['container'] = args.container\n opts = zun_utils.remove_null_parms(**opts)\n networks = cs.containers.network_list(**opts)\n zun_utils.list_container_networks(networks)", "def network_list(self, kwargs=None):\n try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def list_networks(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('networks', self.networks_path, retrieve_all,\r\n **_params)", "def collectNet(self):\n network = self.options.net\n # net option from the config file is a string\n if isinstance(network, basestring):\n network = [network]\n # in case someone uses 10.0.0.0,192.168.0.1 instead of\n # --net 10.0.0.0 --net 192.168.0.1\n if isinstance(network, (list, tuple)) and \",\" in network[0]:\n network = [n.strip() 
for n in network[0].split(\",\")]\n count = 0\n devices = []\n if not network:\n network = yield self.config().callRemote(\"getDefaultNetworks\")\n\n if not network:\n self.log.warning(\"No networks configured\")\n defer.returnValue(None)\n\n for net in network:\n try:\n nets = yield self.config().callRemote(\n \"getNetworks\", net, self.options.subnets\n )\n if not nets:\n self.log.warning(\"No networks found for %s\", net)\n continue\n ips = yield self.discoverIps(nets)\n devices += ips\n count += len(ips)\n except Exception as ex:\n self.log.exception(\n \"Error performing net discovery on %s: %s\", net, ex\n )\n self.log.info(\"Working on devices: %s\", devices)\n\n foundDevices = []\n for device in devices:\n result = yield self.discoverDevice(\n device, self.options.deviceclass, self.options.productionState\n )\n if result is not None:\n foundDevices.append(result)\n defer.returnValue(foundDevices)", "def GetNetworks(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n networks = self._SendRequest(HTTP_GET, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return networks\n else:\n return [n[\"name\"] for n in networks]", "def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")", "def networks(self) -> Sequence['outputs.NetworkConfigResponse']:\n return pulumi.get(self, \"networks\")", "def fusion_api_get_network_set(self, uri=None, param='', api=None, headers=None):\n return self.network_set.get(uri=uri, api=api, headers=headers, param=param)", "def list_networks(self, filters=None):\n # If the cloud is running nova-network, just return an empty list.\n if not self.has_service('network'):\n return []\n\n # Translate None from search interface to empty {} for kwargs below\n if not filters:\n filters = {}\n return list(self.network.networks(**filters))", "def test_get_networks(self):\n pass", "def get_network_list(network = None, include_details = True):\n \n if network == None: \n json_obj = requests.get(api_base_url + 'networks')\n return json.loads(json_obj.content)['networks']\n rq_url = api_base_url + '{}/sites'.format(network)\n json_obj = requests.get(rq_url)\n sites_list = json.loads(json_obj.content)\n d = OrderedDict(zip([x.pop('network_siteid') for x in sites_list['sites']], \n sites_list['sites']))\n if include_details: return d\n return d.keys()", "def list_networks():\n return __sets.keys()", "def netlist(self):\n return self._netlist", "def _useful_network(self):\n\n networks = self._compile_networks()\n\n network = []\n for n in networks:\n if len(n) >= self.min_network_size:\n network += list(n)\n\n return network", "def network_instances(self) -> Iterator[NetworkInstance]:\n return self._get_related_instance(NetworkInstance, \"l3-network\")", "def test_get_default_network(self):\n pass", "def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % (node),None)\n return data", "def list_networks(session):\n # type: (Session) -> List[Dict[str, Any]]\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n return _get_list(session, url_tail)", "def networks(self) -> dict:\n return self.data[\"networks\"]" ]
[ "0.66630465", "0.6403319", "0.6402483", "0.6193302", "0.6183048", "0.6146009", "0.6130396", "0.611007", "0.6103166", "0.60973394", "0.6060203", "0.6029414", "0.60237056", "0.60172004", "0.5909047", "0.5903866", "0.5893611", "0.5836047", "0.5805968", "0.5785176", "0.57635653", "0.57561487", "0.57487875", "0.5729577", "0.56857723", "0.56687313", "0.56629133", "0.5649334", "0.5638533", "0.5636832" ]
0.6555646
1
Remove firmware driver from appliance. [Arguments]
def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None): return self.driver.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def firmware_pack_remove(handle, org_name, name, org_parent=\"org-root\"):\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" %org_name)\n else:\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info(\"Firmware host pack <%s> not found.Nothing to remove\" % name)\n else:\n handle.remove_mo(mo)\n handle.commit()", "def remove(self):\n\t\tcall_sdk_function('PrlBootDev_Remove', self.handle)", "def test_delete_hyperflex_server_firmware_version(self):\n pass", "def removeDevice(self, node, fullDeviceName):", "def invalidate_firmware(self):\n self.exec_command('InvalidateFW')\n return None", "def remove():\n run('pew rm {0}'.format(package_name()))", "def remove_device(self, path):\n pass", "def deleteDevice(serial):\n swDB = switchdb.DB()\n swDB.deleteBySerial(serial)\n swDB.close()", "def cleanup_dpdk_framework(node, if1, if2):\n if node[u\"type\"] == NodeType.DUT:\n pci_address1 = Topology.get_interface_pci_addr(node, if1)\n pci_address2 = Topology.get_interface_pci_addr(node, if2)\n # We are not supporting more than one driver yet.\n nic_driver = Topology.get_interface_driver(node, if1)\n\n command = f\"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}\"\\\n f\"/entry/cleanup_dpdk.sh \" \\\n f\"{nic_driver} {pci_address1} {pci_address2}\"\n message = u\"Cleanup the DPDK failed!\"\n exec_cmd_no_error(node, command, timeout=1200, message=message)", "def fusion_api_remove_power_device(self, name=None, uri=None, api=None, headers=None):\n return self.pd.delete(name=name, uri=uri, api=api, headers=headers)", "def hfp_firmware_pack_item_remove(handle, org_dn, hfp_name, hw_vendor,\r\n hw_model, type):\r\n\r\n hfp_dn = org_dn + \"/fw-host-pack-\" + hfp_name\r\n dn = hfp_dn + \"/pack-image-\" + hw_vendor + \"|\" + hw_model + \"|\" + type\r\n mo = handle.query_dn(dn)\r\n if mo is None:\r\n raise ValueError(\"FirmwarePackItem '%s' does not exist\" % dn)\r\n\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n return mo", "def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)", "def test_unplug(self, mock_bld_drv):\n mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}\n\n # 1) With default cna_w_list\n mock_bld_drv.return_value.unplug.return_value = 'vnet_w'\n vif.unplug(self.adpt, 'instance', mock_vif)\n mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)\n mock_bld_drv.return_value.unplug.assert_called_once_with(\n mock_vif, cna_w_list=None)\n\n # Clean up\n mock_bld_drv.reset_mock()\n mock_bld_drv.return_value.unplug.reset_mock()\n\n # 2) With specified cna_w_list\n mock_bld_drv.return_value.unplug.return_value = None\n vif.unplug(self.adpt, 'instance', mock_vif, cna_w_list='cnalist')\n mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)\n mock_bld_drv.return_value.unplug.assert_called_once_with(\n mock_vif, cna_w_list='cnalist')", "def uninstall_mac_processor(interface, mac_profile):\n pass", "def disintegrate():\n click.confirm('Do you really want to uninstall?', abort=True)\n if click.confirm('Do you want to remove installed AppImages?'):\n cfgmgr = ConfigManager()\n if os.path.exists(cfgmgr['bin']):\n print(fc(\"{y}Removing bin for appimages{rst}\"))\n shutil.rmtree(cfgmgr['bin'], ignore_errors=True)\n if os.path.exists(cfgmgr['storageDirectory']):\n print(fc(\"{y}Removing storageDirectory for appimages{rst}\"))\n shutil.rmtree(cfgmgr['storageDirectory'], 
ignore_errors=True)\n print(fc(\"{y}Removing zap binary entrypoint{rst}\"))\n for path in os.getenv('PATH').split(os.pathsep):\n zap_bin = os.path.join(path, 'zap')\n if os.path.exists(zap_bin):\n os.remove(zap_bin)\n break\n print(fc(\"{y}Removing zap AppImage {rst}\"))\n dot_zap = os.path.join(os.path.expanduser('~'), '.zap')\n if os.path.exists(dot_zap):\n shutil.rmtree(dot_zap, ignore_errors=True)", "def handle_remove_driver(driver_repo: Repo, parser: Parser):\n # Get driver id:\n done_id = False\n _, driver_list = driver_repo.get()\n\n while not done_id:\n id_ = input(\"Enter driver id (numeric) or leave blank to see the driver list > \")\n if id_ == \"\":\n table_data = [[\"ID\", \"Name\"]]\n for driver in driver_list:\n table_data.append([str(driver.id), driver.name])\n\n driver_table = SingleTable(table_data, title=\"Drivers\")\n driver_table.justify_columns = {\n 0: \"left\",\n 1: \"center\",\n }\n while True:\n console.clear_console()\n print(driver_table.table)\n input_ = input(\"Type b or back to go back > \")\n if input_ == \"b\" or input_ == \"back\":\n break\n else:\n continue\n else:\n try:\n id_ = int(id_)\n\n if parser.check_if_already_exists(by_id=True, id=id_):\n # Id exists, continue:\n done_id = True\n driver_repo.delete(entity_id=id_)\n\n save_data(\n mode=\"single\", \n only=\"drivers\", \n driver_instance_list=driver_list\n )\n olt.show(\n title=\"Success\",\n message=\"The driver was removed succesfully\"\n )\n except ValueError:\n console.clear_console()\n olt.show(\n title=\"Info\",\n message=\"Invalid ID! The ID Must be numeric\",\n go_back=False\n )\n else:\n console.clear_console()\n olt.show(\n title=\"Info\",\n message=\"Invalid ID!\",\n go_back=False\n )", "def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")", "def remove(self, package):\n self.driver.remove_app(package)", "def uninstall(package):\n return G.DEVICE.uninstall_app(package)", "def driver_reset(driver='openstack', skip=None):\n\n mach = Dockerizing(driver)\n import time\n skip_nodes = []\n if skip is not None:\n skip_nodes = skip.split(',')\n # Find machines in list which are based on this driver\n for node in mach.list(with_driver=driver):\n if node in skip_nodes:\n _logger.info(\"Skipping '%s'\" % node)\n\n continue\n # REMOVE THEM!!\n _logger.warning(\"Removing machine '%s'!\" % node)\n time.sleep(5)\n mach.remove(node)\n _logger.info(\"Done\")", "def delete_driver(driver_id):\n driver = utils.get_dict_by_key_value_from_list('id', driver_id, drivers)\n if driver:\n drivers.remove(driver)\n return jsonify({\"message\": \"The object was deteled successfully\"})\n return jsonify({\"message\": \"Object not found\"})", "def remove_device(hass: HomeAssistant, mac: str):\n registry = dr.async_get(hass)\n device = registry.async_get_device({(DOMAIN, mac)}, None)\n if device:\n registry.async_remove_device(device.id)", "def removeFake(v):\n\n if len(v.fakes) > 0:\n menu.menuBanner(v)\n i = 1\n print(\" --------------------------------------------------------\")\n for x in v.fakes:\n print(f\" {i} - {x}\")\n i += 1\n print(f\" {i} - ALL\")\n print(\" --------------------------------------------------------\")\n try:\n sel = int(input(\" Enter selection you want to delete: \")) - 1\n except ValueError:\n print(\" \" + bcolors.WARNING + \"Only input integers\" + bcolors.ENDC)\n time.sleep(1)\n return\n except KeyboardInterrupt:\n return\n\n if not 0 <= sel < i:\n print(\" \" + bcolors.WARNING + str(sel + 1) + \" is not a selection\" + 
bcolors.ENDC)\n time.sleep(1)\n return\n\n if sel == len(v.fakes):\n v.fakes = []\n return\n\n\n bash = (\"ip addr del \" + v.fakes[sel] + \"/0 dev dummy label dummy:\" + str(sel))\n os.system(bash)\n v.fakes.pop(sel)\n return\n else:\n print(\" \" + bcolors.WARNING + \"No fake NICs\" + bcolors.ENDC)\n time.sleep(1)\n return", "def __del__(self):\n self.DcMotor.run(Adafruit_MotorHAT.RELEASE) # changed rightMotor to DcMotor , RFMH_2019_02_28\n del self.motorhat", "def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def remove_descriptor(self, uuid):", "def do_command(self, args):\n vendorops = dbops.Vendors()\n vendorops.delete(args)", "def unload_kernel_module(params) -> None:\n print(\"Unloading kernel module...\")\n if os.system(\"modprobe -r v4l2loopback >/dev/null 2>&1\") == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")", "def uninstall_feature(client_bin, module, feature_name, feature_version):\n full_qualified_name = feature_name\n if feature_version:\n full_qualified_name = full_qualified_name + \"/\" + feature_version\n cmd = CLIENT_KARAF_COMMAND_WITH_ARGS.format(client_bin, PACKAGE_STATE_MAP[\"absent\"], full_qualified_name)\n rc, out, err = module.run_command(cmd)\n\n if rc != 0:\n reason = parse_error(out)\n module.fail_json(msg=reason)\n\n is_installed = is_feature_installed(client_bin, module, feature_name, feature_version)\n if is_installed:\n module.fail_json(msg='Feature fails to uninstall')\n\n return True, cmd, out, err", "def remove(name):\n if name==\"autopy\":\n print(\"\\n\\tUNINSTALLING WORKING MODULE WILL CAUSE ERRORS AND MAKE YOUR CODE UNUSABLE\\n\")\n choice=input(f\"Are you sure to remove {name}?\\nEnter YES,PROCEED to continue:\")\n if choice == 'YES,PROCEED':os.system(f'python -m pip uninstall {name}')\n else:print(\"Operetion Cancelled\")" ]
[ "0.64036894", "0.64008635", "0.6357454", "0.633413", "0.61674076", "0.60862267", "0.6082947", "0.6042765", "0.59809154", "0.59704435", "0.5954235", "0.58819866", "0.586851", "0.57959574", "0.57602715", "0.5741407", "0.57126486", "0.5649886", "0.5625113", "0.5588033", "0.55675405", "0.55354875", "0.552535", "0.54876155", "0.5486867", "0.5486382", "0.5483374", "0.5472409", "0.54680103", "0.54601395" ]
0.7694544
0
Get global settings [Example] ${resp} = Fusion Api Get Global Settings | | | |
def fusion_api_get_global_settings(self, uri=None, api=None, headers=None, param=''): return self.settings.get(uri, api, headers, param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_global_config(baseurl, cookie_header):\n url = baseurl + 'stacking/vsf/global_config'\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "def fusion_api_get_login_domains_global_settings(self, api=None, headers=None, param=''):\n return self.domain_settings.get(api, headers, param)", "def settings():\n return _get_settings()[1]", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def fusion_api_get_lsg_default_settings(self, api=None, headers=None):\n return self.lsg.get(api=api, param='/defaultSettings', headers=headers)", "def globalsettings(golbalsettingbutton):\n try:\n atomacclick(golbalsettingbutton)\n global_settings_content = getApplicatontitle(golbalsettingbutton)\n except Exception as er:\n print \"Not able to get globalsettings_content\"\n return False\n return global_settings_content", "def Global(**kwargs):\n\n path = \"{base}{endpoint}\".format(base=API_URL,endpoint=API_ENDPOINTS['global'])\n resp = requests.get(path,params=kwargs)\n return resp.json()", "def settings_config(session, return_type=None, **kwargs):\n path = '/api/return_type.json'\n return session.get_api(path=path, return_type=return_type, **kwargs)", "def get_vpsa_flc_global(session, return_type=None, **kwargs):\n path = '/api/settings/flc_global.json'\n\n return session.get_api(path=path, return_type=return_type, **kwargs)", "def client_settings():\n return CLIENT_SETTINGS", "def settings_global(self) -> api.SettingsGlobal:\n return self._get_model(model=api.SettingsGlobal)", "def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))", "def getCurrentSetting(self):\n return {}", "def googledrive_config_get(node_addon, auth, **kwargs):\n return {\n 'result': serialize_settings(node_addon, auth.user),\n }", "def api_settings(_request):\n account = models.Account.current_user_account\n return {\n 'xsrf_token': account.get_xsrf_token(),\n 'email': account.email,\n 'nickname': account.nickname,\n 'deprecated_ui': account.deprecated_ui,\n 'default_context': account.default_context,\n 'default_column_width': account.default_column_width,\n 'default_tab_spaces': account.default_tab_spaces,\n 'notify_by_email': account.notify_by_email,\n 'notify_by_chat': account.notify_by_chat,\n 'add_plus_role': account.add_plus_role,\n 'display_generated_msgs': account.display_generated_msgs,\n 'send_from_email_addr': account.send_from_email_addr,\n }", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings(self):\n url = \"https://api.imgur.com/3/account/{0}/settings\".format(self.name)\n return self._imgur._send_request(url)", "def fusion_api_get_configuration(self, uri=None, param='', api=None, headers=None):\n return self.configuration.get(uri=uri, api=api, headers=headers, param=param)", "def fusion_api_get_lsg_setting(self, uri, settingsId=None, api=None, headers=None):\n param = '/settings/%s' % 
(settingsId)\n return self.lsg.get(uri=uri, api=api, param=param, headers=headers)", "def conf():\n global config\n return config", "def printSettings():\n print \">>>\\n>>> SettingsTool: global variables:\"\n for variable, value in globals().items():\n if variable.count('__')>1: continue\n print \">>> %-16s = %s\"%(variable,value)\n print \">>>\"", "def get_settings(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout)", "def settings(request):\n return {\"SETTINGS\": django_settings, \"GLOBAL_DEFINITIONS\": global_definitions}", "def __get_base_info_api(self):\r\n try:\r\n return Call_shelly_api(url=self.__api_address + \"/settings\")\r\n except ShellyException as err:\r\n _LOGGER.warning(err)", "def grpc_settings(self) -> 'outputs.NotificationEndpointGrpcSettingsResponse':\n return pulumi.get(self, \"grpc_settings\")", "def myCurrentSetting(self):\n paramDict = self.getCurrentSetting()\n return paramDict", "def cont_settings_(request):\n \n return {\"settings\": settings}", "def get_account_settings():\n pass" ]
[ "0.65836966", "0.6542899", "0.635939", "0.630538", "0.6245876", "0.61720496", "0.61032206", "0.60357594", "0.6023878", "0.59888023", "0.5916567", "0.59124744", "0.59032035", "0.58879673", "0.58629936", "0.5862954", "0.5855244", "0.5855244", "0.5810491", "0.579867", "0.5791221", "0.5789241", "0.577284", "0.57673854", "0.5757151", "0.5742414", "0.5728451", "0.5725208", "0.5717304", "0.5709796" ]
0.7816922
0
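Usage sketch for the record above (illustrative only: the `fusion` client object is an assumption, not part of the dataset; the method and its defaults come from the record):

    # hypothetical client instance exposing the documented method
    resp = fusion.fusion_api_get_global_settings()
    print(resp)  # appliance global-settings payload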
Update global settings [Arguments]
def fusion_api_update_global_settings(self, body=None, api=None, headers=None): return self.settings.update(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_settings(command):\n namespace = app.main(command)\n assert namespace.command == 'u' or namespace.command == \"updatesettings\"", "def update_global_config(self, config, **kwargs):\n pass", "def update_settings(self):\n\n param = \"settings.py\"\n self._check_path_availability([\"get_settings_dir\", \"get_settings_dir_to\"])\n self.updater.update_files(\n self.analizer.get_settings_dir(),\n self.analizer.get_settings_dir_to(),\n param,\n )\n return self.write_debug_message(\"Settings upgrade is done!\\n\")", "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parser.getboolean('Settings', 'saveGraph')\n self.graphDPI = self.parser.getint('Settings', 'graphDPI')", "def change_settings(new_settings={}, file=None):\n gl = globals()\n if file is not None:\n execfile(file)\n gl.update(locals())\n gl.update(new_settings)\n # Here you can add some code to check that the new configuration\n # values are valid.", "def updateSettingsUI(self):\n\n pass", "def update(self, settings):\n self.settings.cache_clear()\n self._settings = settings\n log.info(\"Updated settings to %s\", self._settings)", "def update_settings( what_to_do, settings_inst ):\n from settings import smart_update\n from _settings import settings\n\n smart_update(settings_inst, settings)\n # ok, we want to have parallel\n if what_to_do == \"wikis_to_huge_math\":\n settings_inst[\"input\"] = settings_inst[\"wiki\"][\"xml\"]\n # there are too few so each process should take only 1\n settings_inst[\"parallel\"][\"chunksize\"] = 1", "def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n remote.update_config(_get_current_project_name(), settings)", "async def settings(self, ctx: BBContext):\n pass", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True", "def conf_update(self):\n pass", "def update_settings(self, param):\n if param.name() == '':\n pass", "async def fishingsettings(self, ctx:commands.Context):", "def update_current_settings(file_name):\n new_settings = importlib.import_module(file_name)\n for k, v in new_settings.__dict__.items():\n if k.upper() == k:\n globals().update({k: v})", "def _apply_settings(self):\n if 'fixed_delta_seconds' in self.dict_settings:\n self.settings.fixed_delta_seconds = self.dict_settings['fixed_delta_seconds']\n\n self.world.apply_settings(self.settings)", "def update(*args):", "def update(self, args):\n pass", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def setGlobal(name, value):", "def pg_update_settings(self, settings, restart=True):\n\n for k, v in settings.items():\n 
self.pg_set(k, v)\n if restart:\n self.pg_cmd(\"restart\")", "def edit_settings(self):\n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n valid_numbers, number_setting_corr = self.print_settings()\n print('Which setting you want to change? Enter \"number, new value\" to modify, or \"done\" to exit.')\n print('Observe the possible values for each setting! They are case sensitive. '\n 'Inputting wrong values might break the program. \\n')\n choice = input('Input:')\n if choice == 'done':\n break\n if ',' not in choice:\n print('Invalid input. Place the number, followed by a comma, followed by its value. Eg: 1,TRUE')\n continue\n if len(choice.split(',')) != 2:\n print('Invalid input, must have only one comma')\n continue\n\n var, val = choice.split(',')\n if var not in valid_numbers:\n print('Invalid number.')\n continue\n real_var = number_setting_corr[var] # Changes from a number to the actual parameter\n if val.lower() == 'true':\n setattr(self, real_var, True)\n continue\n elif val.lower() == 'false':\n setattr(self, real_var, False)\n continue\n else:\n setattr(self, real_var, val)\n\n # todo: check for all possible values to avoid inputting wrong settings and messing everything up.\n # if val not in valid_options_nl_sorting:\n # print('Invalid nonlinear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in valid_options_lin_sorting:\n # print('Invalid linear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in models:\n # print('Invalid nonlinear fitting model. Case sensitive! Be very precise.')\n # continue\n\n print('===Final settings===')\n _, _ = self.print_settings()\n self.save_settings()\n return", "def update_from_env(self):\n for key, value in os.environ.items():\n if not key.startswith(self._prefix):\n continue\n\n setting = key[len(self._prefix):]\n if setting not in self._default_settings:\n continue\n\n setting_value = getattr(self, setting)\n if isinstance(setting_value, bool):\n value = (value == 'True')\n elif isinstance(setting_value, (int, float)):\n value = type(setting_value)(value)\n elif isinstance(setting_value, (list, dict)):\n value = json.loads(value)\n\n setattr(self, setting, value)\n self._explicit_settings.add(setting)", "def change_settings(settings, methods=['GET', 'POST']):\n message = resolve_settings(settings)\n socketio.emit('settings_update', SETTINGS)\n socketio.emit('log', message)", "def _edit_setting(self):\n settings = fileIO.load_json(\"settings.json\")\n self._list_settings(settings=settings)\n option = False\n while not option: #While loop until valid setting given\n option = input(\"Please type the setting you would like to change: \")\n if option not in settings:\n option = False\n newSetting = input(\"Please enter what you would like to change that setting to: \")\n command = \"edit_setting {0} {1}\".format(option, newSetting)\n return(command)", "def update_settings(self, settings_list):\n for i, x in enumerate(settings_list):\n self.update_settings_at_index(settings=x, index=i)", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def apply_settings():\n\n scs_globals = 
_get_scs_globals()\n\n # avoid recursion if another apply settings is running already\n if scs_globals.config_update_lock:\n return False\n\n # NOTE: save file paths in extra variables and apply them on the end\n # to make sure all of the settings are loaded first.\n # This is needed as some libraries reading are driven by other values from config file.\n # For example: \"use_infixed\"\n scs_project_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.scs_project_path, scs_globals)\n shader_presets_filepath = _property_utils.get_by_type(bpy.types.GlobalSCSProps.shader_presets_filepath, scs_globals)\n trigger_actions_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_rel_path, scs_globals)\n sign_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_rel_path, scs_globals)\n tsem_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_rel_path, scs_globals)\n traffic_rules_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path, scs_globals)\n hookup_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.hookup_library_rel_path, scs_globals)\n matsubs_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.matsubs_library_rel_path, scs_globals)\n sun_profiles_library_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sun_profiles_lib_path, scs_globals)\n conv_hlpr_converters_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.conv_hlpr_converters_path, scs_globals)\n\n # NOTE: as dump level is written in same section as config type\n # applying it directly might take place before we get information about config type\n # so it has to be saved into variable and applied only if global settings are loaded from config file\n dump_level = scs_globals.dump_level\n\n # lock update now, as we don't want any properties update functions to trigger rewrite of config file\n # which would lead to unwanted recursion\n engage_config_lock()\n\n config_container = _pix.get_data_from_file(get_config_filepath(), \" \")\n\n # avoid applying process of config if not present (most probably permission problems on config creation)\n if config_container is not None:\n\n settings_file_valid = 0\n for section in config_container:\n if settings_file_valid == 2:\n if section.type == \"Paths\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ProjectPath\":\n scs_project_path = prop[1]\n elif prop[0] == \"ShaderPresetsFilePath\":\n shader_presets_filepath = prop[1]\n elif prop[0] == \"TriggerActionsRelFilePath\":\n trigger_actions_rel_path = prop[1]\n elif prop[0] == \"TriggerActionsUseInfixed\":\n scs_globals.trigger_actions_use_infixed = prop[1]\n elif prop[0] == \"SignRelFilePath\":\n sign_library_rel_path = prop[1]\n elif prop[0] == \"SignUseInfixed\":\n scs_globals.sign_library_use_infixed = prop[1]\n elif prop[0] == \"TSemProfileRelFilePath\":\n tsem_library_rel_path = prop[1]\n elif prop[0] == \"TSemProfileUseInfixed\":\n scs_globals.tsem_library_use_infixed = prop[1]\n elif prop[0] == \"TrafficRulesRelFilePath\":\n traffic_rules_library_rel_path = prop[1]\n elif prop[0] == \"TrafficRulesUseInfixed\":\n scs_globals.traffic_rules_library_use_infixed = prop[1]\n elif prop[0] == \"HookupRelDirPath\":\n hookup_library_rel_path = prop[1]\n elif prop[0] == \"MatSubsRelFilePath\":\n matsubs_library_rel_path = prop[1]\n elif prop[0] == \"SunProfilesFilePath\":\n sun_profiles_library_path = 
prop[1]\n elif prop[0] == \"ConvertersPath\":\n conv_hlpr_converters_path = prop[1]\n elif prop[0] == \"UseAlternativeBases\":\n scs_globals.use_alternative_bases = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! Skipping...', (str(prop[0]),))\n elif section.type == \"Import\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ImportScale\":\n scs_globals.import_scale = float(prop[1])\n elif prop[0] == \"PreservePathForExport\":\n scs_globals.import_preserve_path_for_export = prop[1]\n elif prop[0] == \"ImportPimFile\":\n scs_globals.import_pim_file = prop[1]\n elif prop[0] == \"UseWelding\":\n scs_globals.import_use_welding = prop[1]\n elif prop[0] == \"WeldingPrecision\":\n scs_globals.import_welding_precision = prop[1]\n elif prop[0] == \"UseNormals\":\n scs_globals.import_use_normals = prop[1]\n elif prop[0] == \"ImportPitFile\":\n scs_globals.import_pit_file = prop[1]\n elif prop[0] == \"LoadTextures\":\n scs_globals.import_load_textures = prop[1]\n elif prop[0] == \"ImportPicFile\":\n scs_globals.import_pic_file = prop[1]\n elif prop[0] == \"ImportPipFile\":\n scs_globals.import_pip_file = prop[1]\n elif prop[0] == \"ImportPisFile\":\n scs_globals.import_pis_file = prop[1]\n elif prop[0] == \"ConnectedBones\":\n scs_globals.import_connected_bones = prop[1]\n elif prop[0] == \"BoneImportScale\":\n scs_globals.import_bone_scale = float(prop[1])\n elif prop[0] == \"ImportPiaFile\":\n scs_globals.import_pia_file = prop[1]\n elif prop[0] == \"IncludeSubdirsForPia\":\n scs_globals.import_include_subdirs_for_pia = prop[1]\n elif section.type == \"Export\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ExportScale\":\n scs_globals.export_scale = float(prop[1])\n elif prop[0] == \"ApplyModifiers\":\n scs_globals.export_apply_modifiers = prop[1]\n elif prop[0] == \"ExcludeEdgesplit\":\n scs_globals.export_exclude_edgesplit = prop[1]\n elif prop[0] == \"IncludeEdgesplit\":\n scs_globals.export_include_edgesplit = prop[1]\n elif prop[0] == \"ActiveUVOnly\":\n scs_globals.export_active_uv_only = prop[1]\n elif prop[0] == \"ExportVertexGroups\":\n scs_globals.export_vertex_groups = prop[1]\n elif prop[0] == \"ExportVertexColor\":\n scs_globals.export_vertex_color = prop[1]\n elif prop[0] == \"ExportVertexColorType\":\n scs_globals.export_vertex_color_type = str(prop[1])\n elif prop[0] == \"ExportVertexColorType7\":\n scs_globals.export_vertex_color_type_7 = str(prop[1])\n elif prop[0] == \"ExportPimFile\":\n scs_globals.export_pim_file = prop[1]\n elif prop[0] == \"OutputType\":\n scs_globals.export_output_type = prop[1]\n elif prop[0] == \"ExportPitFile\":\n scs_globals.export_pit_file = prop[1]\n elif prop[0] == \"ExportPicFile\":\n scs_globals.export_pic_file = prop[1]\n elif prop[0] == \"ExportPipFile\":\n scs_globals.export_pip_file = prop[1]\n elif prop[0] == \"SignExport\":\n scs_globals.export_write_signature = prop[1]\n elif section.type == \"GlobalDisplay\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"DisplayLocators\":\n scs_globals.display_locators = prop[1]\n elif prop[0] == \"LocatorSize\":\n scs_globals.locator_size = float(prop[1])\n elif prop[0] == \"LocatorEmptySize\":\n scs_globals.locator_empty_size = float(prop[1])\n elif prop[0] == \"DisplayConnections\":\n scs_globals.display_connections = prop[1]\n elif prop[0] == \"CurveSegments\":\n scs_globals.curve_segments = prop[1]\n elif prop[0] == \"OptimizedConnsDrawing\":\n 
scs_globals.optimized_connections_drawing = prop[1]\n elif prop[0] == \"DisplayTextInfo\":\n scs_globals.display_info = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! Skipping...', (str(prop[0]),))\n elif section.type == \"GlobalColors\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"PrefabLocatorsWire\":\n scs_globals.locator_prefab_wire_color = prop[1]\n elif prop[0] == \"ModelLocatorsWire\":\n scs_globals.locator_model_wire_color = prop[1]\n elif prop[0] == \"ColliderLocatorsWire\":\n scs_globals.locator_coll_wire_color = prop[1]\n elif prop[0] == \"ColliderLocatorsFace\":\n scs_globals.locator_coll_face_color = prop[1]\n elif prop[0] == \"NavigationCurveBase\":\n scs_globals.np_connection_base_color = prop[1]\n elif prop[0] == \"MapLineBase\":\n scs_globals.mp_connection_base_color = prop[1]\n elif prop[0] == \"TriggerLineBase\":\n scs_globals.tp_connection_base_color = prop[1]\n elif prop[0] == \"InfoText\":\n scs_globals.info_text_color = prop[1]\n elif prop[0] == \"BasePaint\":\n scs_globals.base_paint_color = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! Skipping...', (str(prop[0]),))\n elif section.type == \"Header\":\n for prop in section.props:\n if prop[0] == \"FormatVersion\":\n if prop[1] == 1:\n settings_file_valid += 1\n elif prop[0] == \"Type\":\n if prop[1] == \"Configuration\":\n settings_file_valid += 1\n elif prop[0] == \"DumpLevel\":\n dump_level = prop[1]\n elif prop[0] == \"ConfigStoragePlace\":\n scs_globals.config_storage_place = prop[1]\n\n # if settings are read directly from blend file,\n # release update lock and don't search/apply any settings further\n if prop[1] == \"BlendFile\":\n settings_file_valid += 1\n\n # as dump level can be read already (it can be placed above config storage place property),\n # reset local variable back to value that was saved with blend file\n dump_level = scs_globals.dump_level\n\n break # to avoid further reading of header properties, so dump_level won't be overwritten unintentionally\n\n scs_globals.dump_level = dump_level\n\n # now as last apply all of the file paths\n # NOTE: applying paths is crucial for libraries\n # (they are reloaded/initiated in property update functions).\n if bpy.app.background: # if blender runs without UI then apply libraries directly as async operator is UI depended\n\n scs_globals.scs_project_path = scs_project_path\n scs_globals.shader_presets_filepath = shader_presets_filepath\n scs_globals.trigger_actions_rel_path = trigger_actions_rel_path\n scs_globals.sign_library_rel_path = sign_library_rel_path\n scs_globals.tsem_library_rel_path = tsem_library_rel_path\n scs_globals.traffic_rules_library_rel_path = traffic_rules_library_rel_path\n scs_globals.hookup_library_rel_path = hookup_library_rel_path\n scs_globals.matsubs_library_rel_path = matsubs_library_rel_path\n scs_globals.sun_profiles_lib_path = sun_profiles_library_path\n scs_globals.conv_hlpr_converters_path = conv_hlpr_converters_path\n\n else: # if blender is started normally use asynchronous operator to reload libraries\n\n bpy.ops.world.scs_paths_initialization('INVOKE_DEFAULT', paths_list=[\n {\"name\": \"project base path\", \"attr\": \"scs_project_path\", \"path\": scs_project_path},\n {\"name\": \"shader presets\", \"attr\": \"shader_presets_filepath\", \"path\": shader_presets_filepath},\n {\"name\": \"trigger actions library\", \"attr\": \"trigger_actions_rel_path\", \"path\": trigger_actions_rel_path},\n 
{\"name\": \"sign library\", \"attr\": \"sign_library_rel_path\", \"path\": sign_library_rel_path},\n {\"name\": \"traffic semaphore library\", \"attr\": \"tsem_library_rel_path\", \"path\": tsem_library_rel_path},\n {\"name\": \"traffic rules library\", \"attr\": \"traffic_rules_library_rel_path\", \"path\": traffic_rules_library_rel_path},\n {\"name\": \"hookups library\", \"attr\": \"hookup_library_rel_path\", \"path\": hookup_library_rel_path},\n {\"name\": \"material substance library\", \"attr\": \"matsubs_library_rel_path\", \"path\": matsubs_library_rel_path},\n {\"name\": \"sun profiles library\", \"attr\": \"sun_profiles_lib_path\", \"path\": sun_profiles_library_path},\n {\"name\": \"converters file path\", \"attr\": \"conv_hlpr_converters_path\", \"path\": conv_hlpr_converters_path},\n ])\n\n # release lock as properties are applied\n release_config_lock(use_paths_init_callback=not bpy.app.background)\n\n return True", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)" ]
[ "0.71755046", "0.6829187", "0.65568334", "0.6541337", "0.65340614", "0.6411956", "0.6383412", "0.63540906", "0.63494205", "0.63442993", "0.63318974", "0.6290941", "0.6268238", "0.626067", "0.62480557", "0.61562616", "0.6133247", "0.6101819", "0.60940146", "0.5994293", "0.59849465", "0.59657276", "0.5961623", "0.5944461", "0.59103125", "0.5909731", "0.58973145", "0.58813167", "0.5871718", "0.5853137" ]
0.6874203
1
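Usage sketch for the update record above (hedged: the client object and the payload keys are assumptions; only the signature comes from the record):

    body = {'key': 'value'}  # hypothetical settings payload
    resp = fusion.fusion_api_update_global_settings(body=body)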
Collects one or more IDs to be returned to a pool. The collector DTO that is returned contains the list of collected IDs [Arguments]
def fusion_api_collect_pool(self, body, uri, api=None, headers=None): return self.idpool.collect(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _collect_set(self, pidset):", "def getIDs():", "def IDs(self, default=[{}]):\n tmp = self.data.get('ids', default)\n return [HEP.IDObject(i) for i in tmp]", "def get_ids(self) -> List[str]:", "def fusion_api_generate_pool(self, uri, api=None, headers=None):\n return self.idpool.generate(uri, api, headers)", "def get_all(self, *ids):", "def collect(self):\n pass", "def ids_to_process(self):\n htids = self.options[\"htids\"]\n # if id file is specified, get ids from the file\n if self.options[\"file\"]:\n with open(self.options[\"file\"]) as idfile:\n # add all non-empty lines with whitespace removed\n htids.extend(\n [line.strip() for line in idfile.readlines() if line.strip()]\n )\n\n self.stats[\"total\"] = len(htids)\n return htids", "def collect(self, item, collector_set):\n\t\tpass", "def pool_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"pool_ids\")", "def data_by_id(id_list: List[str]) -> Generator[GameData, None, None]:\n\ttarget = collection.find({\"_id\": {\"$in\": [ObjectId[id_] for id_ in id_list]}})\n\tfor data in target:\n\t\tyield GameData(data)", "def _collect_all(self):", "def gen_resources_for_ids(\n resource: Callable, res_ids: List[str], **list_params\n) -> Generator[List, None, None]:\n print(\"Generating resources for ids.\")\n total = len(res_ids)\n res_counter = 0\n\n if \"maxResults\" not in list_params.keys():\n list_params[\"maxResults\"] = DEFAULT_MAX_RESULTS\n max_results = DEFAULT_MAX_RESULTS\n else:\n max_results = list_params[\"maxResults\"]\n\n _res_ids = res_ids.copy()\n\n while len(_res_ids) > 0:\n request_ids = []\n for _ in range(max_results):\n request_ids.append(_res_ids.pop(0))\n\n if len(_res_ids) == 0:\n break\n\n print(\n f\"\\tRequesting {res_counter}-{res_counter + len(request_ids)} of {total}.\"\n )\n\n list_params[\"id\"] = \",\".join(request_ids)\n\n request = resource().list(**list_params)\n response = request.execute()\n yield response[\"items\"]\n\n res_counter += max_results\n\n print(\"\\tFinished requesting resources.\")\n return None", "def start_data_collection_by_agent_ids(agentIds=None):\n pass", "def make_collection(data_ids):\n idlist = []\n count = 0\n for c in range(0, len(data_ids)):\n data_id = data_ids[c]\n idlist.append({'src': \"hda\", 'id': data_id, 'name': str(count)})\n count += 1\n collection = {'collection_type': 'list', 'element_identifiers': idlist, 'name': 'collection'}\n return collection", "def count_many(self, ids):\r\n\r\n def serializer(val):\r\n if isinstance(val, (list, tuple)):\r\n return port.to_b(',').join(map(port.to_b, val))\r\n return base.serialize_param(val)\r\n\r\n url = '{0}/{1}'.format(self.get_url(), 'count_many')\r\n params = base.get_params(('ids',), locals(), serializer)\r\n return http.Request('GET', url, params), parsers.parse_json", "def _build_ID_sets(self):\n raise NotImplementedError", "def AllocateIds(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def allocate_ids(self, request):\n return self._call_method('allocateIds', request,\n datastore_v1_pb2.AllocateIdsResponse)", "def AllocateIds(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def AllocateIds(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ids(self):\n return list(self._id_generator())", "def ids(self):\n return list(self._id_generator())", "def get_pipe_ids(url, arg):\n 
encoded_pipelines = live_url_request(url, arg)\n return encoded_pipelines", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def AllocateIds(self, request, global_params=None):\n config = self.GetMethodConfig('AllocateIds')\n return self._RunMethod(\n config, request, global_params=global_params)", "def _Dynamic_AllocateIds(self, request, response, request_id=None):\n self._RemoteSend(request, response, \"AllocateIds\", request_id)\n return response", "def collect(self, collector):\n return collector(self)", "def genIdList(numId, idSize):\n\tiDs = []\n\tfor i in range(numId):\n\t\tiDs.append(genID(idSize))\n\treturn iDs", "def get_pool_ids(host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n pool_ids = []\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. Error message: [{err}]\".format(err=ret[1])\n return -1, pool_ids\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n pool_ids.append(p[\"id\"])\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1, pool_ids\n return 0, pool_ids" ]
[ "0.58183867", "0.5673005", "0.56242716", "0.5622344", "0.5539857", "0.5519632", "0.55071205", "0.54633564", "0.5377036", "0.5368302", "0.5338279", "0.53363675", "0.5335429", "0.53343636", "0.5326961", "0.5323748", "0.5317773", "0.530632", "0.5293721", "0.52520114", "0.5240266", "0.52380764", "0.52380764", "0.5221611", "0.5210299", "0.5191098", "0.51810515", "0.51597524", "0.5145423", "0.51371694" ]
0.6546503
0
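Usage sketch for the collect record above (the client object, `pool_uri`, and the body schema are assumptions; the record fixes only the signature):

    body = {'idList': ['10.1.1.5']}  # hypothetical IDs being returned to the pool
    resp = fusion.fusion_api_collect_pool(body=body, uri=pool_uri)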
Gets a Pool specified by uri. [Arguments]
def fusion_api_get_pool(self, uri=None, api=None, headers=None): return self.idpool.get(uri=uri, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def get_pool():\n app = get_app()\n return app['pool']", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def get_pool(self, name, dc, cluster):\n cluster_obj = self.get_cluster(cluster, dc)\n for rp in cluster_obj.resourcePool.resourcePool:\n if rp.name == name:\n return rp", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.find_pool(pool_id)\n pool = _get_sdk_object_dict(pool)\n\n if request.GET.get('includeChildResources'):\n resources = {}\n resources['pool'] = pool\n\n if pool.get('members'):\n member_list = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n resources['members'] = member_list\n\n if pool.get('health_monitor_id'):\n monitor_id = pool['health_monitor_id']\n monitor = conn.load_balancer.find_health_monitor(\n monitor_id)\n monitor = _get_sdk_object_dict(monitor)\n resources['monitor'] = monitor\n\n return resources\n else:\n return pool", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )", "def fusion_api_get_storage_pools(self, uri=None, param='', api=None, headers=None):\n return self.pool.get(uri=uri, api=api, headers=headers, param=param)", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool", "def _get_pool(self, *args, **kwargs):\n\n pool_name = '_pool_%s' % getattr(self, 'alias', 'common')\n\n if not hasattr (self.__class__, pool_name):\n lock = thread.allocate_lock()\n lock.acquire()\n\n try:\n pool = cx_Oracle.SessionPool(\n user=self.user,\n password=self.password,\n dsn=self.tns,\n min=CX_POOL_SESSION_MIN,\n max=CX_POOL_SESSION_MAX,\n increment=CX_POOL_SESSION_INCREMENT,\n connectiontype=cx_Oracle.Connection,\n threaded=CX_POOL_THREADED,\n getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,\n homogeneous=True)\n except Exception as err:\n pool = None\n\n if pool:\n pool.timeout = 
CX_POOL_CONNECT_TIMEOUT\n setattr(self.__class__, pool_name, pool)\n else:\n msg = \"\"\" ### Database login failed or database not found ### \"\"\"\n raise self.Database_Error, ('%s') %(msg)\n\n lock.release()\n\n return getattr(self.__class__, pool_name)", "def storage_pool_get(context, storage_pool_id):\n return _storage_pool_get(context, storage_pool_id)", "def get_connection_pool(self, params):\r\n cp_params = dict(params)\r\n cp_params.update(self.pool_cls_kwargs)\r\n return self.pool_cls(**cp_params)", "def _get_pool (self, event):\n return self.pool", "def pool(self):\n return self._properties.get('pool')", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def get(self, uri=''):\n response = self._site\n for part in self._parts(uri):\n response = response[part]\n return response", "def get(cls, uri):\n return cls._perform_request(uri, 'GET')", "def get_object(uri):\n bucket_name, key = split_uri(uri)\n return get_client().get_object(bucket_name, key)", "def fusion_api_generate_pool(self, uri, api=None, headers=None):\n return self.idpool.generate(uri, api, headers)", "def get(self, uri=None, skip_cache=False, **kwargs):\n\n if uri:\n return self.get_from_uri(uri, skip_cache=skip_cache)\n\n return self.lookup(skip_cache=skip_cache, **kwargs)", "def pool(self) -> asyncpg.pool.Pool:\n return self.bot.pool", "def pool(self) -> Pool:\n assert self._pool is not None\n return self._pool", "def get_pool_id(pool_name, host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. Error message: [{err}]\".format(err=ret[1])\n return -1\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n if pool_name == p[\"name\"]:\n return p[\"id\"]\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1", "def run(self, endpoint_uri, **kwargs):\n return self.get(endpoint_uri, **kwargs)", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def get_default_pool(con):\n try:\n return con.floating_ip_pool_read(fq_name=conf.get('default_pool', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find pool.')\n return None", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)" ]
[ "0.65973014", "0.6477442", "0.64513993", "0.63907945", "0.6380997", "0.6286233", "0.62298506", "0.616288", "0.60808253", "0.6068717", "0.5985519", "0.59641933", "0.5862789", "0.58119214", "0.57735586", "0.5723557", "0.5722562", "0.5722516", "0.5702338", "0.56483227", "0.56204635", "0.5610198", "0.5558938", "0.5510455", "0.54625", "0.54172415", "0.53958184", "0.5377124", "0.53648955", "0.5363754" ]
0.69169164
0
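Usage sketch for the pool-lookup record above (the client object and `pool_uri` are placeholders; `uri` defaults to None in the documented signature):

    resp = fusion.fusion_api_get_pool(uri=pool_uri)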
Creates an IPv4 Range. [Arguments]
def fusion_api_create_ipv4_range(self, body, api=None, headers=None): return self.ipv4range.create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.allocate(body, uri, api, headers)", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def str_to_range(lo, hi):\n x = rpki.ipaddrs.parse(lo)\n y = rpki.ipaddrs.parse(hi)\n assert type(x) == type(y)\n if isinstance(x, rpki.ipaddrs.v4addr):\n return rpki.resource_set.resource_range_ipv4(x, y)\n else:\n return rpki.resource_set.resource_range_ipv6(x, y)", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def new_ip(address):\n return ipaddress.IPv4Address(address)", "def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def fromV4(klass, ip):\n if not isinstance(ip, V4Address):\n ip = V4Address(str(ip))\n return klass(\"::ffff:{0!s}\".format(ip))", "def test_IPv4s_to_valid_CIDR(self):\n self.assertEqual(\n helpers.IPRange_to_valid_CIDR('192.168.0.1', '192.168.0.1'),\n '192.168.0.1/32'\n )", "def test_IPv4_to_CIDR(self):\n match_list = '1.2.3.0/29'\n 
self.assertEqual(helpers.IPRange_to_CIDR('1.2.3.1', '1.2.3.6'), match_list)", "def test_ipv4_in_range_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def create(self, range):\n raise NotImplementedError", "def IPV4Argument(value):\n if not IsValidIPV4(value):\n raise argparse.ArgumentTypeError(\"invalid ipv4 value: '{0}'\".format(value))\n\n return value", "def change_dhcp_range(self, start, end, prefix_length):\n self.execute_script('change_dhcp_range', start, end, prefix_length)", "def get_dhcp_range(options, index):\n second_octet = 160 + index\n return \"192.%s.1.2-192.%s.255.254\" % (second_octet, second_octet)", "def xpointerNewRange(self, startindex, end, endindex):\n if end is None: end__o = None\n else: end__o = end._o\n ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)\n if ret is None:raise treeError('xmlXPtrNewRange() failed')\n return xpathObjectRet(ret)", "def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)", "def new_range(self, ip_range):\n if not ip_range in self.ip_ranges:\n self.ip_ranges.add(ip_range)\n doc = self.rs.id_to_object(ip_range)\n doc.add_tag('sniffer')\n doc.save()\n print_success(\"New ip range: {}\".format(ip_range))", "def encode_ipv4(self, input):\n return inet_aton(input)", "def test_ptr_in_dynamic_range(self):\n self.create_network_range(\n network_str='128.193.1.0/24', start_str='128.193.1.2',\n end_str='128.193.1.100', range_type='dy')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.1.2', ip_type='4', fqdn='foo.oregonstate.edu')", "def get_ip_range(self):\n return self._ip_range", "def fusion_api_allocate_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.allocate(body, uri, api, headers)", "def create_range(range_class):\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n return Range.objects.create(\n name=range_class.name,\n proxy_class=_class_path(range_class))", "def __init__(self, range_str):\n self.lo, self.hi = (self._parse_addr(addr_str) for addr_str in range_str.split('-'))" ]
[ "0.72949106", "0.716288", "0.6796408", "0.675997", "0.6509559", "0.63861024", "0.63720286", "0.6222499", "0.61475104", "0.6147029", "0.61271465", "0.61098194", "0.6094846", "0.6073817", "0.604635", "0.59926355", "0.5990896", "0.5971164", "0.58822244", "0.5699824", "0.5685605", "0.5639018", "0.5623919", "0.56229395", "0.5606378", "0.5581555", "0.5579637", "0.55419713", "0.54563105", "0.54423296" ]
0.7814607
0
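Usage sketch for the create record above (the client object and the body keys are assumptions modeled on typical range payloads, not taken from the record):

    body = {'name': 'range-1', 'startAddress': '10.0.0.2', 'endAddress': '10.0.0.254'}  # hypothetical
    resp = fusion.fusion_api_create_ipv4_range(body=body)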
Updates an IPv4 Range. [Arguments]
def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None): return self.ipv4range.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def change_dhcp_range(self, start, end, prefix_length):\n self.execute_script('change_dhcp_range', start, end, prefix_length)", "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.allocate(body, uri, api, headers)", "def ipv4(self, ipv4):\n\n self._ipv4 = ipv4", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def IPV4Argument(value):\n if not IsValidIPV4(value):\n raise argparse.ArgumentTypeError(\"invalid ipv4 value: '{0}'\".format(value))\n\n return value", "def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def update(\n self,\n Count=None,\n Dhcp4EchoRelayInfo=None,\n Dhcp6IaType=None,\n Enabled=None,\n IpAddress=None,\n IpAddressIncrement=None,\n IpAddressPoolIncrement=None,\n IpAddressPrefix=None,\n IpAddressPrefixIncrement=None,\n IpAddressPrefixPoolIncrement=None,\n IpDns1=None,\n IpDns2=None,\n IpGateway=None,\n IpGatewayIncrement=None,\n IpPrefix=None,\n IpType=None,\n Name=None,\n PrefixCount=None,\n PrefixLength=None,\n ServerAddress=None,\n ServerAddressIncrement=None,\n ServerCount=None,\n ServerGateway=None,\n ServerGatewayIncrement=None,\n ServerPrefix=None,\n UseRapidCommit=None,\n ):\n # type: (int, bool, str, bool, str, str, str, str, str, str, str, str, str, str, int, str, str, int, int, str, str, int, str, str, int, bool) -> DhcpServerRange\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def setRange(self, x_range, y_range):\n self._pipe.send(\"range,%f,%f,%f,%f\" % (x_range + y_range))", "def fusion_api_edit_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.update(body, uri, api, headers)", "def 
test_ipv4_in_range_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def new_range(self, ip_range):\n if not ip_range in self.ip_ranges:\n self.ip_ranges.add(ip_range)\n doc = self.rs.id_to_object(ip_range)\n doc.add_tag('sniffer')\n doc.save()\n print_success(\"New ip range: {}\".format(ip_range))", "def fusion_api_edit_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.update(body, uri, api, headers)", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4range.delete(name, uri, api, headers)", "def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return", "def setRange(self, x_range, y_range):\n pass", "def fusion_api_patch_ipv4_subnet(self, body, uri, param='', api=None, headers=None):\n return self.ipv4subnet.patch(body, uri, param, api, headers)", "def fusion_api_edit_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.update(body, uri, api, headers)", "def fix_addresses(start=None, end=None):\n if start in (None, idaapi.BADADDR):\n start = idaapi.cvar.inf.minEA\n\n if end in (None, idaapi.BADADDR):\n end = idaapi.cvar.inf.maxEA\n\n return start, end", "def fusion_api_edit_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.update(body, uri, api, headers)", "def update(self):\n self._sync_ranges()\n self._update_params()", "def set(self, addr, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set(self.map, addr, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set(self.map, addr, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")", "def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")", "def update(\n self,\n Enabled=None,\n FirstRoute=None,\n MaskWidth=None,\n Metric=None,\n NextHop=None,\n NumberOfRoute=None,\n RouteTag=None,\n Step=None,\n ):\n # type: (bool, str, int, int, str, int, int, int) -> RouteRange\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits" ]
[ "0.78092134", "0.64992386", "0.639456", "0.6309227", "0.6172596", "0.6076158", "0.60409886", "0.59660566", "0.59266126", "0.5883091", "0.57707864", "0.5672121", "0.559999", "0.55510175", "0.55309886", "0.5520255", "0.54902524", "0.5478357", "0.54560804", "0.53865826", "0.5339332", "0.5310301", "0.52955633", "0.52572197", "0.5249337", "0.524252", "0.5237061", "0.52277476", "0.52099097", "0.51733655" ]
0.793321
0
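Usage sketch for the edit record above (the client object, `updated_body`, and `range_uri` are placeholders; the signature is from the record):

    resp = fusion.fusion_api_edit_ipv4_range(body=updated_body, uri=range_uri)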
Deletes an IPv4 range based on name OR uri. [Arguments]
def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None): return self.ipv4range.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deleteAddressRange(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vsnrange.delete(name, uri, api, headers)", "def fusion_api_delete_vwwn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vwwnrange.delete(name, uri, api, headers)", "def delete_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> None:\n _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_delete_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def fusion_api_delete_ipv4_subnet(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4subnet.delete(name, uri, api, headers)", "def fusion_api_delete_vmac_range(self, name=None, uri=None, api=None, headers=None):\n return self.vmacrange.delete(name, uri, api, headers)", "def DeleteRange(self, r):\n self.__context.builder.DocumentDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end)\n left = self._blip_data.content[:r.start]\n right = self._blip_data.content[r.end + 1:]\n self._blip_data.content = left + right", "def delIfMatchedAddr(ipv4Addresses_, fIpv4Addresses_):\n s1 = netaddr.IPSet(ipv4Addresses_)\n l2 = []\n for i in fIpv4Addresses_[:]:\n m = re.search(r'(.*) \\.\\.\\. (.*)', i)\n if not m:\n l2.append(i)\n else:\n l2 += netaddr.IPSet(netaddr.iter_iprange(m.group(1), m.group(2)))\n s2 = netaddr.IPSet(l2)\n return map(str, list(s1 - s2))", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def DeleteAnnotationsInRange(self, r, name):\n self.__context.builder.DocumentAnnotationDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end,\n name)\n # TODO(davidbyttow): split local annotations.", "def delete(self, uri, where, selectionArgs):\n pass", "def unlink(address):", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def del_host(self, ipv4, rem_dpid, rem_port):\n assert(ipv4 is not None)\n assert(rem_dpid is not None)\n assert(rem_port is not None)\n LOG.info(\"Try to del host=%s -> (%s:%d)\" % (ipv4, rem_dpid, rem_port))\n\n ip_ = convert_ipv4_to_int(ipv4)\n self.del_link(ip_, 0, rem_dpid, rem_port)\n self.del_link(rem_dpid, rem_port, ip_, 0)\n self.del_node(ip_)", "def delete(fits: Optional[str], start: Optional[str], end: Optional[str], out: Optional[str]):\n delete_in_ssda(fits=fits, start=start, end=end, out=out)", "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('host', kwargs)", "def delete_endpoint(EndpointName=None):\n pass", "def fusion_api_edit_ipv4_range(self, body, uri, api=None, 
headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def deleteAttributeRange(self, startKey=None, endKey=None, limit=None):\n self.graph.deleteExtendedAttributeRange(entityId, startKey, endKey, limit)", "def delete(self, name, *args):\n\n if isinstance(name, string_types):\n name = dns.name.from_text(name, None)\n if len(args) == 0:\n self.find_rrset(self.authority, name, dns.rdataclass.ANY,\n dns.rdatatype.ANY, dns.rdatatype.NONE,\n dns.rdatatype.ANY, True, True)\n elif isinstance(args[0], dns.rdataset.Rdataset):\n for rds in args:\n for rd in rds:\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)\n else:\n args = list(args)\n if isinstance(args[0], dns.rdata.Rdata):\n for rd in args:\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)\n else:\n rdtype = args.pop(0)\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if len(args) == 0:\n self.find_rrset(self.authority, name,\n self.zone_rdclass, rdtype,\n dns.rdatatype.NONE,\n dns.rdataclass.ANY,\n True, True)\n else:\n for s in args:\n rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,\n self.origin)\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def deleteRecords(table: db.Table, addrMap: ghidra.program.database.map.AddressMap, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address) -> bool:\n ...", "def remove(self, source, destination, port):\n logger.info('Removing path from %s to %s on port %s',\n source, destination, port)\n\n firewall_name = \"bu-%s-%s-%s\" % (destination.network.name, destination.name, port)\n\n def remove_from_ranges(to_remove, address_ranges):\n logger.info(\"Removing %s from %s\", to_remove, address_ranges)\n resulting_ranges = []\n if not address_ranges:\n return None\n for address_range in address_ranges:\n remove_net = ipaddress.IPv4Network(to_remove)\n address_range_network = ipaddress.IPv4Network(address_range)\n if remove_net.overlaps(address_range_network):\n if remove_net.prefixlen > address_range_network.prefixlen:\n new_range_networks = address_range_network.address_exclude(remove_net)\n resulting_ranges.extend([str(new_range_network) for new_range_network\n in new_range_networks])\n else:\n resulting_ranges.extend([str(address_range_network)])\n logger.info(\"New ranges: %s\", resulting_ranges)\n return resulting_ranges\n\n try:\n firewall = self.driver.ex_get_firewall(firewall_name)\n if isinstance(source, CidrBlock):\n firewall.source_ranges = remove_from_ranges(source.cidr_block,\n firewall.source_ranges)\n else:\n source_tag = \"%s-%s\" % (source.network.name, source.name)\n if firewall.source_tags:\n firewall.source_tags = [tag for tag in firewall.source_tags\n if tag != source_tag]\n except ResourceNotFoundError:\n logger.info(\"Firewall %s doesn't exist\", firewall_name)\n return None\n\n # We need this because the default is to add \"0.0.0.0/0\" if these aren't set, which is bad.\n if not firewall.source_tags and not firewall.source_ranges:\n return self.driver.ex_destroy_firewall(firewall)\n return self.driver.ex_update_firewall(firewall)", "def ip4range(iprange):\n assert not ('/' in iprange 
and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def delete_network_segments(self, tenant_id, network_segments):", "def delete(self, ip): # pylint: disable=invalid-name\n return self.request(\"DELETE\", ip)", "def UnsafeDestroyRange(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.7396222", "0.71775776", "0.68177503", "0.6593932", "0.64722323", "0.6412465", "0.61702263", "0.58496267", "0.57524484", "0.57498884", "0.5725304", "0.5702309", "0.56434345", "0.56397855", "0.56372035", "0.56154263", "0.5552025", "0.5521105", "0.5487277", "0.546137", "0.53942084", "0.53925824", "0.53819704", "0.5348547", "0.5347153", "0.5342101", "0.5331111", "0.5319897", "0.52744395", "0.5266154" ]
0.8372809
0
Gets a default or paginated collection of IPv4 Ranges. [Arguments]
def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None): return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ip_range(self):\n return self._ip_range", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def discoverRanges(self):\n iprange = self.options.range\n if isinstance(iprange, basestring):\n iprange = [iprange]\n # in case someone uses 10.0.0.0-5,192.168.0.1-5 instead of\n # --range 10.0.0.0-5 --range 192.168.0.1-5\n if isinstance(iprange, list) and iprange[0].find(\",\") > -1:\n iprange = [n.strip() for n in iprange[0].split(\",\")]\n ips = []\n for rangelimit in iprange:\n # Parse to find ips included\n ips.extend(parse_iprange(rangelimit))\n results = yield self.pingMany(ips)\n goodips, badips = _partitionPingResults(results)\n self.log.debug(\n \"Found %d good IPs and %d bad IPs\", len(goodips), len(badips)\n )\n devices = yield self.discoverDevices(goodips)\n self.log.info(\"Discovered %d active IPs\", len(goodips))\n defer.returnValue(devices)", "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def ip_restriction_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"ip_restriction_ranges\")", "def ranges(self):\n return 
self._ranges", "def get_network_ip_range():\n ip_set = set()\n default_route = get_default_route()\n\n assert default_route[1] == sc.conf.iface, \"incorrect sc.conf.iface\"\n\n iface_str = ''\n if sys.platform.startswith('win'):\n iface_info = sc.conf.iface\n iface_str = iface_info.guid\n else:\n iface_str = sc.conf.iface\n\n netmask = None\n for k, v in netifaces.ifaddresses(str(iface_str)).items():\n if v[0]['addr'] == default_route[2]:\n netmask = v[0]['netmask']\n break\n\n if netmask is None:\n return set()\n\n gateway_ip = netaddr.IPAddress(default_route[0])\n cidr = netaddr.IPAddress(netmask).netmask_bits()\n subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr))\n\n for ip in subnet:\n ip_set.add(str(ip))\n\n return ip_set", "def change_default_range(networks, number_excluded_ips,\n cut_from_start=True):\n for default_network in filter(\n lambda x: ((x['name'] != 'fuelweb_admin')and\n (x['name'] != 'private')),\n networks):\n default_range = [netaddr.IPAddress(str(ip)) for ip\n in default_network[\"ip_ranges\"][0]]\n if cut_from_start:\n new_range = [default_range[0],\n default_range[0] + number_excluded_ips]\n else:\n new_range = [default_range[0] + number_excluded_ips + 1,\n default_range[1]]\n default_network[\"ip_ranges\"][0] = [str(ip)\n for ip in new_range]", "def NumberOfMappingIPV4Ranges(self):\r\n\t\treturn self._get_attribute('numberOfMappingIPV4Ranges')", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_ip4_adresses(self):\n self._search_regx(self.PATTERN_IP4)\n return self._ip_adresses", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def get_range(n0: int, n1: int, ns: int) -> List[int]:\n # Return a range as a list\n def lrange(a, b, n=1) -> List[int]:\n return list(range(a, b, n))\n # Get the in-bounds part of the range\n n_range = lrange(max(0, n0), min(ns, n1))\n # Handle out-of-bounds indices by reflection across boundaries\n if n0 < 0:\n # Underflow\n n_range = lrange(-n0, 0, -1) + n_range\n if n1 > ns:\n # Overflow\n n_range = n_range + lrange(ns - 1, 2 * ns - n1 - 1, -1)\n\n return n_range", "def ip_restriction_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_restriction_ranges\")", "def ip_restriction_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ip_restriction_ranges\")", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def summarize_ranges(addrlist):\n ranges = []\n start = None\n prev_range_class = None\n for addr in addrlist:\n if start is None:\n start = addr.ip\n end = addr.ip\n prev_range_class = addr.range_class\n continue\n if addr.range_class == prev_range_class:\n if int(addr.ip) == int(end) + 1:\n end = addr.ip\n prev_range_class = addr.range_class\n continue\n if start == end:\n ranges.append(\"{} ({})\".format(start, prev_range_class))\n else:\n ranges.append(\"{}-{} ({})\".format(start, end, 
prev_range_class))\n start = end = addr.ip\n prev_range_class = addr.range_class\n if start is not None:\n if start == end:\n ranges.append(\"{} ({})\".format(start, prev_range_class))\n else:\n ranges.append(\"{}-{} ({})\".format(start, end, prev_range_class))\n\n return ranges", "def ranges(self) -> List[Range]:\n return list(iter(self._ranges))", "def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]" ]
[ "0.6789062", "0.6648467", "0.6641907", "0.6465023", "0.6461073", "0.6449472", "0.6449472", "0.6449472", "0.6449472", "0.6323299", "0.6323299", "0.6323299", "0.6323299", "0.62585264", "0.6239497", "0.61889344", "0.6185613", "0.61506927", "0.6150117", "0.6069847", "0.60507596", "0.6038636", "0.60227823", "0.599682", "0.599682", "0.59929776", "0.5983264", "0.5969871", "0.59431136", "0.5895303" ]
0.7178745
0
Allocate an IPv4 Range. [Arguments]
def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None): return self.ipv4range.allocate(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def fusion_api_allocate_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.allocate(body, uri, api, headers)", "def new_ip(address):\n return ipaddress.IPv4Address(address)", "def create(self, range):\n raise NotImplementedError", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def test_ptr_in_dynamic_range(self):\n self.create_network_range(\n network_str='128.193.1.0/24', start_str='128.193.1.2',\n end_str='128.193.1.100', range_type='dy')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.1.2', ip_type='4', fqdn='foo.oregonstate.edu')", "def test_ipv4_in_range_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def test_IPv4s_to_valid_CIDR(self):\n self.assertEqual(\n helpers.IPRange_to_valid_CIDR('192.168.0.1', '192.168.0.1'),\n '192.168.0.1/32'\n )", "def str_to_range(lo, hi):\n x = rpki.ipaddrs.parse(lo)\n y = rpki.ipaddrs.parse(hi)\n assert type(x) == type(y)\n if isinstance(x, rpki.ipaddrs.v4addr):\n return rpki.resource_set.resource_range_ipv4(x, y)\n else:\n 
return rpki.resource_set.resource_range_ipv6(x, y)", "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def xpointerNewRange(self, startindex, end, endindex):\n if end is None: end__o = None\n else: end__o = end._o\n ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)\n if ret is None:raise treeError('xmlXPtrNewRange() failed')\n return xpathObjectRet(ret)", "def fusion_api_allocate_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.allocate(body, uri, api, headers)", "def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def test_IPv4_to_CIDR(self):\n match_list = '1.2.3.0/29'\n self.assertEqual(helpers.IPRange_to_CIDR('1.2.3.1', '1.2.3.6'), match_list)", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fromV4(klass, ip):\n if not isinstance(ip, V4Address):\n ip = V4Address(str(ip))\n return klass(\"::ffff:{0!s}\".format(ip))", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def fusion_api_allocate_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.allocate(body, uri, api, headers)", "def new_range(self, ip_range):\n if not ip_range in self.ip_ranges:\n self.ip_ranges.add(ip_range)\n doc = self.rs.id_to_object(ip_range)\n doc.add_tag('sniffer')\n doc.save()\n print_success(\"New ip range: {}\".format(ip_range))", "def fusion_api_allocate_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.allocate(body, uri, api, headers)", "def allocate_address():\n response = EC2.allocate_address(\n )\n return response", "def test_add_autoassigned_pool_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2,\n ip=self.DEFAULT_IPV4_POOL)\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)", "def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def create_dhcp_pool(options, vsm_obj, range, default_gateway):\n edge = Edge(vsm_obj, '4.0')\n edge_id = get_edge(vsm_obj)\n edge.id = edge_id\n\n dhcp_py_dict = {\n 'enabled': True,\n 'logging': {'loglevel': 'info', 'enable': False},\n 'ippools': [\n {\n 'autoconfiguredns': True,\n 'defaultGateway': default_gateway,\n 'iprange': range,\n }\n ],\n }\n dhcp_client = DHCP(edge)\n print(\"Creating dhcp ippool with range %s\" % range)\n dhcp_schema_object = dhcp_client.get_schema_object(dhcp_py_dict)\n existing_dhcp_schema = dhcp_client.read()\n if existing_dhcp_schema and existing_dhcp_schema.ipPools:\n print \"append dhcp ippool to existing list\"\n dhcp_schema_object.ipPools = existing_dhcp_schema.ipPools + \\\n dhcp_schema_object.ipPools\n result = dhcp_client.create(dhcp_schema_object)\n\n if (result[0].response.status != 204):\n r_vars = vars(result[0])\n print(\"Create IP Pool error: %s\" % 
result[0].response.reason)\n print ', '.join(\"%s: %s\" % item for item in r_vars.items())\n return False\n return True", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)" ]
[ "0.7481766", "0.6659667", "0.661077", "0.6439414", "0.6322952", "0.6183965", "0.6094611", "0.6012776", "0.6004153", "0.598804", "0.59667003", "0.5963958", "0.5933521", "0.5932878", "0.5921185", "0.590265", "0.58689326", "0.5822074", "0.57630247", "0.5716651", "0.56743896", "0.5673166", "0.5648676", "0.5617136", "0.56153905", "0.56108624", "0.5607802", "0.5548732", "0.5451529", "0.5441674" ]
0.78125966
0
Collect an IPv4 Range. [Arguments]
def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None): return self.ipv4range.collect(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def test_ipv4_in_range_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def str_to_range(lo, hi):\n x = rpki.ipaddrs.parse(lo)\n y = rpki.ipaddrs.parse(hi)\n assert type(x) == type(y)\n if isinstance(x, rpki.ipaddrs.v4addr):\n return rpki.resource_set.resource_range_ipv4(x, y)\n else:\n return rpki.resource_set.resource_range_ipv6(x, y)", "def get_ip_range(self):\n return self._ip_range", "def test_IPv4_to_CIDR(self):\n match_list = '1.2.3.0/29'\n self.assertEqual(helpers.IPRange_to_CIDR('1.2.3.1', '1.2.3.6'), match_list)", "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.allocate(body, uri, api, headers)", "def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range", "def get_ip4_adresses(self):\n self._search_regx(self.PATTERN_IP4)\n return 
self._ip_adresses", "def test_IPv4s_to_valid_CIDR(self):\n self.assertEqual(\n helpers.IPRange_to_valid_CIDR('192.168.0.1', '192.168.0.1'),\n '192.168.0.1/32'\n )", "def WhereAddressInRange(self, start, end=None):\n if isinstance(start, str):\n start = int(start, 16)\n if end is None:\n end = start + 1\n return self.Filter(lambda s: s.address >= start and s.address < end)", "def part_1(ranges: 'RangeSet') -> int:\n\n first_allowed = ranges.ranges[0].vmax + 1\n print(f\"part 1: first allowed IP address is {first_allowed}\")\n return first_allowed", "def filter_ipnet_range_size(network_cidr, range_start, range_end):\n try:\n network_cidr_str = unicode(network_cidr)\n range_start_str = unicode(range_start)\n range_end_str = unicode(range_end)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n range_start_str = str(range_start)\n range_end_str = str(range_end)\n try:\n ipnet = IPv4Network(network_cidr_str)\n ip1 = IPv4Address(range_start_str)\n ip2 = IPv4Address(range_end_str)\n\n if ip1 in ipnet and ip2 in ipnet:\n index1 = list(ipnet.hosts()).index(ip1)\n index2 = list(ipnet.hosts()).index(ip2)\n ip_range_size = index2 - index1 + 1\n return ip_range_size\n else:\n raise ValueError\n except ValueError as ex:\n logging.error(range_start_str + \" and \" + range_end_str +\n \" are not valid IP addresses for range inside \" +\n network_cidr_str)\n raise", "def get_dhcp_range(options, index):\n second_octet = 160 + index\n return \"192.%s.1.2-192.%s.255.254\" % (second_octet, second_octet)", "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def get_local_address_range(self):\n return str(self.min_local_ip), str(self.max_local_ip)", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def IPV4Argument(value):\n if not IsValidIPV4(value):\n raise argparse.ArgumentTypeError(\"invalid ipv4 value: '{0}'\".format(value))\n\n return value", "def test_ipv6_in_range(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n \n assert test_ip.in_range(\"2000:0db8:85a3:08d3:1319:8a2e:0370:7344\",\"2002:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n assert test_ip.in_range(\"2001:0db8:85a3:07d3:1319:8a2e:0370:7344\",\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n assert test_ip.in_range(\"::ffff:1.1.1.1\",\"2501:0db8:85a3:08d3:1319:8a2e:0370:7344\")", "def summarize_ranges(addrlist):\n ranges = []\n start = None\n prev_range_class = None\n for addr in addrlist:\n if start is None:\n start = addr.ip\n end = addr.ip\n prev_range_class = addr.range_class\n continue\n if addr.range_class == prev_range_class:\n if int(addr.ip) == int(end) + 1:\n end = addr.ip\n prev_range_class = addr.range_class\n continue\n if start == end:\n ranges.append(\"{} ({})\".format(start, prev_range_class))\n else:\n ranges.append(\"{}-{} ({})\".format(start, end, prev_range_class))\n start = end = addr.ip\n prev_range_class = addr.range_class\n if start is not None:\n if start == end:\n ranges.append(\"{} ({})\".format(start, prev_range_class))\n else:\n ranges.append(\"{}-{} ({})\".format(start, end, prev_range_class))\n\n return ranges", "def getRange(self, epRange):\n epRange = list(map(int, epRange.split('-')))\n if len(epRange) > 1:\n return list(range(epRange[0], epRange[1]+1))\n else:\n return epRange", "def rangestr(\n src: str,\n lower: Optional[int] = None,\n 
upper: Optional[int] = None,\n delimiter: str = parsers.DEFAULT_DELIMITER,\n implicit_inclusion: bool = False,\n) -> Iterator[int]:\n ranges = parsers.parse_ranges(src, lower, upper, delimiter, implicit_inclusion)\n return _chain.from_iterable(map(lambda r: range(*r), ranges))", "def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def GetSRange(self):\n ...", "def discoverRanges(self):\n iprange = self.options.range\n if isinstance(iprange, basestring):\n iprange = [iprange]\n # in case someone uses 10.0.0.0-5,192.168.0.1-5 instead of\n # --range 10.0.0.0-5 --range 192.168.0.1-5\n if isinstance(iprange, list) and iprange[0].find(\",\") > -1:\n iprange = [n.strip() for n in iprange[0].split(\",\")]\n ips = []\n for rangelimit in iprange:\n # Parse to find ips included\n ips.extend(parse_iprange(rangelimit))\n results = yield self.pingMany(ips)\n goodips, badips = _partitionPingResults(results)\n self.log.debug(\n \"Found %d good IPs and %d bad IPs\", len(goodips), len(badips)\n )\n devices = yield self.discoverDevices(goodips)\n self.log.info(\"Discovered %d active IPs\", len(goodips))\n defer.returnValue(devices)" ]
[ "0.74724936", "0.71579176", "0.7090255", "0.7082461", "0.6992669", "0.6721628", "0.65612406", "0.65427846", "0.6521789", "0.64495397", "0.63997006", "0.624781", "0.6188592", "0.6122347", "0.6085487", "0.6070757", "0.60567445", "0.6046693", "0.6019463", "0.59318805", "0.5909256", "0.58796704", "0.5864434", "0.58313525", "0.57796013", "0.57762045", "0.57757574", "0.5772422", "0.5754204", "0.57377124" ]
0.71856034
1
Returns all fragments that have been allocated from a IPv4 Range [Arguments]
def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None): return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def get_ip4_adresses(self):\n self._search_regx(self.PATTERN_IP4)\n return self._ip_adresses", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.allocate(body, uri, api, headers)", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def discoverRanges(self):\n iprange = self.options.range\n if isinstance(iprange, basestring):\n iprange = [iprange]\n # in case someone uses 10.0.0.0-5,192.168.0.1-5 instead of\n # --range 10.0.0.0-5 --range 192.168.0.1-5\n if isinstance(iprange, list) and iprange[0].find(\",\") > -1:\n iprange = [n.strip() for n in iprange[0].split(\",\")]\n ips = []\n for rangelimit in iprange:\n # Parse to find ips included\n ips.extend(parse_iprange(rangelimit))\n results = yield self.pingMany(ips)\n goodips, badips = _partitionPingResults(results)\n self.log.debug(\n \"Found %d good IPs and %d bad IPs\", len(goodips), len(badips)\n )\n devices = yield self.discoverDevices(goodips)\n self.log.info(\"Discovered %d active IPs\", len(goodips))\n 
defer.returnValue(devices)", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def NumberOfMappingIPV4Ranges(self):\r\n\t\treturn self._get_attribute('numberOfMappingIPV4Ranges')", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def ReassambleIpFragments(self):\n return self._get_attribute('reassambleIpFragments')", "def list_fragments(self):\n return list(self.data.fragments)", "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def WhereAddressInRange(self, start, end=None):\n if isinstance(start, str):\n start = int(start, 16)\n if end is None:\n end = start + 1\n return self.Filter(lambda s: s.address >= start and s.address < end)", "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def iter_fragments(self, frag_id_begin = None, frag_id_end = None):\n return iter_fragments(iter(self.fragment_list), frag_id_begin, frag_id_end)", "def populate_ranges(self,):\n 
self.ranges = list()\n # coredump: info target shows all sections in full detail\n # live debug: only file-backed sections are shown\n targetinfo = gdb.execute(\"info target\", False, True)\n for line in targetinfo.splitlines():\n line = line.strip()\n if line.startswith('`'):\n line = line.split(\"'\")[1]\n source = line[1:]\n continue\n if not line.startswith(\"0x\"):\n continue\n\n start, dash, end, str_is, memtype = line.split(maxsplit=4)\n assert(dash == '-' and str_is == 'is')\n start = int(start, 16)\n end = int(end, 16)\n new_range = MemoryRange(start, end-start, source, memtype)\n startoverlap = self.get_range(start)\n endoverlap = self.get_range(end)\n\n if endoverlap == startoverlap:\n endoverlap = None\n\n #TODO: splitup and punch holes/replace\n if memtype.startswith('.'):\n # gdb reports loadXXX sections on top of file-backed sections of the binary\n # probably because the kernel maps writeable pages on top of them\n # Therefore, keep the more accurate description from the file-backed section\n if startoverlap is not None and startoverlap.memtype == MemoryType.General:\n previous, current = self.split_range_at(start)\n self.ranges.remove(current)\n startoverlap = None\n if endoverlap is not None and endoverlap.memtype == MemoryType.General:\n current, end = self.split_range_at(end)\n self.ranges.remove(current)\n endoverlap = None\n\n if startoverlap is not None and endoverlap is not None:\n print(\"Overlapping memory ranges: %s in %s -> %s\" %\n (new_range, str(startoverlap), str(endoverlap)))\n bisect.insort(self.ranges, new_range)\n\n # live target: run-time allocated memory and some file-backed sections\n # There typically is overlap with the 'info target' output, so give precedence\n # to the previously added ranges\n mappinginfo = gdb.execute(\"info proc mappings\", False, True)\n for line in mappinginfo.splitlines():\n line = line.strip()\n if not line.startswith(\"0x\"):\n continue\n\n items = line.split()\n if len(items) == 4:\n start, end, size, offset = items\n source = \"unknown\"\n elif len(items) == 5:\n start, end, size, offset, source = items\n else:\n print(\"Unexpected line when parsing 'info proc mappings': %s\" % line)\n continue\n\n start = int(start, 16)\n size = int(size, 16)\n end = int(end, 16)\n\n new_range = MemoryRange(start, size, source, source)\n self.tentative_add_range(new_range)", "def ranges(self):\n for b in self.remaining_blocks:\n yield b, b * DEFAULT_BLOCK_SIZE, min(self.content_length, (b + 1) * DEFAULT_BLOCK_SIZE)", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]", "def fusion_api_get_vwwn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')" ]
[ "0.67204875", "0.67177725", "0.66372305", "0.6619512", "0.660779", "0.64012045", "0.627785", "0.62567484", "0.6067107", "0.59505075", "0.59338576", "0.5867897", "0.58538926", "0.5714393", "0.5708639", "0.5678495", "0.5657969", "0.5631757", "0.562891", "0.56171983", "0.5423143", "0.54165864", "0.5372569", "0.5364874", "0.536367", "0.5348807", "0.5290383", "0.5286132", "0.52567244", "0.52468944" ]
0.7642433
0
Returns all the free fragments in a IPv4 Range. [Arguments]
def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None): return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_vwwn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def fusion_api_collect_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def ReassambleIpFragments(self):\n return self._get_attribute('reassambleIpFragments')", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return 
_calculate_fragmentation(buddyinfo_output)", "def get_ip4_adresses(self):\n self._search_regx(self.PATTERN_IP4)\n return self._ip_adresses", "def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.allocate(body, uri, api, headers)", "def NumberOfMappingIPV4Ranges(self):\r\n\t\treturn self._get_attribute('numberOfMappingIPV4Ranges')", "def ranges(self):\n for b in self.remaining_blocks:\n yield b, b * DEFAULT_BLOCK_SIZE, min(self.content_length, (b + 1) * DEFAULT_BLOCK_SIZE)", "def discoverRanges(self):\n iprange = self.options.range\n if isinstance(iprange, basestring):\n iprange = [iprange]\n # in case someone uses 10.0.0.0-5,192.168.0.1-5 instead of\n # --range 10.0.0.0-5 --range 192.168.0.1-5\n if isinstance(iprange, list) and iprange[0].find(\",\") > -1:\n iprange = [n.strip() for n in iprange[0].split(\",\")]\n ips = []\n for rangelimit in iprange:\n # Parse to find ips included\n ips.extend(parse_iprange(rangelimit))\n results = yield self.pingMany(ips)\n goodips, badips = _partitionPingResults(results)\n self.log.debug(\n \"Found %d good IPs and %d bad IPs\", len(goodips), len(badips)\n )\n devices = yield self.discoverDevices(goodips)\n self.log.info(\"Discovered %d active IPs\", len(goodips))\n defer.returnValue(devices)", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def list_fragments(self):\n return list(self.data.fragments)", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0", "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' 
not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def length_n_frags(mol, initial):\n frags = []\n current_frag = initial\n if len(current_frag) >= 4:\n return [current_frag]\n\n neighbor_indices = mol.graph.neighbors[current_frag[-1]]\n for neighbor_ind in neighbor_indices:\n if neighbor_ind not in current_frag:\n new_frag = current_frag + (neighbor_ind, )\n frags += length_n_frags(mol, new_frag)\n return frags", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def fragmentation(free_resources_gaps, p=2):\n f = free_resources_gaps\n frag = pd.Series()\n for i, fi in enumerate(f):\n if fi.size == 0:\n frag_i = 0\n else:\n frag_i = 1 - (sum(fi**p) / sum(fi)**p)\n frag.set_value(i, frag_i)\n return frag" ]
[ "0.72713524", "0.68684775", "0.68349123", "0.67930484", "0.672236", "0.65734833", "0.64969206", "0.6338586", "0.6236195", "0.57705426", "0.5738103", "0.5639637", "0.5541575", "0.549044", "0.5446197", "0.5423923", "0.53578913", "0.5311482", "0.52758336", "0.52411574", "0.5227314", "0.5219765", "0.5194855", "0.5185877", "0.51646066", "0.5134107", "0.51317585", "0.51213664", "0.5096507", "0.5086342" ]
0.79109085
0
Patch an IPv4 Range. [Arguments]
def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None): return self.ipv4range.patch(body, uri, param, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def change_dhcp_range(self, start, end, prefix_length):\n self.execute_script('change_dhcp_range', start, end, prefix_length)", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip", "def fusion_api_patch_ipv4_subnet(self, body, uri, param='', api=None, headers=None):\n return self.ipv4subnet.patch(body, uri, param, api, headers)", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def fusion_api_allocate_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.allocate(body, uri, api, headers)", "def new_range(self, ip_range):\n if not ip_range in self.ip_ranges:\n self.ip_ranges.add(ip_range)\n doc = self.rs.id_to_object(ip_range)\n doc.add_tag('sniffer')\n doc.save()\n print_success(\"New ip range: {}\".format(ip_range))", "def test_ipv4_in_range_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def test_ipam_ip_addresses_partial_update(self):\n pass", "def IPV4Argument(value):\n if not IsValidIPV4(value):\n raise argparse.ArgumentTypeError(\"invalid ipv4 value: '{0}'\".format(value))\n\n return value", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def ipv4(self, ipv4):\n\n self._ipv4 = ipv4", "def fusion_api_edit_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.update(body, uri, api, headers)", "def setRange(self, x_range, y_range):\n self._pipe.send(\"range,%f,%f,%f,%f\" % (x_range + y_range))", "def fusion_api_collect_ipv4_range(self, body, uri, 
api=None, headers=None):\n return self.ipv4range.collect(body, uri, api, headers)", "def set(self, addr, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set(self.map, addr, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set(self.map, addr, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def test_ipam_ip_addresses_update(self):\n pass", "def setRange(self, x_range, y_range):\n pass", "def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return", "def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4range.delete(name, uri, api, headers)", "def fix_addresses(start=None, end=None):\n if start in (None, idaapi.BADADDR):\n start = idaapi.cvar.inf.minEA\n\n if end in (None, idaapi.BADADDR):\n end = idaapi.cvar.inf.maxEA\n\n return start, end", "def fusion_api_edit_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.update(body, uri, api, headers)", "def change_default_range(networks, number_excluded_ips,\n cut_from_start=True):\n for default_network in filter(\n lambda x: ((x['name'] != 'fuelweb_admin')and\n (x['name'] != 'private')),\n networks):\n default_range = [netaddr.IPAddress(str(ip)) for ip\n in default_network[\"ip_ranges\"][0]]\n if cut_from_start:\n new_range = [default_range[0],\n default_range[0] + number_excluded_ips]\n else:\n new_range = [default_range[0] + number_excluded_ips + 1,\n default_range[1]]\n default_network[\"ip_ranges\"][0] = [str(ip)\n for ip in new_range]", "def test_patch_host_subnet(self):\n pass", "def change_ip(self, address: int) -> None:\n self.regs[\"ip\"].write(address)", "def deleteAddressRange(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def format_ipv4(value, mask=None):\n value_ipv4 = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(value))])\n if mask is None:\n return value_ipv4\n value_mask = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(mask))])\n return \"{}/{}\".format(value_ipv4, value_mask)" ]
[ "0.7379995", "0.6352746", "0.6240742", "0.6168495", "0.6013391", "0.599968", "0.5995755", "0.5837365", "0.5659868", "0.56368583", "0.5605617", "0.55994976", "0.55361867", "0.5511782", "0.54048556", "0.5372726", "0.5357076", "0.53434396", "0.533772", "0.5325003", "0.53189754", "0.53099024", "0.52712494", "0.52681947", "0.52338094", "0.5220881", "0.5172765", "0.51647043", "0.51393634", "0.51374394" ]
0.8050229
0
Creates a IPv4 Subnet. [Arguments]
def fusion_api_create_ipv4_subnet(self, body, sessionID=None, api=None, headers=None): return self.ipv4subnet.create(body, sessionID, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def subnet_create(request, network_id, **kwargs):\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)", "def create_subnet(self, body=None):\r\n return self.post(self.subnets_path, body=body)", "def allocate_subnet(self):\n if len(self.subnet_list) == 0:\n subnet = '192.168.1.0/24'\n self.subnet_list.append(subnet)\n return subnet\n else:\n subnet = self.subnet_list[::-1][0]\n ip = ipaddress.IPv4Network(subnet)[0]\n s = ipaddress.IPv4Address(ip) + 256\n return '{}{}'.format(s, '/24')", "def test_create_host_subnet(self):\n pass", "def fusion_api_allocate_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.allocate(body, uri, api, headers)", "def create_subnet(\n self,\n network_name_or_id,\n cidr=None,\n ip_version=4,\n enable_dhcp=False,\n subnet_name=None,\n tenant_id=None,\n allocation_pools=None,\n gateway_ip=None,\n disable_gateway_ip=False,\n dns_nameservers=None,\n host_routes=None,\n ipv6_ra_mode=None,\n ipv6_address_mode=None,\n prefixlen=None,\n use_default_subnetpool=False,\n **kwargs,\n ):\n\n if tenant_id is not None:\n filters = {'tenant_id': tenant_id}\n else:\n filters = None\n\n network = self.get_network(network_name_or_id, filters)\n if not network:\n raise exc.OpenStackCloudException(\n \"Network %s not found.\" % network_name_or_id\n )\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n if not cidr and not use_default_subnetpool:\n raise exc.OpenStackCloudException(\n 'arg:cidr is required when a subnetpool is not used'\n )\n\n if cidr and use_default_subnetpool:\n raise exc.OpenStackCloudException(\n 'arg:cidr must be set to None when use_default_subnetpool == '\n 'True'\n )\n\n # Be friendly on ip_version and allow strings\n if isinstance(ip_version, str):\n try:\n ip_version = int(ip_version)\n except ValueError:\n raise exc.OpenStackCloudException(\n 'ip_version must be an integer'\n )\n\n # The body of the neutron message for the subnet we wish to create.\n # This includes attributes that are required or have defaults.\n subnet = dict(\n {\n 'network_id': network['id'],\n 'ip_version': ip_version,\n 'enable_dhcp': enable_dhcp,\n },\n **kwargs,\n )\n\n # Add optional attributes to the message.\n if cidr:\n subnet['cidr'] = cidr\n if subnet_name:\n subnet['name'] = subnet_name\n if tenant_id:\n subnet['tenant_id'] = tenant_id\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n if ipv6_ra_mode:\n subnet['ipv6_ra_mode'] = ipv6_ra_mode\n if ipv6_address_mode:\n subnet['ipv6_address_mode'] = ipv6_address_mode\n if prefixlen:\n subnet['prefixlen'] = prefixlen\n if use_default_subnetpool:\n subnet['use_default_subnetpool'] = True\n\n return self.network.create_subnet(**subnet)", "def create_subnet ( vpc_conn,\n ec2_conn,\n 
vpc_id,\n subnet_cidr,\n zone_name,\n subnet_basename ) :\n subnet = vpc_conn.create_subnet( vpc_id, subnet_cidr, zone_name )\n aws_cmd( ec2_conn.create_tags, [ subnet.id,\n { \"Name\": subnet_basename + \"-\" + zone_name[-1].upper( ) + \"-Subnet\" } ] )\n return subnet", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def subnetpool_create(request, name, prefixes, **kwargs):\n LOG.debug(\"subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, \"\n \"kwargs=%(kwargs)s\", {'name': name, 'prefixes': prefixes,\n 'kwargs': kwargs})\n body = {'subnetpool':\n {'name': name,\n 'prefixes': prefixes,\n }\n }\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnetpool'].update(kwargs)\n subnetpool = \\\n neutronclient(request).create_subnetpool(body=body).get('subnetpool')\n return SubnetPool(subnetpool)", "def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])", "def test_create_subnet(self):\n client_token = generate_client_token()\n subnet_name = 'test_subnet_name1' + client_token\n subnet_cidr = '192.168.0.64/26'\n self.assertEqual(\n type(self.the_client.create_subnet(subnet_name,\n 'cn-bj-a',\n subnet_cidr,\n vpc_id,\n client_token=client_token)),\n baidubce.bce_response.BceResponse)", "def create(self, name, network_id, tenant_id, gateway_ip, cidr, \n allocation_pools=None, enable_dhcp=True, host_routes=None,\n dns_nameservers=['8.8.8.7', '8.8.8.8']):\n data = {\n \"subnet\": {\n \"name\": name,\n \"network_id\": network_id,\n \"tenant_id\": tenant_id,\n \"ip_version\": 4,\n \"cidr\": cidr,\n \"gateway_ip\": gateway_ip,\n }\n }\n if allocation_pools is not None:\n data['subnet']['allocation_pools'] = allocation_pools\n if host_routes is not None:\n data['subnet']['host_routes'] = host_routes\n if enable_dhcp is not None:\n data['subnet']['enable_dhcp'] = enable_dhcp\n if dns_nameservers is not None:\n data['subnet']['dns_nameservers'] = dns_nameservers\n\n path = '%s/subnets' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack subnet: %s' % truncate(res))\n 
return res[0]['subnet']", "def create_namespaced_host_subnet(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def __init__(self, name: str, *args, size: int = 1024, network: 'base_network.Network' = None):\n self.name = name\n self._network = network if network is not None else defaults.network\n self._network.add_subnet(self)\n self._max_size = size\n self._ip_range = self._network.get_subnet_range(self._max_size)\n self._hosts = list(self._ip_range.hosts())\n\n self._nodes_dict = {}\n self.started = False\n self.loaded = False\n\n for node in utils.args.list_from_args(args):\n self.add_node(node)", "def __init__(self, network, subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def create_subnet(ec2, vpc, \n subnet_name,\n subnet_region, \n subnet_cidr_block,\n subnet_type=\"private\"):\n # create a public subnet within the VPC\n print(\"\\n===Creating a \"+subnet_type+\" subnet...\")\n subnet = ec2.create_subnet(\n AvailabilityZone=subnet_region,\n CidrBlock=subnet_cidr_block,\n VpcId=vpc.vpc_id,\n DryRun=False,\n TagSpecifications=[{\n \"ResourceType\":\"subnet\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": subnet_name},\n ]\n }])\n \n print(f\"===Subnet {subnet_name} is 
available!\")\n return subnet", "def gen_ipam_subnet(ip_prefix, ip_prefix_len, default_gateway):\n subnet = SubnetType(ip_prefix=ip_prefix, ip_prefix_len=ip_prefix_len)\n ipam_subnet = IpamSubnetType(subnet=subnet, default_gateway=default_gateway)\n vn_subnet = VnSubnetsType(ipam_subnets=[ipam_subnet])\n return vn_subnet", "def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' + str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr", "def create_subnet(self, network_name, subnet_name, cidr):\n _net_id = self.get_net_id(network_name)\n if not isinstance(_net_id, unicode):\n return\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _subnet_info = {\"subnet\":\n {\"ip_version\": 4,\n \"network_id\": _net_id,\n \"cidr\": cidr,\n \"name\": subnet_name}}\n\n _body = json.dumps(_subnet_info)\n\n LOG_OBJ.debug(\"Creating subnet in network %s of tenant %s.\"\n % (_net_id, self.project_info[\"project_id\"]))\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating subnet\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Creation of subnet Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Subnet details : %s \" % output['subnet'])\n return output['subnet']['id']", "def subnetwork_to_ip_range(subnetwork):\n \n try:\n fragments = subnetwork.split('/')\n network_prefix = fragments[0]\n netmask_len = int(fragments[1])\n \n # try parsing the subnetwork first as IPv4, then as IPv6\n for version in (socket.AF_INET, socket.AF_INET6):\n \n ip_len = 32 if version == socket.AF_INET else 128\n \n try:\n suffix_mask = (1 << (ip_len - netmask_len)) - 1\n netmask = ((1 << ip_len) - 1) - suffix_mask\n ip_hex = socket.inet_pton(version, network_prefix)\n ip_lower = int(binascii.hexlify(ip_hex), 16) & netmask\n ip_upper = ip_lower + suffix_mask\n \n return (ip_lower,\n ip_upper,\n 4 if version == socket.AF_INET else 6)\n except:\n pass\n except:\n pass\n \n raise ValueError(\"invalid subnetwork\")", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def __init__(self, address, netmask=None):\n\n if netmask:\n ip = Ipv4Address(address)\n address = \"%s/%s\" % (ip,netmask)\n\n google.ipaddr.IPv4Network.__init__(self, address, strict=False)", "def post_subnet_create(self, resource_dict):\n pass", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n 
subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()", "def add_subnet(self, subnet_type, quantity=None, vlan_id=None, version=4,\r\n test_order=False):\r\n package = self.client['Product_Package']\r\n category = 'sov_sec_ip_addresses_priv'\r\n desc = ''\r\n if version == 4:\r\n if subnet_type == 'global':\r\n quantity = 0\r\n category = 'global_ipv4'\r\n elif subnet_type == 'public':\r\n category = 'sov_sec_ip_addresses_pub'\r\n else:\r\n category = 'static_ipv6_addresses'\r\n if subnet_type == 'global':\r\n quantity = 0\r\n category = 'global_ipv6'\r\n desc = 'Global'\r\n elif subnet_type == 'public':\r\n desc = 'Portable'\r\n\r\n # In the API, every non-server item is contained within package ID 0.\r\n # This means that we need to get all of the items and loop through them\r\n # looking for the items we need based upon the category, quantity, and\r\n # item description.\r\n price_id = None\r\n quantity_str = str(quantity)\r\n for item in package.getItems(id=0, mask='itemCategory'):\r\n category_code = lookup(item, 'itemCategory', 'categoryCode')\r\n if all([category_code == category,\r\n item.get('capacity') == quantity_str,\r\n version == 4 or (version == 6 and\r\n desc in item['description'])]):\r\n price_id = item['prices'][0]['id']\r\n break\r\n\r\n if not price_id:\r\n raise TypeError('Invalid combination specified for ordering a'\r\n ' subnet.')\r\n\r\n order = {\r\n 'packageId': 0,\r\n 'prices': [{'id': price_id}],\r\n 'quantity': 1,\r\n # This is necessary in order for the XML-RPC endpoint to select the\r\n # correct order container\r\n 'complexType': 'SoftLayer_Container_Product_Order_Network_Subnet',\r\n }\r\n\r\n if subnet_type != 'global':\r\n order['endPointVlanId'] = vlan_id\r\n\r\n if test_order:\r\n return self.client['Product_Order'].verifyOrder(order)\r\n else:\r\n return self.client['Product_Order'].placeOrder(order)", "def randomSubBuilder(dom: string, src_ip: string, dst_ip: string, src_port: int, t: float, seed: float):\n id_IP = int(RandShort()) #id for IP layer\n id_DNS = int(RandShort()) #id for DNS layer\n sub = randomSub(seed) #Random subdomain\n q_name = sub + '.' + dom #Complete domain request\n ans = Ether(src= '18:66:da:e6:36:56', dst= '18:66:da:4d:c0:08')/IP(src = src_ip, dst = dst_ip, id = id_IP)/UDP(sport = src_port)/DNS(rd = 0, id= id_DNS, qd=DNSQR(qname=str(q_name)))\n ans.time = t #Set time\n return ans" ]
[ "0.67275494", "0.65961397", "0.6457908", "0.64531577", "0.6452566", "0.6342957", "0.63365185", "0.6277384", "0.6223992", "0.6195576", "0.61954707", "0.6173503", "0.6163646", "0.59720856", "0.5957371", "0.59508234", "0.5924179", "0.58801264", "0.5848472", "0.5744029", "0.57258254", "0.57181525", "0.56605864", "0.5647744", "0.56245446", "0.5611237", "0.5590154", "0.55111694", "0.5457614", "0.5449798" ]
0.7072949
0
Deletes an IPv4 Subnet based on name OR uri. [Arguments]
def fusion_api_delete_ipv4_subnet(self, name=None, uri=None, api=None, headers=None): return self.ipv4subnet.delete(name, uri, api, headers)
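A minimal usage sketch for the wrapper above. The `fusion_client` object and the URI shape are hypothetical stand-ins for an authenticated API client and a real pool resource; the signature shows that either `name` or `uri` is enough to identify the subnet.

# Hypothetical caller; assumes `fusion_client` is an authenticated client exposing this mixin.
resp = fusion_client.fusion_api_delete_ipv4_subnet(name='prod-ipv4-subnet')  # delete by name
# ...or, equivalently, by URI (the URI shape below is an assumption):
resp = fusion_client.fusion_api_delete_ipv4_subnet(uri='/rest/id-pools/ipv4/subnets/<subnet-id>')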
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)", "def delete_subnet(self, name_or_id):\n subnet = self.network.find_subnet(name_or_id, ignore_missing=True)\n if not subnet:\n self.log.debug(\"Subnet %s not found for deleting\", name_or_id)\n return False\n\n self.network.delete_subnet(subnet)\n\n return True", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def delete_subnet(self, subnet):\r\n return self.delete(self.subnet_path % (subnet))", "def test_delete_host_subnet(self):\n pass", "def delete_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_delete_collection_host_subnet(self):\n pass", "def deletecollection_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n 
query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def delete_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n try:\n self.rpc_handler.delete_subnet({str(context._subnet.get('id', '')): {}})\n except:\n pass", "def subnet_delete_end(self, payload):\n subnet_id = payload['subnet_id']\n network = self.cache.get_network_by_subnet_id(subnet_id)\n if network:\n self.refresh_dhcp_helper(network.id)", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. 
Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")", "def delete_network_segments(self, tenant_id, network_segments):", "def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return", "def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4range.delete(name, uri, api, headers)", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)", "def test_delete_subnet(self):\n self.assertEqual(\n type(self.the_client.delete_subnet(subnet_id)),\n baidubce.bce_response.BceResponse)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def delete_subnet_postcommit(self, mech_context):\n LOG.debug(\"delete_subnetwork_postcommit: called\")", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def do_nic_delete(cc, args):\n cc.nic.delete(args.uuid)\n print(_(\"%s deleted\" % args.uuid))", "def pre_subnet_delete(self, resource_id):\n pass", "def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)", "def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)" ]
[ "0.6917884", "0.66710883", "0.66246843", "0.6576569", "0.652274", "0.65066206", "0.64631164", "0.632984", "0.6324097", "0.62425745", "0.6142387", "0.61391616", "0.6127574", "0.61196274", "0.6115805", "0.60570866", "0.6028527", "0.59908265", "0.59759915", "0.59225667", "0.592016", "0.5913714", "0.58507687", "0.57852364", "0.5769305", "0.5763791", "0.5758377", "0.5730421", "0.5664229", "0.5651355" ]
0.7693423
0
Updates an IPv4 Subnet. [Arguments]
def fusion_api_edit_ipv4_subnet(self, body, uri, api=None, headers=None): return self.ipv4subnet.update(body, uri, api, headers)
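A sketch of calling the update wrapper; it simply forwards `body` and `uri` to `ipv4subnet.update`, so the payload keys shown here (mirroring subnet fields that appear in the neighboring snippets) are assumptions, not a confirmed schema.

# Hypothetical payload; only `body` and `uri` are required by the signature.
body = {
    'name': 'renamed-ipv4-subnet',   # assumed field
    'gateway': '192.168.1.1',        # assumed field
    'dnsServers': ['192.168.1.10'],  # assumed field
}
resp = fusion_client.fusion_api_edit_ipv4_subnet(body, uri=subnet_uri)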
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_patch_ipv4_subnet(self, body, uri, param='', api=None, headers=None):\n return self.ipv4subnet.patch(body, uri, param, api, headers)", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def update(self, oid, name=None, network_id=None, tenant_id=None, \n gateway_ip=None, cidr=None, allocation_pools=None, \n enable_dhcp=None, host_routes=None, dns_nameservers=None):\n data = {\n \"subnet\": {\n }\n }\n \n if network_id is not None:\n data['subnet']['network_id'] = network_id\n if tenant_id is not None:\n data['subnet']['tenant_id'] = tenant_id\n if cidr is not None:\n data['subnet']['cidr'] = cidr\n if gateway_ip is not None:\n data['subnet']['gateway_ip'] = gateway_ip\n if name is not None:\n data['subnet']['name'] = name\n if allocation_pools is not None:\n data['subnet']['allocation_pools'] = allocation_pools\n if host_routes is not None:\n data['subnet']['host_routes'] = host_routes\n if enable_dhcp is not None:\n data['subnet']['enable_dhcp'] = enable_dhcp\n if dns_nameservers is not None:\n data['subnet']['dns_nameservers'] = dns_nameservers \n \n path = '%s/subnets/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack subnet: %s' % truncate(res))\n return res[0]['subnet']", "def subnet_update_end(self, payload):\n network_id = payload['subnet']['network_id']\n self.refresh_dhcp_helper(network_id)", "def update_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n subnet = self._get_subnet_info(context._subnet)\n if subnet is not None:\n try:\n self.rpc_handler.update_subnet(subnet)\n except:\n pass", "def update_subnet(\n self,\n name_or_id,\n subnet_name=None,\n enable_dhcp=None,\n gateway_ip=None,\n disable_gateway_ip=None,\n allocation_pools=None,\n dns_nameservers=None,\n host_routes=None,\n ):\n subnet = {}\n if subnet_name:\n subnet['name'] = subnet_name\n if enable_dhcp is not None:\n subnet['enable_dhcp'] = enable_dhcp\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n\n if not subnet:\n self.log.debug(\"No subnet data to update\")\n return\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n curr_subnet = self.get_subnet(name_or_id)\n if not curr_subnet:\n raise exc.OpenStackCloudException(\n \"Subnet %s not found.\" % name_or_id\n )\n\n return self.network.update_subnet(curr_subnet, **subnet)", "def test_update_subnet(self):\n self.assertEqual(\n type(self.the_client.update_subnet(subnet_id, 'test_update_name1',\n 'test_update_description1')),\n baidubce.bce_response.BceResponse)", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def test_patch_host_subnet(self):\n pass", "def test_replace_host_subnet(self):\n pass", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def replace_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in 
iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def post_save_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.add_or_update_entry(subnet=str(instance.ip_network), net_name=instance.name)", "def update_reserve_ip_subpool(self,\n id,\n site_id,\n ipv4DhcpServers=None,\n ipv4DnsServers=None,\n ipv6AddressSpace=None,\n ipv6DhcpServers=None,\n ipv6DnsServers=None,\n ipv6GateWay=None,\n ipv6GlobalPool=None,\n ipv6Prefix=None,\n ipv6PrefixLength=None,\n ipv6Subnet=None,\n ipv6TotalHost=None,\n name=None,\n slaacSupport=None,\n headers=None,\n payload=None,\n active_validation=True,\n **request_parameters):\n check_type(headers, dict)\n check_type(payload, dict)\n check_type(id, basestring,\n may_be_none=False)\n check_type(site_id, basestring,\n may_be_none=False)\n if headers is not None:\n if 'X-Auth-Token' in headers:\n check_type(headers.get('X-Auth-Token'),\n basestring, may_be_none=False)\n\n _params = {\n 'id':\n id,\n }\n _params.update(request_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n 'siteId': site_id,\n }\n _payload = {\n 'name':\n name,\n 'ipv6AddressSpace':\n ipv6AddressSpace,\n 'ipv4DhcpServers':\n ipv4DhcpServers,\n 'ipv4DnsServers':\n ipv4DnsServers,\n 'ipv6GlobalPool':\n ipv6GlobalPool,\n 'ipv6Prefix':\n ipv6Prefix,\n 'ipv6PrefixLength':\n ipv6PrefixLength,\n 'ipv6Subnet':\n ipv6Subnet,\n 'ipv6GateWay':\n ipv6GateWay,\n 'ipv6DhcpServers':\n ipv6DhcpServers,\n 'ipv6DnsServers':\n ipv6DnsServers,\n 'ipv6TotalHost':\n ipv6TotalHost,\n 'slaacSupport':\n slaacSupport,\n }\n _payload.update(payload or {})\n _payload = dict_from_items_with_values(_payload)\n if active_validation:\n self._request_validator('jsd_fd6083b0c65d03b2d53f10b3ece59d_v2_2_1')\\\n .validate(_payload)\n\n with_custom_headers = False\n _headers = 
self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n e_url = ('/dna/intent/api/v1/reserve-ip-subpool/{siteId}')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n json_data = self._session.put(endpoint_full_url, params=_params,\n json=_payload,\n headers=_headers)\n else:\n json_data = self._session.put(endpoint_full_url, params=_params,\n json=_payload)\n\n return self._object_factory('bpm_fd6083b0c65d03b2d53f10b3ece59d_v2_2_1', json_data)", "def fusion_api_create_ipv4_subnet(self, body, sessionID=None, api=None, headers=None):\n return self.ipv4subnet.create(body, sessionID, api, headers)", "def patch_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def pre_subnet_update(self, resource_id, resource_dict):\n pass", "def sub_interface(enode, portlbl, subint, addr=None, up=None, shell=None):\n assert portlbl\n assert subint\n port = enode.ports[portlbl]\n\n if addr is not None:\n assert ip_interface(addr)\n cmd = 'ip addr add {addr} dev {port}.{subint}'.format(addr=addr,\n port=port,\n subint=subint)\n response = enode(cmd, shell=shell)\n assert not response\n\n if up is not None:\n if up:\n interface(enode, portlbl, up=up)\n\n cmd = 'ip link set dev {port}.{subint} {state}'.format(\n port=port, subint=subint, state='up' if up else 'down'\n )\n response = enode(cmd, shell=shell)\n assert not response", "def update_subnet_postcommit(self, mech_context):\n LOG.debug(\"update_subnet_postcommit: called\")", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, 
subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def update(oid, subnetModeDetails):\n\n app.logger.debug(pformat(subnetModeDetails))\n\n if 'id' in subnetModeDetails and subnetModeDetails['id'] != oid:\n abort(400, f\"Key mismatch in path and body\")\n\n existing_subnetMode = SubnetMode.query.filter(SubnetMode.id == oid).one_or_none()\n\n if existing_subnetMode is not None:\n SubnetMode.query.filter(SubnetMode.id == oid).update(subnetModeDetails)\n db.session.commit()\n schema = SubnetModeSchema()\n data = schema.dump(existing_subnetMode)\n return data, 200\n else:\n abort(404, f\"SubnetMode {oid} not found\")", "def src_subnet(self, src_subnet):\n\n self._src_subnet = src_subnet", "def subnetwork_to_ip_range(subnetwork):\n \n try:\n fragments = subnetwork.split('/')\n network_prefix = fragments[0]\n netmask_len = int(fragments[1])\n \n # try parsing the subnetwork first as IPv4, then as IPv6\n for version in (socket.AF_INET, socket.AF_INET6):\n \n ip_len = 32 if version == socket.AF_INET else 128\n \n try:\n suffix_mask = (1 << (ip_len - netmask_len)) - 1\n netmask = ((1 << ip_len) - 1) - suffix_mask\n ip_hex = socket.inet_pton(version, network_prefix)\n ip_lower = int(binascii.hexlify(ip_hex), 16) & netmask\n ip_upper = ip_lower + suffix_mask\n \n return (ip_lower,\n ip_upper,\n 4 if version == socket.AF_INET else 6)\n except:\n pass\n except:\n pass\n \n raise ValueError(\"invalid subnetwork\")", "def update(\n self,\n Count=None,\n Dhcp4EchoRelayInfo=None,\n Dhcp6IaType=None,\n Enabled=None,\n IpAddress=None,\n IpAddressIncrement=None,\n IpAddressPoolIncrement=None,\n IpAddressPrefix=None,\n IpAddressPrefixIncrement=None,\n IpAddressPrefixPoolIncrement=None,\n IpDns1=None,\n IpDns2=None,\n IpGateway=None,\n IpGatewayIncrement=None,\n IpPrefix=None,\n IpType=None,\n Name=None,\n PrefixCount=None,\n PrefixLength=None,\n ServerAddress=None,\n ServerAddressIncrement=None,\n ServerCount=None,\n ServerGateway=None,\n ServerGatewayIncrement=None,\n ServerPrefix=None,\n UseRapidCommit=None,\n ):\n # type: (int, bool, str, bool, str, str, str, str, str, str, str, str, str, str, int, str, str, int, int, str, str, int, str, str, int, bool) -> DhcpServerRange\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def put(self, ip):\n data = request.json\n update_ue_sub(ip, data)\n return None, 204", "def rule_40_extend_subnet_cidr(session):\n\n config, conn = session[\"config\"], session[\"conn\"]\n\n def append_cidr(config_side, conn_vpc):\n\n cidr = conn_vpc.get_all_subnets([\n config_side[\"res\"][\"subnet_id\"]\n ])[0].cidr_block\n\n for user_cidr in config_side[\"ipsec\"][\"subnets\"]:\n if cidr_overlaps(cidr, user_cidr):\n return\n\n config_side[\"ipsec\"][\"subnets\"].append(cidr)\n\n append_cidr(config[\"server\"], conn[\"server\"](\"vpc\"))\n append_cidr(config[\"client\"], conn[\"client\"](\"vpc\"))\n\n return True", "def update_user(self, queue: SubnetQueue, *args):", "def subnet_id(self, subnet_id):\n self._subnet_id = subnet_id" ]
[ "0.69432354", "0.68962246", "0.6033762", "0.5991842", "0.59165764", "0.5887009", "0.5862536", "0.58412373", "0.5774587", "0.56625205", "0.55930793", "0.5589871", "0.55673563", "0.5521949", "0.54928094", "0.5419348", "0.53862095", "0.53639734", "0.53615487", "0.52910244", "0.5247123", "0.521629", "0.51570433", "0.5146508", "0.5130503", "0.512097", "0.50876564", "0.5079863", "0.50688666", "0.50630695" ]
0.69931936
0
Allocate an IPv4 Subnet. [Arguments]
def fusion_api_allocate_ipv4_subnet(self, body, uri, api=None, headers=None): return self.ipv4subnet.allocate(body, uri, api, headers)
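A sketch of the allocate wrapper, which forwards `body` and `uri` to `ipv4subnet.allocate`; the `count` key is an assumption based on typical pool-allocation payloads, not a confirmed schema.

# Hypothetical request: ask the pool at `subnet_uri` for two addresses.
resp = fusion_client.fusion_api_allocate_ipv4_subnet({'count': 2}, uri=subnet_uri)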
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allocate_subnet(self):\n if len(self.subnet_list) == 0:\n subnet = '192.168.1.0/24'\n self.subnet_list.append(subnet)\n return subnet\n else:\n subnet = self.subnet_list[::-1][0]\n ip = ipaddress.IPv4Network(subnet)[0]\n s = ipaddress.IPv4Address(ip) + 256\n return '{}{}'.format(s, '/24')", "def fusion_api_create_ipv4_subnet(self, body, sessionID=None, api=None, headers=None):\n return self.ipv4subnet.create(body, sessionID, api, headers)", "def test_create_host_subnet(self):\n pass", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def __init__(self, name: str, *args, size: int = 1024, network: 'base_network.Network' = None):\n self.name = name\n self._network = network if network is not None else defaults.network\n self._network.add_subnet(self)\n self._max_size = size\n self._ip_range = self._network.get_subnet_range(self._max_size)\n self._hosts = list(self._ip_range.hosts())\n\n self._nodes_dict = {}\n self.started = False\n self.loaded = False\n\n for node in utils.args.list_from_args(args):\n self.add_node(node)", "def subnet_create(request, network_id, **kwargs):\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def reserve_ip_subpool(self,\n site_id,\n ipv4DhcpServers=None,\n ipv4DnsServers=None,\n ipv4GateWay=None,\n ipv4GlobalPool=None,\n ipv4Prefix=None,\n ipv4PrefixLength=None,\n ipv4Subnet=None,\n ipv4TotalHost=None,\n ipv6AddressSpace=None,\n ipv6DhcpServers=None,\n ipv6DnsServers=None,\n ipv6GateWay=None,\n ipv6GlobalPool=None,\n ipv6Prefix=None,\n ipv6PrefixLength=None,\n ipv6Subnet=None,\n ipv6TotalHost=None,\n name=None,\n slaacSupport=None,\n type=None,\n headers=None,\n payload=None,\n active_validation=True,\n **request_parameters):\n check_type(headers, dict)\n check_type(payload, dict)\n check_type(site_id, basestring,\n may_be_none=False)\n if headers is not None:\n if 'X-Auth-Token' in headers:\n check_type(headers.get('X-Auth-Token'),\n basestring, may_be_none=False)\n\n _params = {\n }\n _params.update(request_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n 'siteId': site_id,\n }\n _payload = {\n 'name':\n name,\n 'type':\n type,\n 'ipv6AddressSpace':\n ipv6AddressSpace,\n 'ipv4GlobalPool':\n ipv4GlobalPool,\n 'ipv4Prefix':\n ipv4Prefix,\n 'ipv4PrefixLength':\n ipv4PrefixLength,\n 'ipv4Subnet':\n ipv4Subnet,\n 'ipv4GateWay':\n ipv4GateWay,\n 'ipv4DhcpServers':\n ipv4DhcpServers,\n 'ipv4DnsServers':\n ipv4DnsServers,\n 'ipv6GlobalPool':\n ipv6GlobalPool,\n 'ipv6Prefix':\n ipv6Prefix,\n 'ipv6PrefixLength':\n ipv6PrefixLength,\n 'ipv6Subnet':\n ipv6Subnet,\n 'ipv6GateWay':\n ipv6GateWay,\n 'ipv6DhcpServers':\n ipv6DhcpServers,\n 'ipv6DnsServers':\n ipv6DnsServers,\n 'ipv4TotalHost':\n ipv4TotalHost,\n 'ipv6TotalHost':\n ipv6TotalHost,\n 'slaacSupport':\n slaacSupport,\n }\n _payload.update(payload or {})\n _payload = 
dict_from_items_with_values(_payload)\n if active_validation:\n self._request_validator('jsd_cec6c85d9bb4bcc8f61f31296b_v2_2_1')\\\n .validate(_payload)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n e_url = ('/dna/intent/api/v1/reserve-ip-subpool/{siteId}')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n json_data = self._session.post(endpoint_full_url, params=_params,\n json=_payload,\n headers=_headers)\n else:\n json_data = self._session.post(endpoint_full_url, params=_params,\n json=_payload)\n\n return self._object_factory('bpm_cec6c85d9bb4bcc8f61f31296b_v2_2_1', json_data)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def __init__(self, network, subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def create_subnet(self, body=None):\r\n return self.post(self.subnets_path, body=body)", "def subnetpool_create(request, name, prefixes, **kwargs):\n LOG.debug(\"subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, \"\n \"kwargs=%(kwargs)s\", {'name': name, 'prefixes': prefixes,\n 'kwargs': kwargs})\n body = {'subnetpool':\n {'name': name,\n 'prefixes': prefixes,\n }\n }\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnetpool'].update(kwargs)\n subnetpool = \\\n neutronclient(request).create_subnetpool(body=body).get('subnetpool')\n return SubnetPool(subnetpool)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])", "def __init__(self, address, netmask=None):\n\n if netmask:\n ip = Ipv4Address(address)\n address = \"%s/%s\" % (ip,netmask)\n\n google.ipaddr.IPv4Network.__init__(self, address, strict=False)", "def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n 
\n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def create_subnet(\n self,\n network_name_or_id,\n cidr=None,\n ip_version=4,\n enable_dhcp=False,\n subnet_name=None,\n tenant_id=None,\n allocation_pools=None,\n gateway_ip=None,\n disable_gateway_ip=False,\n dns_nameservers=None,\n host_routes=None,\n ipv6_ra_mode=None,\n ipv6_address_mode=None,\n prefixlen=None,\n use_default_subnetpool=False,\n **kwargs,\n ):\n\n if tenant_id is not None:\n filters = {'tenant_id': tenant_id}\n else:\n filters = None\n\n network = self.get_network(network_name_or_id, filters)\n if not network:\n raise exc.OpenStackCloudException(\n \"Network %s not found.\" % network_name_or_id\n )\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n if not cidr and not use_default_subnetpool:\n raise exc.OpenStackCloudException(\n 'arg:cidr is required when a subnetpool is not used'\n )\n\n if cidr and use_default_subnetpool:\n raise exc.OpenStackCloudException(\n 'arg:cidr must be set to None when use_default_subnetpool == '\n 'True'\n )\n\n # Be friendly on ip_version and allow strings\n if isinstance(ip_version, str):\n try:\n ip_version = int(ip_version)\n except ValueError:\n raise exc.OpenStackCloudException(\n 'ip_version must be an integer'\n )\n\n # The body of the neutron message for the subnet we wish to create.\n # This includes attributes that are required or have defaults.\n subnet = dict(\n {\n 'network_id': network['id'],\n 'ip_version': ip_version,\n 'enable_dhcp': enable_dhcp,\n },\n **kwargs,\n )\n\n # Add optional attributes to the message.\n if cidr:\n subnet['cidr'] = cidr\n if subnet_name:\n subnet['name'] = subnet_name\n if tenant_id:\n subnet['tenant_id'] = tenant_id\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n if ipv6_ra_mode:\n subnet['ipv6_ra_mode'] = ipv6_ra_mode\n if ipv6_address_mode:\n subnet['ipv6_address_mode'] = ipv6_address_mode\n if prefixlen:\n subnet['prefixlen'] = prefixlen\n if use_default_subnetpool:\n subnet['use_default_subnetpool'] = True\n\n return self.network.create_subnet(**subnet)", "def create_subnet ( vpc_conn,\n ec2_conn,\n vpc_id,\n subnet_cidr,\n zone_name,\n subnet_basename ) :\n subnet = vpc_conn.create_subnet( vpc_id, subnet_cidr, zone_name )\n aws_cmd( ec2_conn.create_tags, [ subnet.id,\n { \"Name\": subnet_basename + \"-\" + zone_name[-1].upper( ) + \"-Subnet\" } ] )\n return subnet", "def pre_subnet_create(self, resource_dict):\n pass", "def post_subnet_create(self, resource_dict):\n pass", "def test_create_subnet(self):\n client_token = generate_client_token()\n subnet_name = 'test_subnet_name1' + client_token\n subnet_cidr = '192.168.0.64/26'\n self.assertEqual(\n type(self.the_client.create_subnet(subnet_name,\n 'cn-bj-a',\n subnet_cidr,\n vpc_id,\n 
client_token=client_token)),\n baidubce.bce_response.BceResponse)", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def test_port_create_with_segment_subnets(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'])\n res = self.deserialize(self.fmt, response)\n # Don't allocate IPs in this case because we didn't give binding info\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' + str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr", "def create(self, name, network_id, tenant_id, gateway_ip, cidr, \n allocation_pools=None, enable_dhcp=True, host_routes=None,\n dns_nameservers=['8.8.8.7', '8.8.8.8']):\n data = {\n \"subnet\": {\n \"name\": name,\n \"network_id\": network_id,\n \"tenant_id\": tenant_id,\n \"ip_version\": 4,\n \"cidr\": cidr,\n \"gateway_ip\": gateway_ip,\n }\n }\n if allocation_pools is not None:\n data['subnet']['allocation_pools'] = allocation_pools\n if host_routes is not None:\n data['subnet']['host_routes'] = host_routes\n if enable_dhcp is not None:\n data['subnet']['enable_dhcp'] = enable_dhcp\n if dns_nameservers is not None:\n data['subnet']['dns_nameservers'] = dns_nameservers\n\n path = '%s/subnets' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack subnet: %s' % truncate(res))\n return res[0]['subnet']", "def allocate(self, pool, tenant_id=None, **params):\n if not tenant_id:\n tenant_id = self.request.user.project_id\n create_dict = {'floating_network_id': pool,\n 'tenant_id': tenant_id}\n if 'subnet_id' in params:\n create_dict['subnet_id'] = params['subnet_id']\n if 'floating_ip_address' in params:\n create_dict['floating_ip_address'] = params['floating_ip_address']\n if 'description' in params:\n create_dict['description'] = params['description']\n if 'dns_domain' in params:\n create_dict['dns_domain'] = params['dns_domain']\n if 'dns_name' in params:\n create_dict['dns_name'] = params['dns_name']\n fip = self.client.create_floatingip(\n {'floatingip': create_dict}).get('floatingip')\n self._set_instance_info(fip)\n return FloatingIp(fip)", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()", "def subnetwork_to_ip_range(subnetwork):\n \n try:\n fragments = subnetwork.split('/')\n network_prefix = fragments[0]\n netmask_len = int(fragments[1])\n \n # try parsing the subnetwork first as IPv4, then as 
IPv6\n for version in (socket.AF_INET, socket.AF_INET6):\n \n ip_len = 32 if version == socket.AF_INET else 128\n \n try:\n suffix_mask = (1 << (ip_len - netmask_len)) - 1\n netmask = ((1 << ip_len) - 1) - suffix_mask\n ip_hex = socket.inet_pton(version, network_prefix)\n ip_lower = int(binascii.hexlify(ip_hex), 16) & netmask\n ip_upper = ip_lower + suffix_mask\n \n return (ip_lower,\n ip_upper,\n 4 if version == socket.AF_INET else 6)\n except:\n pass\n except:\n pass\n \n raise ValueError(\"invalid subnetwork\")", "def test_replace_host_subnet(self):\n pass", "def setNetGroup(addr): #status: Done, not tested\r\n pass" ]
[ "0.70842063", "0.6502744", "0.64276433", "0.6423662", "0.6092746", "0.60817623", "0.6046768", "0.6003364", "0.60029787", "0.5985132", "0.59696525", "0.596162", "0.5850464", "0.5830395", "0.58274764", "0.5786007", "0.5722331", "0.5693126", "0.56781095", "0.56743723", "0.5650694", "0.5644473", "0.5642245", "0.56367606", "0.561063", "0.55847424", "0.5573741", "0.5550027", "0.5530511", "0.5510647" ]
0.70978194
0
Collect an IPv4 Subnet. [Arguments]
def fusion_api_collect_ipv4_subnet(self, body, uri, api=None, headers=None): return self.ipv4subnet.collect(body, uri, api, headers)
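A sketch of the collect wrapper, which forwards `body` and `uri` to `ipv4subnet.collect`; the `idList` key (returning previously allocated addresses to the pool) is an assumption, not a confirmed schema.

# Hypothetical request: hand two allocated addresses back to the pool.
resp = fusion_client.fusion_api_collect_ipv4_subnet({'idList': ['192.168.1.5', '192.168.1.6']}, uri=subnet_uri)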
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subnetwork_to_ip_range(subnetwork):\n \n try:\n fragments = subnetwork.split('/')\n network_prefix = fragments[0]\n netmask_len = int(fragments[1])\n \n # try parsing the subnetwork first as IPv4, then as IPv6\n for version in (socket.AF_INET, socket.AF_INET6):\n \n ip_len = 32 if version == socket.AF_INET else 128\n \n try:\n suffix_mask = (1 << (ip_len - netmask_len)) - 1\n netmask = ((1 << ip_len) - 1) - suffix_mask\n ip_hex = socket.inet_pton(version, network_prefix)\n ip_lower = int(binascii.hexlify(ip_hex), 16) & netmask\n ip_upper = ip_lower + suffix_mask\n \n return (ip_lower,\n ip_upper,\n 4 if version == socket.AF_INET else 6)\n except:\n pass\n except:\n pass\n \n raise ValueError(\"invalid subnetwork\")", "def find_subnet(allocated, prefix_len):\n def is_colliding(network, allocations):\n \"\"\"\n Check if given network is colliding with an\n already allocated networks\n \"\"\"\n for allocation in allocations:\n if network.overlaps(allocation):\n return True\n return False\n\n for option in ip_network(FREIFUNK_NET_IP4).subnets(new_prefix=prefix_len):\n if is_colliding(option, allocated):\n continue\n\n yield str(option)", "def test_list_host_subnet(self):\n pass", "def fusion_api_get_ipv4_subnet(self, uri=None, param='', api=None, headers=None):\n return self.ipv4subnet.get(uri=uri, api=api, headers=headers, param=param)", "def subnetting(self):\n ip = netaddr.IPNetwork(addr=self.subnet)\n subnets = list(ip.subnet(prefixlen=24))\n list_subnets = [str(subnet) for subnet in subnets]\n return list_subnets", "def allocate_subnet(self):\n if len(self.subnet_list) == 0:\n subnet = '192.168.1.0/24'\n self.subnet_list.append(subnet)\n return subnet\n else:\n subnet = self.subnet_list[::-1][0]\n ip = ipaddress.IPv4Network(subnet)[0]\n s = ipaddress.IPv4Address(ip) + 256\n return '{}{}'.format(s, '/24')", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def test_read_host_subnet(self):\n pass", "def digest_ips(self):\n all_subnets = {}\n self.subnets = []\n self.single_ips = []\n # extract all subnets\n for ip in self.iplist:\n subnet = self.__get_sutnet(ip)\n if all_subnets.has_key(subnet):\n all_subnets[subnet].append(ip)\n else:\n new_list = [ip]\n all_subnets[subnet] = new_list\n\n for subnet, subnet_ips in all_subnets.items():\n if len(subnet_ips) > 1:\n self.subnets.append(subnet)\n else:\n self.single_ips.append(subnet_ips[0])\n\n self.subnets.sort()\n self.single_ips.sort()", "def test_one_subnet(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/16\", \n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=[\"10.0.1.0/24\"],\n )", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n 
MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def list_subnet(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing subnet.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"subnet List : %s \" % output)\n return output[\"subnets\"]", "def test_replace_host_subnet(self):\n pass", "def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]", "def subnet_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_subnet(**kwargs)", "def separate_networks(start, end, cidr):\n networks = []\n start_net = IPNetwork(f'{start}/{cidr}')\n end = IPNetwork(f'{end}/{cidr}')\n working_net = start_net\n LOG.info(f'Start net: {start_net}')\n while working_net < end + 1:\n LOG.debug(f'Adding network {working_net}')\n networks.append(working_net)\n working_net = working_net + 1\n return networks", "def generate_subnets(parent_cidr, existing_cidrs, prefix, count):\n subnets = []\n for new_cidr in _generate_subnets(parent_cidr, existing_cidrs, prefix):\n subnets.append(str(new_cidr))\n if len(subnets) == count:\n break\n return subnets", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def get_subnet_output(expand: Optional[pulumi.Input[Optional[str]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnet_name: Optional[pulumi.Input[str]] = None,\n virtual_network_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubnetResult]:\n ...", "def __sub__(self, other):\n if not isinstance(other, Subnet):\n raise ValueError(\"I'm sorry, but I'm afraid I cannot do that\")\n\n if other.subnet_mask < self.subnet_mask:\n raise ValueError(\"We cannot subtract from a subnetmask greater than out own\")\n\n results = []\n\n for subnet_mask in reversed(range(self.subnet_mask + 1, other.subnet_mask + 1)):\n mask_bits = 2 ** (32 - subnet_mask) # Get the new mask\n new_subnet_number = other.address_int ^ mask_bits # Calculate the new IP range\n new_subnet_number &= ~(mask_bits - 1) # Discard all bits that no longer subnet, but are now addresses\n new_subnet = Subnet(ip_number=new_subnet_number, subnet_mask=subnet_mask)\n\n results.append(new_subnet)\n\n return results", "def __init__(self, network, 
subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def test_network_full(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=None,\n )", "def list_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnetList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_create_host_subnet(self):\n pass", "def fusion_api_create_ipv4_subnet(self, body, sessionID=None, api=None, headers=None):\n return self.ipv4subnet.create(body, sessionID, api, headers)", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def list_subnets(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"distinguishedName\", \"name\", \"description\"]\n else:\n attributes = ALL\n\n if verbose:\n self.display(\n self.engine.query(\n self.engine.SITES_FILTER(),\n attributes, base=','.join([\"CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )\n else:\n entries = self.engine.query(self.engine.SITES_FILTER(), attributes, base=','.join([\"CN=Configuration\", self.engine.base_dn]))\n\n site_dn = \"\"\n site_name = \"\"\n site_description = \"\"\n # subnet_dn = \"\"\n subnet_name = \"\"\n subnet_description = \"\"\n for entry in entries:\n site_dn = entry[\"distinguishedName\"] if entry[\"distinguishedName\"] else \"\"\n site_name = entry[\"name\"] if entry[\"name\"] else \"\"\n site_description = entry[\"description\"][0] if entry[\"description\"] else \"\"\n 
subnet_entries = self.engine.query(self.engine.SUBNET_FILTER(site_dn), attributes, base=','.join([\"CN=Sites,CN=Configuration\", self.engine.base_dn]))\n for subnet in subnet_entries:\n # subnet_dn = subnet[\"distinguishedName\"] if subnet[\"distinguishedName\"] else \"\"\n subnet_name = subnet[\"name\"] if subnet[\"name\"] else \"\"\n subnet_description = subnet[\"description\"][0] if subnet[\"description\"] else \"\"\n servers = self.engine.query(\"(objectClass=server)\", ['cn'], base=site_dn)\n servers_list = [d['cn'] for d in servers]\n\n output = \"Site: {}\".format(site_name)\n output += \" | Subnet: {}\".format(subnet_name) if subnet_name else \"\"\n output += \" | Site description: {}\".format(site_description) if site_description else \"\"\n output += \" | Subnet description: {}\".format(subnet_description) if subnet_description else \"\"\n output += \" | Servers: {}\".format(', '.join(servers_list)) if servers_list else \"\"\n print(output)", "def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list", "def test_list_subnets(self):\n print(self.the_client.list_subnets())", "def extend_list(self, data, parsed_args):\r\n neutron_client = self.get_client()\r\n search_opts = {'fields': ['id', 'cidr']}\r\n if self.pagination_support:\r\n page_size = parsed_args.page_size\r\n if page_size:\r\n search_opts.update({'limit': page_size})\r\n subnet_ids = []\r\n for n in data:\r\n if 'subnets' in n:\r\n subnet_ids.extend(n['subnets'])\r\n\r\n def _get_subnet_list(sub_ids):\r\n search_opts['id'] = sub_ids\r\n return neutron_client.list_subnets(\r\n **search_opts).get('subnets', [])\r\n\r\n try:\r\n subnets = _get_subnet_list(subnet_ids)\r\n except exceptions.RequestURITooLong as uri_len_exc:\r\n # The URI is too long because of too many subnet_id filters\r\n # Use the excess attribute of the exception to know how many\r\n # subnet_id filters can be inserted into a single request\r\n subnet_count = len(subnet_ids)\r\n max_size = ((self.subnet_id_filter_len * subnet_count) -\r\n uri_len_exc.excess)\r\n chunk_size = max_size / self.subnet_id_filter_len\r\n subnets = []\r\n for i in range(0, subnet_count, chunk_size):\r\n subnets.extend(\r\n _get_subnet_list(subnet_ids[i: i + chunk_size]))\r\n\r\n subnet_dict = dict([(s['id'], s) for s in subnets])\r\n for n in data:\r\n if 'subnets' in n:\r\n n['subnets'] = [(subnet_dict.get(s) or {\"id\": s})\r\n for s in n['subnets']]" ]
[ "0.615925", "0.5743858", "0.5685825", "0.55463344", "0.5541282", "0.5536297", "0.5508559", "0.5336481", "0.5257907", "0.5243746", "0.52319306", "0.52170366", "0.5175911", "0.5145608", "0.51012444", "0.5073665", "0.5067929", "0.5063898", "0.5050254", "0.50450164", "0.5042065", "0.5036371", "0.5033514", "0.5030492", "0.50077516", "0.5005004", "0.4990224", "0.49729657", "0.49699855", "0.49653873" ]
0.64564615
0
Patch an IPv4 Subnet. [Arguments]
def fusion_api_patch_ipv4_subnet(self, body, uri, param='', api=None, headers=None): return self.ipv4subnet.patch(body, uri, param, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_patch_host_subnet(self):\n pass", "def fusion_api_edit_ipv4_subnet(self, body, uri, api=None, headers=None):\n return self.ipv4subnet.update(body, uri, api, headers)", "def test_replace_host_subnet(self):\n pass", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def patch_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def test_update_subnet(self):\n self.assertEqual(\n type(self.the_client.update_subnet(subnet_id, 'test_update_name1',\n 'test_update_description1')),\n baidubce.bce_response.BceResponse)", "def replace_namespaced_host_subnet(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_host_subnet`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_host_subnet`\")\n\n resource_path = '/oapi/v1/hostsubnets/{name}'.replace('{format}', 'json')\n path_params = 
{}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnet',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def update(self, oid, name=None, network_id=None, tenant_id=None, \n gateway_ip=None, cidr=None, allocation_pools=None, \n enable_dhcp=None, host_routes=None, dns_nameservers=None):\n data = {\n \"subnet\": {\n }\n }\n \n if network_id is not None:\n data['subnet']['network_id'] = network_id\n if tenant_id is not None:\n data['subnet']['tenant_id'] = tenant_id\n if cidr is not None:\n data['subnet']['cidr'] = cidr\n if gateway_ip is not None:\n data['subnet']['gateway_ip'] = gateway_ip\n if name is not None:\n data['subnet']['name'] = name\n if allocation_pools is not None:\n data['subnet']['allocation_pools'] = allocation_pools\n if host_routes is not None:\n data['subnet']['host_routes'] = host_routes\n if enable_dhcp is not None:\n data['subnet']['enable_dhcp'] = enable_dhcp\n if dns_nameservers is not None:\n data['subnet']['dns_nameservers'] = dns_nameservers \n \n path = '%s/subnets/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack subnet: %s' % truncate(res))\n return res[0]['subnet']", "def subnet_update_end(self, payload):\n network_id = payload['subnet']['network_id']\n self.refresh_dhcp_helper(network_id)", "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)", "def pre_subnet_update(self, resource_id, resource_dict):\n pass", "def update_subnet_postcommit(self, context):\n if self.rpc_handler is None:\n return\n subnet = self._get_subnet_info(context._subnet)\n if subnet is not None:\n try:\n self.rpc_handler.update_subnet(subnet)\n except:\n pass", "def rule_40_extend_subnet_cidr(session):\n\n config, conn = session[\"config\"], session[\"conn\"]\n\n def append_cidr(config_side, conn_vpc):\n\n cidr = conn_vpc.get_all_subnets([\n config_side[\"res\"][\"subnet_id\"]\n ])[0].cidr_block\n\n for user_cidr in config_side[\"ipsec\"][\"subnets\"]:\n if cidr_overlaps(cidr, user_cidr):\n return\n\n config_side[\"ipsec\"][\"subnets\"].append(cidr)\n\n append_cidr(config[\"server\"], conn[\"server\"](\"vpc\"))\n append_cidr(config[\"client\"], conn[\"client\"](\"vpc\"))\n\n return True", "def test_patch_net_namespace(self):\n pass", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = 
ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def test_delete_host_subnet(self):\n pass", "def test_in_subnet():\n with patch.object(salt.utils.network, \"in_subnet\", MagicMock(return_value=True)):\n assert win_network.in_subnet(\"10.1.1.0/16\")", "def test_netlookup_subtract_add_no_subnets(monkeypatch):\n script = NetLookupScript()\n testargs = ['netlookup', 'subtract', NETWORKS_ARG]\n with monkeypatch.context() as context:\n validate_script_run_exception_with_args(script, context, testargs, exit_code=1)", "def update_subnet(\n self,\n name_or_id,\n subnet_name=None,\n enable_dhcp=None,\n gateway_ip=None,\n disable_gateway_ip=None,\n allocation_pools=None,\n dns_nameservers=None,\n host_routes=None,\n ):\n subnet = {}\n if subnet_name:\n subnet['name'] = subnet_name\n if enable_dhcp is not None:\n subnet['enable_dhcp'] = enable_dhcp\n if gateway_ip:\n subnet['gateway_ip'] = gateway_ip\n if disable_gateway_ip:\n subnet['gateway_ip'] = None\n if allocation_pools:\n subnet['allocation_pools'] = allocation_pools\n if dns_nameservers:\n subnet['dns_nameservers'] = dns_nameservers\n if host_routes:\n subnet['host_routes'] = host_routes\n\n if not subnet:\n self.log.debug(\"No subnet data to update\")\n return\n\n if disable_gateway_ip and gateway_ip:\n raise exc.OpenStackCloudException(\n 'arg:disable_gateway_ip is not allowed with arg:gateway_ip'\n )\n\n curr_subnet = self.get_subnet(name_or_id)\n if not curr_subnet:\n raise exc.OpenStackCloudException(\n \"Subnet %s not found.\" % name_or_id\n )\n\n return self.network.update_subnet(curr_subnet, **subnet)", "def test_patch_port_sub_group(self):\n pass", "def test_change_subnet(self):\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.11.12.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n ippool2 = NetworkIpPool.objects.create(\n network='10.10.11.0/24',\n kind=NetworkIpPoolKind.NETWORK_KIND_INTERNET,\n description='test',\n ip_start='10.10.11.2',\n ip_end='10.10.11.254',\n gateway='10.10.11.1',\n is_dynamic=True\n )\n self.ippool.groups.remove(self.group)\n ippool2.groups.add(self.group)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:7',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, 
'10.10.11.3')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)\n\n lease = CustomerIpLeaseModel.fetch_subscriber_lease(\n customer_mac='1:2:3:4:5:6',\n device_mac='12:13:14:15:16:17',\n device_port=2,\n is_dynamic=True\n )\n self.assertIsNotNone(lease)\n self.assertEqual(lease.ip_address, '10.10.11.2')\n self.assertEqual(lease.customer, self.customer)\n self.assertTrue(lease.is_dynamic)", "def test_lo_interface_tc4_replace(duthost):\n json_patch = [\n {\n \"op\": \"remove\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|FC00:1::32~1128\"\n },\n {\n \"op\": \"remove\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|10.1.0.32~132\"\n },\n {\n \"op\": \"add\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|10.1.0.33~132\",\n \"value\": {}\n },\n {\n \"op\": \"add\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|FC00:1::33~1128\",\n \"value\": {}\n }\n ]\n\n tmpfile = generate_tmpfile(duthost)\n logger.info(\"tmpfile {}\".format(tmpfile))\n\n try:\n output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile)\n expect_op_success(duthost, output)\n\n check_show_ip_intf(duthost, \"Loopback0\", [\"10.1.0.33/32\"], [\"10.1.0.32/32\"], is_ipv4=True)\n check_show_ip_intf(duthost, \"Loopback0\", [\"fc00:1::33/128\"], [\"fc00:1::32/128\"], is_ipv4=False)\n finally:\n delete_tmpfile(duthost, tmpfile)", "def test_subnets():\n with patch.object(\n salt.utils.network, \"subnets\", MagicMock(return_value=\"10.1.1.0/24\")\n ):\n assert win_network.subnets() == \"10.1.1.0/24\"", "def test_create_host_subnet(self):\n pass", "def post_save_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.add_or_update_entry(subnet=str(instance.ip_network), net_name=instance.name)", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def set_network(self, addr, netmask, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set_network(self.map, addr, netmask, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set_network(self.map, addr, netmask, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")" ]
[ "0.7028762", "0.65259534", "0.65172523", "0.64451545", "0.61605704", "0.60011065", "0.5753818", "0.57437485", "0.5633353", "0.5616337", "0.5549721", "0.5446545", "0.5417368", "0.53922504", "0.5385309", "0.53409284", "0.53266454", "0.5318294", "0.52781475", "0.52667135", "0.52118355", "0.52005976", "0.51846653", "0.51819986", "0.51648587", "0.5128437", "0.5126824", "0.511383", "0.51093096", "0.50816476" ]
0.7404452
0
Creates a VMAC Range. [Arguments]
def fusion_api_create_vmac_range(self, body, api=None, headers=None): return self.vmacrange.create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_allocate_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.allocate(body, uri, api, headers)", "def create(self, range):\n raise NotImplementedError", "def Range(self, from: int, to: int) -> BaseVector:", "def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)", "def create(self, range_value):\n return product(range(2), repeat=range_value[0])", "def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)", "def create_instance(c_instance):\n return VCM600(c_instance)", "def create_instance(c_instance):\n return VCM600(c_instance)", "def fusion_api_create_vsn_range(self, body, api=None, headers=None):\n return self.vsnrange.create(body, api, headers)", "def vrange(starts, stops):\n stops = np.asarray(stops)\n l = stops - starts # Lengths of each range.\n return np.repeat(stops - l.cumsum(), l) + np.arange(l.sum()), l.cumsum()", "def fusion_api_create_vwwn_range(self, body, api=None, headers=None):\n return self.vwwnrange.create(body, api, headers)", "def generate_mac_addr(self):\n\t\tcall_sdk_function('PrlVmDevNet_GenerateMacAddr', self.handle)", "def fusion_api_allocate_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.allocate(body, uri, api, headers)", "def make_clock(effective_lower: dt.datetime,\n vclock_lower: int,\n **kwargs) -> _ClockSet:\n effective_upper = kwargs.get('effective_upper', None)\n vclock_upper = kwargs.get('vclock_upper', None)\n\n effective = psql_extras.DateTimeTZRange(\n effective_lower, effective_upper)\n vclock = psql_extras.NumericRange(vclock_lower, vclock_upper)\n\n return _ClockSet(effective, vclock)", "def fusion_api_collect_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.collect(body, uri, api, headers)", "def fusion_api_allocate_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.allocate(body, uri, api, headers)", "def bdev_opal_create(client, nvme_ctrlr_name, nsid, locking_range_id, range_start, range_length, password):\n params = {\n 'nvme_ctrlr_name': nvme_ctrlr_name,\n 'nsid': nsid,\n 'locking_range_id': locking_range_id,\n 'range_start': range_start,\n 'range_length': range_length,\n 'password': password,\n }\n\n return client.call('bdev_opal_create', params)", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def create_static_macs(self, port=None, vlans=None, macs=None):\n pass", "def fusion_api_create_ipv4_range(self, body, api=None, headers=None):\n return self.ipv4range.create(body, api, headers)", "def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")", "def fusion_api_edit_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.update(body, uri, api, headers)", "def make_voigbg(w,minZ,maxZ,m=mz0,fixw=False):\n cmds = []\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('width[2.49,0,5.0]')\n cmds.append('sigma[1,0,5.0]')\n 
cmds.append('expar[-0.1,-1,0]')\n cmds.append(\"RooVoigtian::voig(x,m,width,sigma)\")\n cmds.append(\"RooExponential::exp(x,expar)\")\n cmds.append('nvoig[1,0,1000000]')\n cmds.append('nexp[1,0,1000000]')\n cmds.append(\"SUM::voigbg(nvoig*voig,nexp*exp)\")\n [w.factory(cmd) for cmd in cmds]\n if fixw:\n w.var('width').setConstant(kTRUE) if w.var('width') else None\n return w.pdf('voigbg'), kTRUE", "def make_tenant_vlan(name, ip, vid, interface):\n\n script = '\\n'.join([\n 'name={}',\n 'ip={}',\n 'vid={}',\n 'interface={}',\n '',\n '#',\n '# Binding br_ext to $interface',\n '#',\n 'sudo brctl addbr br_ext',\n 'sudo ip link set dev br_ext up',\n 'sudo brctl addif br_ext $interface',\n '',\n '#',\n '# Creating a namespace with $name with $ip',\n '# ',\n '',\n 'sudo ip netns add $name',\n 'sudo brctl addbr br_$name',\n 'sudo ip link set dev br_$name up',\n 'sudo ip link add veth0 type veth peer name veth0_$name ',\n 'sudo ip link set veth0 netns $name',\n 'sudo ip netns exec $name ip link set dev veth0 up',\n 'sudo ip netns exec $name ifconfig veth0 $ip netmask 255.255.255.0 up',\n 'sudo ip link set dev veth0_$name up',\n '',\n '#',\n '# Binding VID $vid to br_$name',\n '# Binding veth0_$name to br_$name',\n '#',\n 'sudo ip link add link br_ext br_ext.$vid type vlan id $vid',\n 'sudo ip link set dev br_ext.$vid up',\n 'sudo brctl addif br_$name veth0_$name',\n 'sudo brctl addif br_$name br_ext.$vid',\n ]).format(name, ip, vid, interface)\n return run_script(script)", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def create_instance(c_instance):\n return AumPC40(c_instance)", "def boundaries_new(*args):\n return _ida_hexrays.boundaries_new(*args)", "def _build_robovac_command(mode, command):\n mcu_ota_header_0xa5 = 0xA5\n cmd_data = (mode.value + command.value)\n\n return bytes([mcu_ota_header_0xa5, mode.value, command.value, cmd_data, 0xFA])", "def change_dhcp_range(self, start, end, prefix_length):\n self.execute_script('change_dhcp_range', start, end, prefix_length)", "def gen_ipam_subnet(ip_prefix, ip_prefix_len, default_gateway):\n subnet = SubnetType(ip_prefix=ip_prefix, ip_prefix_len=ip_prefix_len)\n ipam_subnet = IpamSubnetType(subnet=subnet, default_gateway=default_gateway)\n vn_subnet = VnSubnetsType(ipam_subnets=[ipam_subnet])\n return vn_subnet" ]
[ "0.63431716", "0.6129839", "0.55962014", "0.5358623", "0.53351486", "0.5334318", "0.5210957", "0.5210957", "0.519095", "0.5118204", "0.5081868", "0.50646985", "0.5025402", "0.5024295", "0.50133705", "0.49907547", "0.49409258", "0.49141517", "0.48887545", "0.4887402", "0.4887179", "0.48416504", "0.483644", "0.4828785", "0.47978202", "0.47946748", "0.47920543", "0.47534698", "0.47494298", "0.47480547" ]
0.70753956
0
Updates a VMAC Range. [Arguments]
def fusion_api_edit_vmac_range(self, body, uri, api=None, headers=None): return self.vmacrange.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_create_vmac_range(self, body, api=None, headers=None):\n return self.vmacrange.create(body, api, headers)", "def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")", "def fusion_api_allocate_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.allocate(body, uri, api, headers)", "def change_dhcp_range(self, start, end, prefix_length):\n self.execute_script('change_dhcp_range', start, end, prefix_length)", "def update(self, v_input):\n\n self.v = v_input", "def update_frame(self, key, ranges=None):", "def __setitem__(self, *args):\n return _uhd_swig.range_vector_t___setitem__(self, *args)", "def update(self, v, r):\n pass", "def fusion_api_edit_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.update(body, uri, api, headers)", "def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits", "def update_V(self, Vs, Vs_next, reward, alpha, gamma):\r\n return Vs + alpha * (reward + gamma * Vs_next - Vs)", "def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)", "def fusion_api_edit_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.update(body, uri, api, headers)", "def set_accel_range(self, accel_range):\r\n # First change it to 0x00 to make sure we write the correct value later\r\n self.bus.write_byte_data(self.deviceaddress, self.accel_config, 0x00)\r\n\r\n # Write the new range to the ACCEL_CONFIG register\r\n self.bus.write_byte_data(self.deviceaddress, self.accel_config, accel_range)", "def setRange(self, x_range, y_range):\n self._pipe.send(\"range,%f,%f,%f,%f\" % (x_range + y_range))", "def set_accel_range(self, accel_range):\n\t\t# First change it to 0x00 to make sure we write the correct value later\n\t\tself.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)\n\n\t\t# Write the new range to the ACCEL_CONFIG register\n\t\tself.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)", "def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)", "def update(self):\n self._sync_ranges()\n self._update_params()", "def reconfigure_ml2_vlan_range(self):\n self.check_run('reconfigure_ml2_vlan_range')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('neutron')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n 
cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n service_name = 'neutron-server'\n uptimes = self.get_service_uptime(controllers, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n self.env.make_snapshot(\"reconfigure_ml2_vlan_range\", is_make=True)", "def set_accel_range(self, accel_range):\n # First change it to 0x00 to make sure we write the correct value later\n self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)\n\n # Write the new range to the ACCEL_CONFIG register\n self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)", "def fusion_api_collect_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.collect(body, uri, api, headers)", "def update(\n self,\n Count=None,\n Dhcp4EchoRelayInfo=None,\n Dhcp6IaType=None,\n Enabled=None,\n IpAddress=None,\n IpAddressIncrement=None,\n IpAddressPoolIncrement=None,\n IpAddressPrefix=None,\n IpAddressPrefixIncrement=None,\n IpAddressPrefixPoolIncrement=None,\n IpDns1=None,\n IpDns2=None,\n IpGateway=None,\n IpGatewayIncrement=None,\n IpPrefix=None,\n IpType=None,\n Name=None,\n PrefixCount=None,\n PrefixLength=None,\n ServerAddress=None,\n ServerAddressIncrement=None,\n ServerCount=None,\n ServerGateway=None,\n ServerGatewayIncrement=None,\n ServerPrefix=None,\n UseRapidCommit=None,\n ):\n # type: (int, bool, str, bool, str, str, str, str, str, str, str, str, str, str, int, str, str, int, int, str, str, int, str, str, int, bool) -> DhcpServerRange\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def update(self, val, feats):\n raise NotImplementedError", "def fusion_api_patch_ipv4_range(self, body, uri, param='', api=None, headers=None):\n return self.ipv4range.patch(body, uri, param, api, headers)", "def update(*args):", "def setRange(self, x_range, y_range):\n pass", "def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)", "def update(\n self,\n ECI=None,\n Enabled=None,\n MCC=None,\n MNC=None,\n Name=None,\n ParentMme=None,\n ParentSgw=None,\n RAILAC=None,\n RAIMCC1=None,\n RAIMCC2=None,\n RAIMCC3=None,\n RAIMNC1=None,\n RAIMNC2=None,\n RAIMNC3=None,\n RAIRAC=None,\n TAC=None,\n ):\n # type: (str, bool, str, str, str, str, str, str, int, int, int, int, int, int, str, str) -> EgtpNbS5S8Range\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def _modify_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n modifier: Callable[[str, int, str], Any],\n datacenter_id: int,\n datacenter_name: str,\n) -> List[Any]:\n if (not start_letter.isalpha) or (not stop_letter.isalpha):\n raise InvalidRangeError\n\n if start_number < 1 or start_number > stop_number:\n raise InvalidRangeError\n\n if start_letter.upper() > stop_letter.upper():\n raise InvalidRangeError\n\n alphabet: str = string.ascii_uppercase\n letters: str = alphabet[\n alphabet.index(start_letter.upper()) : alphabet.index(stop_letter.upper()) + 1\n ]\n\n results: List[Any] = []\n try:\n for letter in 
letters:\n for number in range(start_number, stop_number + 1):\n label = f\"{letter}{number}\"\n results.append(modifier(label, datacenter_id, datacenter_name))\n except (\n DBWriteException,\n InvalidRangeError,\n RackNotEmptyError,\n RackDoesNotExistError,\n ):\n raise\n\n return results" ]
[ "0.58151", "0.57016224", "0.5549422", "0.5420304", "0.5300636", "0.526787", "0.52065486", "0.5165738", "0.5119776", "0.5067113", "0.5030975", "0.50154567", "0.5014251", "0.5004153", "0.49768084", "0.49618125", "0.4955901", "0.49548832", "0.49462095", "0.4911806", "0.48930067", "0.4868263", "0.47982433", "0.47654697", "0.4739317", "0.47276038", "0.47256455", "0.47132075", "0.46956536", "0.4695262" ]
0.68932426
0
Deletes a VMAC range based on name OR uri. [Arguments]
def fusion_api_delete_vmac_range(self, name=None, uri=None, api=None, headers=None): return self.vmacrange.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vsnrange.delete(name, uri, api, headers)", "def fusion_api_delete_vwwn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vwwnrange.delete(name, uri, api, headers)", "def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4range.delete(name, uri, api, headers)", "def delete_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> None:\n _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_delete_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def deleteAddressRange(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def Multi_vm_delete(self, vm_name, s_range, e_range):\n try:\n for i in range(s_range, e_range):\n new_name = vm_name + \"%s\" % i\n self.destroy_vm(new_name)\n except Exception as error:\n print(error.message)\n raise error", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def DeleteRange(self, r):\n self.__context.builder.DocumentDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end)\n left = self._blip_data.content[:r.start]\n right = self._blip_data.content[r.end + 1:]\n self._blip_data.content = left + right", "def delete(self, uri, where, selectionArgs):\n pass", "def del_record(self, args):\n\n mac = MacAddress(args.mac)\n desc = self.dhcp_client_state[mac.as_redis_key()]\n print(\"Deleted mac %s with DHCP rec %s\" % (str(mac), desc))\n self.dhcp_client_state[mac.as_redis_key()] = None", "def unlink(address):", "def delete(fits: Optional[str], start: Optional[str], end: Optional[str], out: Optional[str]):\n delete_in_ssda(fits=fits, start=start, end=end, out=out)", "def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)", "def UnsafeDestroyRange(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n 
raise NotImplementedError('Method not implemented!')", "def delete_static_mac(self, port=None, vlan=None, mac=None):\n pass", "def del_reservation(self, src, dst):\n\n # PART 1, TASK 4.1 remove the reservation from the switch, controller and update links capacities.", "def deleteAttributeRange(self, startKey=None, endKey=None, limit=None):\n self.graph.deleteExtendedAttributeRange(entityId, startKey, endKey, limit)", "def delete_book(code: str):\n pass", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)", "def fusion_api_delete_ipv4_subnet(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4subnet.delete(name, uri, api, headers)", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)", "def do_command(self, args):\n vendorops = dbops.Vendors()\n vendorops.delete(args)", "def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)", "def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % volname\n return g.run(mnode, cmd)", "def DeleteRange(self, rangeText, silent=False):\n startTaxon, stopTaxon, startColumn, stopColumn = self.ParseIndex(rangeText)\n if (self.translated == True):\n startColumn = startColumn * 3\n stopColumn = (stopColumn * 3) + 2\n if (startTaxon >= 0): #Make sure we had a valid range\n changeLength = 0\n deleteTaxon = False\n if ((startColumn == 0) & (stopColumn == len(self.alignment[0]) - 1)):\n deleteTaxon = True\n if ((startTaxon > 0) | (stopTaxon < len(self.alignment) - 1)):\n changeLength = (stopColumn - startColumn) + 1\n taxon = 0\n newSequences = []\n for Sequence in self.alignment:\n if (taxon in range(startTaxon, stopTaxon + 1)):\n if (not deleteTaxon):\n if (startColumn > 0):\n Sequence.seq = Sequence.seq[:startColumn] + Sequence.seq[stopColumn + 1:]\n else:\n Sequence.seq = Sequence.seq[stopColumn + 1:]\n if (changeLength):\n Sequence.seq = Sequence.seq + Seq('-' * changeLength)\n newSequences.append(Sequence)\n else:\n newSequences.append(Sequence)\n taxon += 1\n self.alignment = MultipleSeqAlignment(newSequences)\n if (not silent):\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def DeleteAnnotationsInRange(self, r, name):\n self.__context.builder.DocumentAnnotationDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end,\n name)\n # TODO(davidbyttow): split local annotations.", "def fusion_api_remove_datacenter(self, name=None, uri=None, api=None, headers=None):\n return self.dc.delete(name, uri, api, headers)" ]
[ "0.76067543", "0.71442324", "0.70280826", "0.66786194", "0.6668879", "0.60728115", "0.579161", "0.57750773", "0.5742358", "0.57260656", "0.56273866", "0.5563267", "0.55164564", "0.5479828", "0.5474847", "0.54076487", "0.54076475", "0.53566474", "0.53420115", "0.5321254", "0.5318883", "0.5307956", "0.529071", "0.5289548", "0.5279491", "0.5270834", "0.52633864", "0.5241462", "0.52315795", "0.5230805" ]
0.79944247
0
Gets a default or paginated collection of VMAC Ranges. [Arguments]
def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None): return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_collect_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.collect(body, uri, api, headers)", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def ranges(self):\n return self._ranges", "def range(self):\n return self.range_array", "def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng", "def get_ranges( # type: ignore\n self, start_range=None, # type: Optional[int]\n end_range=None, # type: Optional[int]\n timeout=None, # type: Optional[int]\n **kwargs\n ):\n # type: (...) -> List[dict[str, int]]\n if self.require_encryption or (self.key_encryption_key is not None):\n raise ValueError(\"Unsupported method for encryption.\")\n\n content_range = None\n if start_range is not None:\n if end_range is not None:\n content_range = 'bytes={0}-{1}'.format(start_range, end_range)\n else:\n content_range = 'bytes={0}-'.format(start_range)\n try:\n ranges = self._client.file.get_range_list(\n sharesnapshot=self.snapshot,\n timeout=timeout,\n range=content_range,\n **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)\n return [{'start': b.start, 'end': b.end} for b in ranges]", "def range() -> List[int]:\n pass", "def ranges(self) -> List[Range]:\n return list(iter(self._ranges))", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def discoverRanges(self):\n iprange = self.options.range\n if isinstance(iprange, basestring):\n iprange = [iprange]\n # in case someone uses 10.0.0.0-5,192.168.0.1-5 instead of\n # --range 10.0.0.0-5 --range 192.168.0.1-5\n if isinstance(iprange, list) and iprange[0].find(\",\") > -1:\n iprange = [n.strip() for n in iprange[0].split(\",\")]\n ips = []\n for rangelimit in iprange:\n # Parse to find ips included\n ips.extend(parse_iprange(rangelimit))\n results = yield self.pingMany(ips)\n goodips, badips = _partitionPingResults(results)\n self.log.debug(\n \"Found %d good IPs and %d bad IPs\", len(goodips), len(badips)\n )\n devices = yield self.discoverDevices(goodips)\n self.log.info(\"Discovered %d active IPs\", len(goodips))\n defer.returnValue(devices)", "def fusion_api_allocate_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.allocate(body, uri, api, headers)", "def _get_page_range(self):\r\n return list(range(1, self.num_pages + 1))", "def EnergyRanges(self, default=[None]):\n return self.data.get('metadata', {}).get('energy_ranges', default)", "def get_range(value):\n return list(range(value))", "def 
get_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> List[JSON]:\n return _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_get_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def get_range( value ):\n return list(range(value))", "def test_get_range(self):\n pass", "def byrange(self, start, stop):\n\t\treturn ElementsByRange(self.AEM_want, self, (start, stop))", "def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def fusion_api_create_vmac_range(self, body, api=None, headers=None):\n return self.vmacrange.create(body, api, headers)", "def getAFeRange(brand):\n return afe_range[brand]", "def fusion_api_get_fabric_reserved_vlan_range(self, uri=None, param='', api=None, headers=None):\n param = \"/reserved-vlan-range%s\" % param\n return self.fabric.get(uri=uri, api=api, headers=headers, param=param)" ]
[ "0.6278908", "0.6129527", "0.57914954", "0.5772542", "0.56672585", "0.5618746", "0.5594485", "0.55870026", "0.5582523", "0.5571187", "0.5571187", "0.5571187", "0.5571187", "0.5559346", "0.55547", "0.55458707", "0.5543781", "0.554281", "0.5541446", "0.55319786", "0.55218315", "0.5521323", "0.5520206", "0.55031145", "0.55031145", "0.55031145", "0.55031145", "0.5487513", "0.54836375", "0.5448723" ]
0.70085233
0
Returns all fragments that have been allocated from a VMAC Range. [Arguments]
def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None): return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)", "def fusion_api_collect_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.collect(body, uri, api, headers)", "def get_fragments_for_mdv_calculation(self):\n return list(self.fragments_for_mdv_calculation)", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def 
fusion_api_allocate_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.allocate(body, uri, api, headers)", "def list_fragments(self):\n return list(self.data.fragments)", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0", "def vaccinations(self, from_date: str, to_date: str) -> VaccinationList:\n params = {'date_from': from_date, 'date_to': to_date}\n data = self.get(\"mdg_emvolio\", params=params)\n\n ls = [Vaccination(**area) for area in data]\n return VaccinationList(items=ls)", "def fragment_length_filter(fragment_anno_dic):\n out_list = []\n total_fragment = 0\n for key in fragment_anno_dic.keys():\n #print fragment_anno_dic[key]\n fragments_flag = []\n fragments_length = []\n fragments_region = []\n total_fragment += int(fragment_anno_dic[key][0][-3])\n reads_coverage = [x[-3] for x in fragment_anno_dic[key]]\n if len(list(set(reads_coverage))) != 1:\n print (fragment_anno_dic[key])\n if len(fragment_anno_dic[key]) == 1:\n fragment_anno_dic[key][0] = list(fragment_anno_dic[key][0])\n fragment_anno_dic[key][0][-2] = str(fragment_anno_dic[key][0][-2])\n out_list.append('\\t'.join(fragment_anno_dic[key][0]))\n else:\n for i in range(0,len(fragment_anno_dic[key])):\n fragment_anno_dic[key][i] = list(fragment_anno_dic[key][i])\n iso = fragment_anno_dic[key][i]\n iso_length = sum([int(x) for x in iso[10].split(',')])\n fragments_length.append(iso_length)\n fragments_flag.append(iso[-2])\n fragments_region.append(iso[8])\n #print fragment_anno_dic[key]\n#---------------------------------------------------------------- complete fragments (Set region preference)\n region_complete = [''] * len(fragments_flag)\n max_flag = max(fragments_flag)\n #print fragments_length,fragments_region,fragments_flag\n if max_flag == 3:\n for x in range(0,len(fragments_flag)):\n if fragments_flag[x] == max_flag:\n fragment_anno_dic[key][x][-2] = str(fragment_anno_dic[key][x][-2])\n region_complete[x] = fragments_region[x]\n # Set preference\n if 'CDS' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('CDS')]))\n elif '5UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('5UTR')]))\n elif '3UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('3UTR')]))\n elif '5UTR-CDS' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('5UTR-CDS')]))\n elif 'CDS-3UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('CDS-3UTR')]))\n elif 'intron' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron')]))\n elif 'intron-containing' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron-containing')]))\n elif 'Null' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('Null')]))\n else:\n print (fragment_anno_dic[key])\n print ('Gene type error!')\n#----------------------------------------------------------------- incomplete fragments (choose the longest fragments)\n elif max_flag == 2:\n max_length_list = [0] * len(fragments_length)\n max_region_list = [''] * len(fragments_length)\n for y in range(0,len(fragments_flag)):\n if fragments_flag[y] == max_flag:\n max_length_list[y] = fragments_length[y]\n #print max_length_list\n max_length = max(max_length_list)\n #print max_length\n for z in 
range(0,len(max_length_list)):\n if max_length_list[z] == max_length:\n fragment_anno_dic[key][z][-2] = str(fragment_anno_dic[key][z][-2])\n max_region_list[z] = fragments_region[z]\n #print max_region_list\n # Set preference\n if 'CDS' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('CDS')]))\n elif '5UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('5UTR')]))\n elif '3UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('3UTR')]))\n elif '5UTR-CDS' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('5UTR-CDS')]))\n elif 'CDS-3UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('CDS-3UTR')]))\n elif 'intron' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('intron')]))\n elif 'intron-containing' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron-containing')]))\n elif 'Null' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('Null')]))\n elif max_flag == 1: #Not annotated to exon region\n fragment_anno_dic[key][fragments_flag.index(1)][-2] = str(fragment_anno_dic[key][fragments_flag.index(1)][-2])\n # print (fragment_anno_dic[key])\n out_list.append('\\t'.join(fragment_anno_dic[key][fragments_flag.index(1)]))\n elif max_flag == 0: #Not annotated to intragenic region\n fragment_anno_dic[key][0][-2] = str(fragment_anno_dic[key][0][-2])\n out_list.append('\\t'.join(fragment_anno_dic[key][0]))\n else:\n print (fragment_anno_dic[key])\n print ('Please check flag information')\n print ('Total fragments after filtering 1: ' + str(total_fragment))\n return out_list", "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fragments(self):\n return len(self.byteruns())", "def populate_ranges(self,):\n self.ranges = list()\n # coredump: info target shows all sections in full detail\n # live debug: only file-backed sections are shown\n targetinfo = gdb.execute(\"info target\", False, True)\n for line in targetinfo.splitlines():\n line = line.strip()\n if line.startswith('`'):\n line = line.split(\"'\")[1]\n source = line[1:]\n continue\n if not line.startswith(\"0x\"):\n continue\n\n start, dash, end, str_is, memtype = line.split(maxsplit=4)\n assert(dash == '-' and str_is == 'is')\n start = int(start, 16)\n end = int(end, 16)\n new_range = MemoryRange(start, end-start, source, memtype)\n startoverlap = self.get_range(start)\n endoverlap = self.get_range(end)\n\n if endoverlap == startoverlap:\n endoverlap = None\n\n #TODO: splitup and punch holes/replace\n if memtype.startswith('.'):\n # gdb reports loadXXX sections on top of file-backed sections of the binary\n # probably because the kernel maps writeable pages on top of them\n # Therefore, keep the more accurate description from the file-backed section\n if startoverlap is not None and startoverlap.memtype == MemoryType.General:\n previous, current = self.split_range_at(start)\n self.ranges.remove(current)\n startoverlap = None\n if endoverlap is not None and endoverlap.memtype == MemoryType.General:\n current, end = self.split_range_at(end)\n self.ranges.remove(current)\n endoverlap = None\n\n if startoverlap is not None and endoverlap is not None:\n 
print(\"Overlapping memory ranges: %s in %s -> %s\" %\n (new_range, str(startoverlap), str(endoverlap)))\n bisect.insort(self.ranges, new_range)\n\n # live target: run-time allocated memory and some file-backed sections\n # There typically is overlap with the 'info target' output, so give precedence\n # to the previously added ranges\n mappinginfo = gdb.execute(\"info proc mappings\", False, True)\n for line in mappinginfo.splitlines():\n line = line.strip()\n if not line.startswith(\"0x\"):\n continue\n\n items = line.split()\n if len(items) == 4:\n start, end, size, offset = items\n source = \"unknown\"\n elif len(items) == 5:\n start, end, size, offset, source = items\n else:\n print(\"Unexpected line when parsing 'info proc mappings': %s\" % line)\n continue\n\n start = int(start, 16)\n size = int(size, 16)\n end = int(end, 16)\n\n new_range = MemoryRange(start, size, source, source)\n self.tentative_add_range(new_range)", "def range_table(self):\n range_table_base = []\n if self.block_mask != None:\n range_table_length = len(self.block_mask)\n else:\n range_table_length = self.block_num\n\n for i in range(range_table_length):\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.k_size))\n range_table_base.append(len(self.pool_type))\n\n return range_table_base", "def get_eap_mab(self):\n for m in self.get_tag(self.mac):\n v = m[1]\n if not isinstance(v, int):\n v = self._get_vlan(v)\n yield [m[0], v]", "def _fragment(self):\n\n if self._bmap is None:\n raise ValueError('bonds must be set for fragment determination, '\n 'use `setBonds`')\n\n fids = np.zeros(self._n_atoms, int)\n fdict = {}\n c = 0\n for a, b in self._bonds:\n af = fids[a]\n bf = fids[b]\n if af and bf:\n if af != bf:\n frag = fdict[af]\n temp = fdict[bf]\n fids[temp] = af\n frag.extend(temp)\n fdict.pop(bf)\n elif af:\n fdict[af].append(b)\n fids[b] = af\n elif bf:\n fdict[bf].append(a)\n fids[a] = bf\n else:\n c += 1\n fdict[c] = [a, b]\n fids[a] = fids[b] = c\n fragindices = np.zeros(self._n_atoms, int)\n fragments = []\n append = fragments.append\n fidset = set()\n c = 0\n for i, fid in enumerate(fids):\n if fid in fidset:\n continue\n elif fid:\n fidset.add(fid)\n indices = fdict[fid]\n indices.sort()\n append(indices)\n fragindices[indices] = c\n c += 1\n else:\n # these are non-bonded atoms, e.g. 
ions\n fragindices[i] = c\n append([i])\n c += 1\n self._data['fragindex'] = fragindices\n self._fragments = fragments", "def find_vtables_aggressive( firstaddr = 0, lastaddr = 0x7FFFFFFF ):\r\n\tvalid_reg_strings = [ \"[eax\", \"[ebx\", \"[ecx\", \"[edx\", \"[esi\", \"[edi\",\\\r\n\t\t\"[ebp\" ]\r\n\tif firstaddr == 0:\r\n\t\tstartaddr = nextaddr( firstaddr)\r\n\telse:\r\n\t\tstartaddr = firstaddr\r\n\tvtables = []\r\n\twhile startaddr != BADADDR:\r\n\t\t#\r\n\t\t# Check if the offset is written \r\n\t\t#\r\n\t\txrefs = get_drefs_to( startaddr )\r\n\t\tis_written_to_beginning = 0\r\n\t\tfor xref in xrefs:\r\n\t\t\tline = get_disasm_line( xref )\r\n\t\t\tif len( line ) >= 3:\r\n\t\t\t\tfor reg in valid_reg_strings:\r\n\t\t\t\t\tif line[2].find( reg ) != -1:\r\n\t\t\t\t\t\tis_written_to_beginning = 1\r\n\t\t#\r\n\t\t# Check if \r\n\t\t#\r\n\t\ti = 0\r\n\t\tif is_written_to_beginning == 1:\r\n\t\t\twhile get_first_dref_from( startaddr + (4 * (i+1))) != BADADDR:\r\n\t\t\t\tea = get_first_dref_from( startaddr + (4*i))\r\n\t\t\t\tfunc = get_func( ea )\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif func.startEA != ea:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\texcept( AttributeError ):\r\n\t\t\t\t\tbreak;\r\n\t\t\t\ti = i + 1\r\n\t\t\t\tif len( get_drefs_to( startaddr + ( 4 * (i)))) != 0:\r\n\t\t\t\t\tbreak;\r\n\t\tif i > 0:\r\n\t\t\tvtables.append( [ startaddr, startaddr + (4*i) ] )\r\n\t\tif i > 0:\r\n\t\t\tstartaddr = startaddr + i*4\r\n\t\telif get_item_size( startaddr ) != 0:\r\n\t\t\tstartaddr = startaddr + get_item_size( startaddr )\r\n\t\telse:\r\n\t\t\tstartaddr = startaddr + 1\r\n\t\tif nextaddr( startaddr ) == BADADDR:\r\n\t\t\tbreak\r\n\t\tif startaddr >= lastaddr:\r\n\t\t\tbreak\r\n\treturn vtables", "def compute_fragments(self):\n self.fragments = []\n for part in self.parts:\n for fragment in self.compute_digest(part):\n # The part is not a fragment if it hasn't been cut at all and\n # therefore doesn't have sticky ends. Exclude from fragments.\n if not hasattr(fragment.seq, \"left_end\"):\n continue\n fragment.original_part = part\n self.annotate_fragment_with_part(fragment)\n self.fragments.append(fragment)", "def find_all(v):\n screen = G.DEVICE.snapshot(quality=ST.SNAPSHOT_QUALITY)\n return v.match_all_in(screen)", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return _calculate_fragmentation(buddyinfo_output)", "def getSegments(self) -> List[int]:\n ...", "def sm_get_matching_blocks(s1,s2,min_length=1):\n anslist= list(SM(None, s1, s2).get_matching_blocks())\n\n\n anslist = [ l for l in anslist if l.size>=min_length]\n\n anslist=[ (s1[l.a:l.a+l.size], l.a, l.b, l.size) for l in anslist]\n return anslist", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]" ]
[ "0.65163994", "0.64793557", "0.6337672", "0.62566894", "0.62267727", "0.58809084", "0.5855773", "0.57009506", "0.5661508", "0.55515385", "0.53324705", "0.5272335", "0.52058667", "0.51986295", "0.518908", "0.517827", "0.51568276", "0.5139852", "0.50838804", "0.50652164", "0.506309", "0.5030145", "0.502198", "0.501432", "0.50124925", "0.50091165", "0.4985233", "0.49812204", "0.49610463", "0.4947419" ]
0.74009
0
Returns all the free fragments in a VMAC Range. [Arguments]
def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None): return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vwwn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def ShowIVACFreeList(cmd_args=[], cmd_options={}):\n if not cmd_args:\n raise ArgumentError('Please provide <ipc_voucher_attr_control_t>')\n ivac = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_attr_control_t')\n print GetIPCVoucherAttrControlSummary.header\n print GetIPCVoucherAttrControlSummary(ivac)\n if unsigned(ivac.ivac_freelist) == 0:\n print \"ivac table is full\"\n return\n print \"index \" + GetIPCVoucherAttributeEntrySummary.header\n next_free = unsigned(ivac.ivac_freelist)\n while next_free != 0:\n print \"{: <5d} \".format(next_free) + GetIPCVoucherAttributeEntrySummary(addressof(ivac.ivac_table[next_free]))\n next_free = unsigned(ivac.ivac_table[next_free].ivace_next)", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return _calculate_fragmentation(buddyinfo_output)", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = 
toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def select_vertices_free(self):\n _filter = compas_rhino.rs.filter.point\n guids = compas_rhino.rs.GetObjects(message=\"Select Free Vertices.\", preselect=True, select=True, group=False, filter=_filter)\n if guids:\n keys = [self.guid_vertex_free[guid] for guid in guids if guid in self.guid_vertex_free]\n else:\n keys = []\n return keys", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0", "def get_available_memory_blocks(self):\n status = self.get_status()\n return status & (STATUS_MEM_0_EMPTY | STATUS_MEM_1_EMPTY)", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result", "def get_fragments_for_mdv_calculation(self):\n return list(self.fragments_for_mdv_calculation)", "def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage", "def get_all_volume_usage(self, context, compute_host_bdms):\n volusage = []\n return volusage", "def fragmentation(free_resources_gaps, p=2):\n f = free_resources_gaps\n frag = pd.Series()\n for i, fi in enumerate(f):\n if fi.size == 0:\n frag_i = 0\n else:\n frag_i = 1 - (sum(fi**p) / sum(fi)**p)\n frag.set_value(i, frag_i)\n return frag", "def get_free_indices(program, program_len):\n used = get_used_indices(program)\n total = set(range(program_len + len(program.input_types)))\n return total - used", "def free(range_lst, range_start, range_end, user_start, user_end):\n \n # Attempt to calculate range to subtract times from\n minute_range = []\n # range_start = arrow.get(range_start, \"MM/DD/YYYY hh:mm A\")\n # range_start_format = range_start.format(\"MM/DD/YYYY hh:mm A\")\n # range_end = arrow.get(range_end, \"MM/DD/YYYY hh:mm A\")\n # range_end_format = range_end.format(\"MM/DD/YYYY hh:mm A\")\n\n # Calculate range of minutes between potential start and end given by event creator\n minute_range = []\n for r in arrow.Arrow.range(\"minute\", range_start, range_end):\n minute_range.append(r)\n\n # Attempt to calculate user range of busy times\n try:\n user_start = arrow.get(user_start, \"MM/DD/YYYY hh:mm A\")\n user_end = arrow.get(user_end, \"MM/DD/YYYY hh:mm A\")\n\n user_range = arrow.Arrow.range(\"minute\", user_start, user_end)\n except:\n logger.info(\"MODULE 'free_times' FUNCTION 'free' -- Can't calculate USER range using {} - {}\".format(user_start, user_end))\n # Return empty list on fail\n return []\n\n # Subtract times from user_range from the general minute_range\n for time in user_range:\n if time in minute_range:\n index = minute_range.index(time)\n # None type will be used to generate range in flask_main find_busy_times\n minute_range[index] = None\n \n return minute_range", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # 
My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def fragments(self):\n return len(self.byteruns())", "def fusion_api_get_vmac_range(self, uri=None, param='', api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param=param)", "def fragments(self):\n return self.fragments_tree.vchildren_not_empty", "def free_slots(self, day_bounds: Slot):\n free_slots: List[Slot] = []\n time_ptr = day_bounds.start\n for meeting in self.meetings:\n if meeting.start > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, meeting.start.time_str))\n time_ptr = meeting.end\n if day_bounds.end > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, day_bounds.end.time_str))\n return free_slots" ]
[ "0.688477", "0.65210354", "0.6447009", "0.6393782", "0.62968355", "0.61768275", "0.60178125", "0.5956286", "0.58735996", "0.5638696", "0.5584719", "0.54974365", "0.5348363", "0.52694863", "0.5263789", "0.52510947", "0.5230552", "0.5205711", "0.5167516", "0.51618314", "0.50921637", "0.50921637", "0.50906783", "0.50871634", "0.5042278", "0.5027046", "0.50157034", "0.50074077", "0.4979501", "0.4975083" ]
0.7555054
0
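A minimal standalone sketch of the free-fragments call pattern in the record above, assuming a plain REST endpoint reached with the requests library; the base URL, header names, and response shape are illustrative assumptions, not details taken from this dataset:

    import requests

    def get_free_fragments(base_url, token, range_uri, start=0, count=-1):
        # Append the '/free-fragments' sub-resource to the range URI, as the
        # wrapper above does, and page with start/count query parameters.
        url = "{0}{1}/free-fragments?start={2}&count={3}".format(
            base_url, range_uri, start, count)
        headers = {"Auth": token, "X-API-Version": "1200"}  # assumed headers
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        return resp.json()  # assumed: a collection dict with a 'members' list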
Creates a VSN Range. [Arguments]
def fusion_api_create_vsn_range(self, body, api=None, headers=None): return self.vsnrange.create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, range):\n raise NotImplementedError", "def fusion_api_allocate_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.allocate(body, uri, api, headers)", "def build_range(identity: str, type_uri: str = SBOL_RANGE):\n start = 1\n end = 1\n obj = Range(PYSBOL3_MISSING, start, end, identity=identity, type_uri=type_uri)\n # Remove the placeholder values\n obj._properties[SBOL_SEQUENCES] = []\n obj._properties[SBOL_START] = []\n obj._properties[SBOL_END] = []\n return obj", "def create_range(range_class):\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n return Range.objects.create(\n name=range_class.name,\n proxy_class=_class_path(range_class))", "def get_genomic_range( self ):\n return self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )", "def _build_range(self) -> str:\n return build_sequence(filter(None, (self.uids, self.sequence)))", "def define_snps(genome, num):\n for n in range(num):\n snp_pos = get_snp_pos(genome)\n var = Variant(\"snp\", snp_pos, snp_pos, 0)\n genome.add_variant(var)\n genome.unavail_pos.append(snp_pos)", "def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)", "def computerange(lyrindex):\n for i in range(len(lyrindex)):\n if i != len(lyrindex) - 1:\n if lyrindex[i][0].find('.') > 0: # special case where inventory files have two records\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+2][1]) - 1) )\n else:\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+1][1]) - 1) )\n else:\n lyrindex[-1].append( 'range=%s' % ( lyrindex[-1][1] ) ) \n return lyrindex", "def range(*args:List[str], negate:bool=False) -> str:\n character_set = \"\"\n for arg in args:\n try:\n start, end = arg\n character_set += f\"{start}-{end}\"\n except:\n raise\n\n negate = \"^\" if negate else \"\"\n return f\"[{negate}{character_set}]\"", "def fusion_api_create_vwwn_range(self, body, api=None, headers=None):\n return self.vwwnrange.create(body, api, headers)", "def fusion_api_get_vsn_range(self, uri=None, param='', api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param=param)", "def make_range(_num_list):\n if len(_num_list) == 1:\n out_str = str(_num_list[0])\n\n elif len(_num_list) == 2:\n out_str = str(_num_list[0]) + ', ' + str(_num_list[1])\n\n else:\n out_str = str(_num_list[0]) + '-' + str(_num_list[-1])\n\n return out_str", "def gen_qrange(prefix, nsnps, prunestep, every=False, qrangefn=None):\n order = ['label', 'Min', 'Max']\n # dtype = {'label': object, 'Min': float, 'Max': float}\n if qrangefn is None:\n # Define the number of snps per percentage point and generate the range\n percentages = set_first_step(nsnps, prunestep, every=every)\n snps = np.around((percentages * nsnps) / 100).astype(int)\n try:\n # Check if there are repeats in ths set of SNPS\n assert sorted(snps) == sorted(set(snps))\n except AssertionError:\n snps = ((percentages * nsnps) / 100).astype(int)\n assert sorted(snps) == sorted(set(snps))\n labels = ['%.2f' % x for x in percentages]\n if float(labels[-1]) > 100.:\n labels[-1] = '100.00'\n if snps[-1] != nsnps:\n snps[-1] = nsnps\n assert snps[-1] == nsnps\n assert labels[-1] == '100.00'\n # Generate the qrange file\n qrange = '%s.qrange' % prefix\n qr = pd.DataFrame({'label': labels, 'Min': np.zeros(len(percentages)),\n 'Max': snps}).loc[:, order]\n qr.to_csv(qrange, header=False, index=False, sep=' ')\n else:\n qrange = 
qrangefn\n qr = pd.read_csv(qrange, sep=' ', header=None,\n names=order) # , dtype=dtype)\n return qr, qrange", "def vrange(starts, stops):\n stops = np.asarray(stops)\n l = stops - starts # Lengths of each range.\n return np.repeat(stops - l.cumsum(), l) + np.arange(l.sum()), l.cumsum()", "def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def new_range(self, ip_range):\n if not ip_range in self.ip_ranges:\n self.ip_ranges.add(ip_range)\n doc = self.rs.id_to_object(ip_range)\n doc.add_tag('sniffer')\n doc.save()\n print_success(\"New ip range: {}\".format(ip_range))", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )", "def _parse_nexus_vni_range(self, tunnel_range):\n for ident in tunnel_range:\n if not self._is_valid_nexus_vni(ident):\n raise exc.NetworkTunnelRangeError(\n tunnel_range=tunnel_range,\n error=_(\"%(id)s is not a valid Nexus VNI value.\") %\n {'id': ident})\n\n if tunnel_range[1] < tunnel_range[0]:\n raise exc.NetworkTunnelRangeError(\n tunnel_range=tunnel_range,\n error=_(\"End of tunnel range is less than start of \"\n \"tunnel range.\"))", "def genrange(gen, *args):\n log = logging.getLogger(\"hepfab.util.genrange\")\n\n start, stop, step = 1, None, 1\n arglen = len(args)\n if arglen == 3:\n start, stop, step = args\n elif arglen == 2:\n start, stop = args\n else:\n (stop,) = args\n stop += 1\n\n log.debug(\"genrange(%r, %r, %r, %r)\", gen, start, stop, step)\n spec = \"%sn%%0%d.d\" % (gen, digits(stop))\n log.debug(\"Produced spec %r\", spec)\n\n for i in xrange(start, stop, step):\n yield spec % i", "def tnuc_range2gnuc_range(self, tbeg, tend):\n np = self.position_array()\n # print self.\n # print len(np)\n # print tbeg, tend\n return tnuc_range2gnuc_range_(np, tbeg, tend)", "def get_dhcp_range(options, index):\n second_octet = 160 + index\n return \"192.%s.1.2-192.%s.255.254\" % (second_octet, second_octet)", "def Range(self, from: int, to: int) -> BaseVector:", "def lrange(self, name, start, end):\r\n return self.format_inline('LRANGE', name, start, end)", "def test_create_one_start(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 10002, 12001, {\"from\": accounts[2]})\n check_ranges([(1, 10001)], [(10001, 10002), (12001, 20001)], [(20001, 30001)], [(10002, 12001)])", "def __init__(self, ranges=None, *args, **kwargs):\n self.ranges = ranges\n super(DiscreteGeneticAlgorithm, self).__init__(*args, **kwargs)", "def create_svs(self, svs_name, vmnic, num_ports=8):\n\n svs = vim.host.VirtualSwitch.Specification()\n svs.numPorts = num_ports\n svs.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[vmnic])\n host_network_obj = self.host_obj.configManager.networkSystem\n host_network_obj.AddVirtualSwitch(vswitchName=svs_name, spec=svs)", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def range(self) -> str:\n return f\"{self.name}!A:F\"" ]
[ "0.6339749", "0.5785153", "0.5731688", "0.5553078", "0.55106306", "0.54108983", "0.53503424", "0.53109884", "0.5308156", "0.52729785", "0.5237094", "0.5219046", "0.516484", "0.514242", "0.51333964", "0.51028806", "0.50952756", "0.5072364", "0.5054253", "0.5041013", "0.5039785", "0.50313735", "0.50282645", "0.50252193", "0.5018912", "0.5017549", "0.49839458", "0.4961991", "0.49618912", "0.49593323" ]
0.66736615
0
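The create call above POSTs a caller-supplied body. A hedged example of what such a body might look like; the field names are assumptions modeled on OneView-style ID-pool range payloads and are not taken from this dataset:

    # Hypothetical payload passed as 'body' to fusion_api_create_vsn_range.
    vsn_range_body = {
        "type": "Range",            # assumed resource type
        "rangeCategory": "CUSTOM",  # custom rather than generated range
        "startAddress": "VCUS000000",
        "endAddress": "VCUS00ZZZZ",
    }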
Updates a VSN Range. [Arguments]
def fusion_api_edit_vsn_range(self, body, uri, api=None, headers=None): return self.vsnrange.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, **kwargs):\n for kwarg in kwargs:\n if kwarg not in self._ALLOWED_KEYWORDS:\n raise NNDCInputError(f'Unknown keyword: \"{kwarg}\"')\n if \"nuc\" in kwargs:\n self._data[\"spnuc\"] = \"name\"\n self._data[\"nuc\"] = kwargs[\"nuc\"]\n for x in [\"z\", \"a\", \"n\"]:\n # handle Z, A, and N settings\n if x in kwargs:\n self._data[\"spnuc\"] = \"zanrange\"\n self._data[x + \"min\"], self._data[x + \"max\"] = _format_range(\n (kwargs[x], kwargs[x])\n )\n # handle *_range, *_any, *_odd, *_even\n elif x + \"_range\" in kwargs:\n self._data[\"spnuc\"] = \"zanrange\"\n self._data[x + \"min\"], self._data[x + \"max\"] = _format_range(\n kwargs[x + \"_range\"]\n )\n if self._data[x + \"min\"] == \"\":\n self._data[x + \"min\"] = \"0\"\n if self._data[x + \"max\"] == \"\":\n self._data[x + \"max\"] = \"300\"\n if x + \"_any\" in kwargs:\n self._data[\"even\" + x] = \"any\"\n elif x + \"_even\" in kwargs:\n self._data[\"even\" + x] = \"even\"\n elif x + \"_odd\" in kwargs:\n self._data[\"even\" + x] = \"odd\"\n # handle half-life range condition\n if \"t_range\" in kwargs:\n self._data[\"tled\"] = \"enabled\"\n self._data[\"tlmin\"], self._data[\"tlmax\"] = _format_range(kwargs[\"t_range\"])", "def update(self):\n self._sync_ranges()\n self._update_params()", "def change_dhcp_range(self, start, end, prefix_length):\n self.execute_script('change_dhcp_range', start, end, prefix_length)", "def fusion_api_edit_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.update(body, uri, api, headers)", "def _sync_range_points(self, args, pkey, rwidget):\n if pkey in args:\n rwidget.ui.points.setValue(args[pkey])\n else:\n args[pkey] = rwidget.ui.points.value()", "def fusion_api_edit_vmac_range(self, body, uri, api=None, headers=None):\n return self.vmacrange.update(body, uri, api, headers)", "def update_source_range(self):\r\n self.source_range_index = self.SourceRangeValue.currentIndex()\r\n self.cmd = None\r\n if self.source_range_type_index and self.connected:\r\n self.cmd = self.source_range_switch.get(\r\n self.source_range_index, None)\r\n self.I_source.write(self.cmd)", "def fusion_api_edit_ipv4_range(self, body, uri, api=None, headers=None):\n return self.ipv4range.update(body, uri, api, headers)", "def setRange(self, x_range, y_range):\n self._pipe.send(\"range,%f,%f,%f,%f\" % (x_range + y_range))", "def update(self, v_input):\n\n self.v = v_input", "def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits", "def voltage_source_update(n):\n Vs = voltage_source(n)\n\n if Rg > 0:\n # compute the multiplier coefficients (from (2.62))\n b1 = (C * dx * 0.5) / dt\n b2 = 0.5 / Rg\n c1 = 1.0 / (b1 + b2)\n c2 = b1 - b2\n V[0] = c1 * (c2 * V[0] - I[0] + (Vs / Rg))\n\n else:\n V[0] = Vs", "def update(*args):", "def fusion_api_create_vsn_range(self, body, api=None, headers=None):\n return self.vsnrange.create(body, api, headers)", "def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")", "def update_ranges(self):\n 
new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)", "def update_frame(self, key, ranges=None):", "def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)", "def update(self, v, r):\n pass", "def ModifyRange(self, rangeText, nucleotide='-'):\n nucleotide = nucleotide.upper()\n if (self.translated == True):\n self.AlertMessage(\"Can't modify protein sequences.\", 'medium')\n elif (nucleotide not in ['A', 'G', 'C', 'T', 'R', 'K', 'S', 'W', 'M', 'Y', 'D', 'V', 'B', 'H', 'N', '-']):\n self.AlertMessage('Invalid nucleotide. (only AGTC- and IUB nucleotide codes are permitted)', 'high')\n else:\n startTaxon, stopTaxon, startColumn, stopColumn = self.ParseIndex(rangeText)\n if (startTaxon >= 0): #Make sure we have a valid range\n taxon = 0\n newSequences = []\n modificationLength = (stopColumn - startColumn) + 1\n for Sequence in self.alignment:\n if (taxon in range(startTaxon, stopTaxon + 1)):\n if (startColumn > 0):\n Sequence.seq = Sequence.seq[:startColumn] + Seq(nucleotide * modificationLength) + Sequence.seq[stopColumn + 1:]\n else:\n Sequence.seq = Seq(nucleotide * modificationLength) + Sequence.seq[stopColumn + 1:]\n newSequences.append(Sequence)\n taxon += 1\n self.alignment = MultipleSeqAlignment(newSequences)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def _parse_nexus_vni_range(self, tunnel_range):\n for ident in tunnel_range:\n if not self._is_valid_nexus_vni(ident):\n raise exc.NetworkTunnelRangeError(\n tunnel_range=tunnel_range,\n error=_(\"%(id)s is not a valid Nexus VNI value.\") %\n {'id': ident})\n\n if tunnel_range[1] < tunnel_range[0]:\n raise exc.NetworkTunnelRangeError(\n tunnel_range=tunnel_range,\n error=_(\"End of tunnel range is less than start of \"\n \"tunnel range.\"))", "def setRange(self, x_range, y_range):\n pass", "def fusion_api_allocate_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.allocate(body, uri, api, headers)", "def _modify_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n modifier: Callable[[str, int, str], Any],\n datacenter_id: int,\n datacenter_name: str,\n) -> List[Any]:\n if (not start_letter.isalpha) or (not stop_letter.isalpha):\n raise InvalidRangeError\n\n if start_number < 1 or start_number > stop_number:\n raise InvalidRangeError\n\n if start_letter.upper() > stop_letter.upper():\n raise InvalidRangeError\n\n alphabet: str = string.ascii_uppercase\n letters: str = alphabet[\n alphabet.index(start_letter.upper()) : alphabet.index(stop_letter.upper()) + 1\n ]\n\n results: List[Any] = []\n try:\n for letter in letters:\n for number in range(start_number, stop_number + 1):\n label = f\"{letter}{number}\"\n results.append(modifier(label, datacenter_id, datacenter_name))\n except (\n DBWriteException,\n InvalidRangeError,\n RackNotEmptyError,\n RackDoesNotExistError,\n ):\n raise\n\n return results", "def set_vin(self, value):\n return self.sendCMD(\"ATSET VIN={}\".format(value))", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def update(\n self,\n ECI=None,\n Enabled=None,\n MCC=None,\n MNC=None,\n Name=None,\n ParentMme=None,\n ParentSgw=None,\n RAILAC=None,\n RAIMCC1=None,\n RAIMCC2=None,\n RAIMCC3=None,\n RAIMNC1=None,\n RAIMNC2=None,\n RAIMNC3=None,\n RAIRAC=None,\n TAC=None,\n ):\n # type: (str, bool, str, str, str, str, str, str, 
int, int, int, int, int, int, str, str) -> EgtpNbS5S8Range\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def Update(self,n,l):\n\t\tself.n = n\n\t\tself.l = l", "def nurbsEditUV(*args, angle: Union[float, bool]=0.0, pivotU: Union[float, bool]=0.0, pivotV:\n Union[float, bool]=0.0, relative: bool=True, rotateRatio: Union[float,\n bool]=1.0, rotation: bool=True, scale: bool=True, scaleU: Union[float,\n bool]=0.0, scaleV: Union[float, bool]=0.0, uValue: Union[float, bool]=0.0,\n vValue: Union[float, bool]=0.0, q=True, query=True, **kwargs)->Union[bool,\n Any]:\n pass", "def update(self, args):\n pass" ]
[ "0.57381266", "0.5490792", "0.54721355", "0.5463872", "0.5336311", "0.5287006", "0.5208189", "0.51468337", "0.51341605", "0.50885314", "0.5053471", "0.5053382", "0.5040719", "0.50045073", "0.49524987", "0.4905989", "0.4900182", "0.48884127", "0.4885453", "0.48783058", "0.4865367", "0.48151967", "0.48151034", "0.480891", "0.47597626", "0.47390532", "0.47383088", "0.47280285", "0.47190598", "0.47122073" ]
0.6712275
0
Deletes a VSN range based on name OR uri. [Arguments]
def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None): return self.vsnrange.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_vwwn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vwwnrange.delete(name, uri, api, headers)", "def fusion_api_delete_vmac_range(self, name=None, uri=None, api=None, headers=None):\n return self.vmacrange.delete(name, uri, api, headers)", "def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4range.delete(name, uri, api, headers)", "def deleteAddressRange(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def delete_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> None:\n _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_delete_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def DeleteRange(self, r):\n self.__context.builder.DocumentDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end)\n left = self._blip_data.content[:r.start]\n right = self._blip_data.content[r.end + 1:]\n self._blip_data.content = left + right", "def Multi_vm_delete(self, vm_name, s_range, e_range):\n try:\n for i in range(s_range, e_range):\n new_name = vm_name + \"%s\" % i\n self.destroy_vm(new_name)\n except Exception as error:\n print(error.message)\n raise error", "def delete(fits: Optional[str], start: Optional[str], end: Optional[str], out: Optional[str]):\n delete_in_ssda(fits=fits, start=start, end=end, out=out)", "def delete(self, uri, where, selectionArgs):\n pass", "def DeleteRange(self, rangeText, silent=False):\n startTaxon, stopTaxon, startColumn, stopColumn = self.ParseIndex(rangeText)\n if (self.translated == True):\n startColumn = startColumn * 3\n stopColumn = (stopColumn * 3) + 2\n if (startTaxon >= 0): #Make sure we had a valid range\n changeLength = 0\n deleteTaxon = False\n if ((startColumn == 0) & (stopColumn == len(self.alignment[0]) - 1)):\n deleteTaxon = True\n if ((startTaxon > 0) | (stopTaxon < len(self.alignment) - 1)):\n changeLength = (stopColumn - startColumn) + 1\n taxon = 0\n newSequences = []\n for Sequence in self.alignment:\n if (taxon in range(startTaxon, stopTaxon + 1)):\n if (not deleteTaxon):\n if (startColumn > 0):\n Sequence.seq = Sequence.seq[:startColumn] + Sequence.seq[stopColumn + 1:]\n else:\n Sequence.seq = Sequence.seq[stopColumn + 1:]\n if (changeLength):\n Sequence.seq = Sequence.seq + Seq('-' * changeLength)\n newSequences.append(Sequence)\n else:\n newSequences.append(Sequence)\n taxon += 1\n self.alignment = MultipleSeqAlignment(newSequences)\n if (not silent):\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % 
volname\n return g.run(mnode, cmd)", "def DeleteAnnotationsInRange(self, r, name):\n self.__context.builder.DocumentAnnotationDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end,\n name)\n # TODO(davidbyttow): split local annotations.", "def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")", "def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)", "def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0", "def delete_node(self, uri):\n if self.sm.already_exists('nodes', uri):\n self.sm.delete_node(uri)\n else:\n raise VOSpaceError(404, \"The specified node does not exist.\")", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete_segment(self, name: str) -> None:\n self._status.check_authority_for_draft()\n\n delete_data: Dict[str, Any] = {\"segmentName\": name}\n delete_data.update(self._status.get_status_info())\n\n self._client.open_api_do(\"DELETE\", \"segments\", self.dataset_id, json=delete_data)", "def UnsafeDestroyRange(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unlink(address):", "def fusion_api_delete_ipv4_subnet(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4subnet.delete(name, uri, api, headers)", "def snap_delete(mnode, snapname):\n\n cmd = \"gluster snapshot delete %s --mode=script\" % snapname\n return g.run(mnode, cmd)", "def delete_book(code: str):\n pass", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)", "def delete_source(self, src_name: SourceName) -> None:\n while True:\n try:\n response = self.genes.query(\n IndexName=\"src_index\",\n KeyConditionExpression=Key(\"src_name\").eq(src_name.value),\n )\n except ClientError as e:\n raise DatabaseReadException(e)\n records = 
response[\"Items\"]\n if not records:\n break\n with self.genes.batch_writer(\n overwrite_by_pkeys=[\"label_and_type\", \"concept_id\"]\n ) as batch:\n for record in records:\n try:\n batch.delete_item(\n Key={\n \"label_and_type\": record[\"label_and_type\"],\n \"concept_id\": record[\"concept_id\"],\n }\n )\n except ClientError as e:\n raise DatabaseWriteException(e)\n\n try:\n self.metadata.delete_item(Key={\"src_name\": src_name.value})\n except ClientError as e:\n raise DatabaseWriteException(e)", "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)" ]
[ "0.7234016", "0.67734325", "0.67198455", "0.6621982", "0.65296775", "0.6075114", "0.5923171", "0.5770565", "0.56951976", "0.56480885", "0.55567807", "0.54519486", "0.54502946", "0.5450167", "0.54362255", "0.5407726", "0.5364662", "0.52880555", "0.52648115", "0.5245897", "0.5229966", "0.52209115", "0.52201563", "0.51802874", "0.51561224", "0.51530665", "0.5140203", "0.5112177", "0.51027", "0.50976807" ]
0.81573164
0
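The delete wrapper above accepts either a name or a URI. A sketch of the name-to-URI resolution such a wrapper typically performs before issuing the DELETE; 'client' and the collection shape are stated assumptions:

    def delete_range(client, name=None, uri=None):
        # Resolve name -> uri when only a name is given, then DELETE the uri.
        if uri is None:
            if name is None:
                raise ValueError("either name or uri is required")
            members = client.get_ranges().get("members", [])  # assumed shape
            matches = [m for m in members if m.get("name") == name]
            if not matches:
                raise LookupError("no VSN range named %r" % name)
            uri = matches[0]["uri"]
        return client.delete(uri)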
Gets a default or paginated collection of VSN Ranges. [Arguments]
def fusion_api_get_vsn_range(self, uri=None, param='', api=None, headers=None): return self.vsnrange.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_range(n0: int, n1: int, ns: int) -> List[int]:\n # Return a range as a list\n def lrange(a, b, n=1) -> List[int]:\n return list(range(a, b, n))\n # Get the in-bounds part of the range\n n_range = lrange(max(0, n0), min(ns, n1))\n # Handle out-of-bounds indices by reflection across boundaries\n if n0 < 0:\n # Underflow\n n_range = lrange(-n0, 0, -1) + n_range\n if n1 > ns:\n # Overflow\n n_range = n_range + lrange(ns - 1, 2 * ns - n1 - 1, -1)\n\n return n_range", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def get_genomic_range( self ):\n return self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )", "def ranges(self):\n return self._ranges", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def test_get_range(self):\n pass", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangePatchArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def fusion_api_collect_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.collect(body, uri, api, headers)", "def lrange(self, name, start, end):\n self.connect()\n self._write('LRANGE %s %s %s\\r\\n' % (name, start, end))\n return self._get_multi_response()", "def get_rangelist(start, end, count):\n if start is not None and end is not None:\n if count != 0 and not (start == 0 and count < end):\n start = int(start)\n end = int(end)\n cnt = end - start\n rangelist = []\n div = int(start) / count + 1\n multiple = round(div, 0)\n start_range = int(count * multiple)\n n = 1\n for itr in range(0, start_range + count, (end - start)):\n if itr < count:\n rangelist.append([itr, itr + cnt, n])\n n += 1\n return rangelist\n return []", "def get_range(start, stop):\n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n print(nums)", "def range(self):\n return self.range_array", "def lrange(self, name, start, end):\r\n return self.format_inline('LRANGE', name, start, end)", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def get_range( value ):\n return list(range(value))", "def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums", "def get_range(value):\n return list(range(value))", "def zrange(self, name, start, end, desc=False, withscores=False):\r\n if desc:\r\n return self.zrevrange(name, start, end, withscores)\r\n pieces = ['ZRANGE', name, start, end]\r\n if withscores:\r\n pieces.append('withscores')\r\n return 
self.format_inline(*pieces, **{'withscores': withscores})", "def ranges(self) -> List[Range]:\n return list(iter(self._ranges))", "def _get_page_range(self):\r\n return list(range(1, self.num_pages + 1))", "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "async def getRanges(self, stock, sharesOut, company_name, index, session):\n # if stock got a dot change to dash\n if \".\" in stock:\n stock = stock.replace(\".\", \"-\")\n\n request = await session.request(\n method=\"GET\", url=API.format(stock, \"1604793600\", \"1605571200\")\n )\n dataJson = await request.json()\n try:\n startRange = dataJson[\"chart\"][\"result\"][0][\"meta\"][\"firstTradeDate\"]\n except:\n error = dataJson[\"chart\"][\"error\"][\"description\"]\n print(error)\n # self.df.drop(index=index, inplace=True)\n return\n\n endRange = dataJson[\"chart\"][\"result\"][0][\"meta\"][\"currentTradingPeriod\"][\n \"regular\"\n ][\"end\"]\n if startRange == None or endRange == None:\n print(\"range was not found\")\n return\n\n await self.getData(\n startRange, endRange, stock, sharesOut, company_name, session\n )", "def EnergyRanges(self, default=[None]):\n return self.data.get('metadata', {}).get('energy_ranges', default)", "def _range_to_list(cls, rng):\n ends = rng.split(\"-\")\n if len(ends) != 2:\n return []\n\n return list(range(int(ends[0]), int(ends[1]) + 1))" ]
[ "0.62501353", "0.5966355", "0.5966355", "0.5966355", "0.5966355", "0.59443134", "0.5804418", "0.5760165", "0.5759403", "0.5715633", "0.5715633", "0.5715633", "0.5715633", "0.570298", "0.56451786", "0.5644051", "0.56338984", "0.56107515", "0.5601582", "0.5578253", "0.55693614", "0.55651826", "0.5540617", "0.5536342", "0.55354184", "0.5534469", "0.5525117", "0.5499169", "0.5495439", "0.5487744" ]
0.683681
0
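The collection getter above supports default or paginated retrieval via its 'param' argument. A sketch of iterating a paginated collection with start/count query parameters, matching the '?start={0}&count={1}' convention visible in the negatives; the page shape and the 'get_page' callable are assumptions:

    def iter_ranges(get_page, page_size=100):
        # 'get_page' is a hypothetical callable wrapping the GET above,
        # e.g. lambda p: client.fusion_api_get_vsn_range(param=p).
        start = 0
        while True:
            page = get_page("?start=%d&count=%d" % (start, page_size))
            members = page.get("members", [])
            for member in members:
                yield member
            if len(members) < page_size:
                break
            start += page_size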
Returns all fragments that have been allocated from a VSN Range. [Arguments]
def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None): return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vwwn_range_free_fragments(self, uri, 
api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def run_fragments(options):\n fragments.fragments(\n bam=options.bam,\n fragment_path=options.fragments,\n min_mapq=options.min_mapq,\n nproc=options.nproc,\n cellbarcode=options.barcodetag,\n readname_barcode=options.barcode_regex,\n chromosomes=options.use_chrom,\n cells=options.cells,\n max_distance=options.max_distance,\n chunksize=options.chunksize,\n )", "def fragments(self):\n return len(self.byteruns())", "def list_fragments(self):\n return list(self.data.fragments)", "def read_fragments(filename: str):\n with open(filename, \"r\") as fd:\n # Read fragments and remove linebreaks from string\n fragments = [frag.strip() for frag in fd.readlines()]\n return fragments", "def fragments(self):\n return self.fragments_tree.vchildren_not_empty", "def sm_get_matching_blocks(s1,s2,min_length=1):\n anslist= list(SM(None, s1, s2).get_matching_blocks())\n\n\n anslist = [ l for l in anslist if l.size>=min_length]\n\n anslist=[ (s1[l.a:l.a+l.size], l.a, l.b, l.size) for l in anslist]\n return anslist", "def get_fragments_for_mdv_calculation(self):\n return list(self.fragments_for_mdv_calculation)", "def fusion_api_get_vsn_range(self, uri=None, param='', api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param=param)", "def fusion_api_allocate_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.allocate(body, uri, api, headers)", "def get_exons(chromStart, chromEnd, blockSizes, blockStarts):\n blockSizes = [int(i) for i in blockSizes.split(\",\") if not i == \"\" ]\n blockStarts = [int(i) for i in blockStarts.split(\",\") if not i == \"\" ]\n n = len(blockSizes)\n exons = []\n #print(\"block: \" + str(n))\n #print(blockSizes, blockStarts)\n for i in range(n):\n #print(i)\n blockStart = blockStarts[i]\n blockSize = blockSizes[i]\n exonStart = chromStart + blockStart\n exonEnd = exonStart + blockSize\n exons.append([exonStart, exonEnd])\n return(exons)", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return _calculate_fragmentation(buddyinfo_output)", "def populate_ranges(self,):\n self.ranges = list()\n # coredump: info target shows all sections in full detail\n # live debug: only file-backed sections are shown\n targetinfo = gdb.execute(\"info target\", False, True)\n for line in targetinfo.splitlines():\n line = line.strip()\n if line.startswith('`'):\n line = line.split(\"'\")[1]\n source = line[1:]\n continue\n if not line.startswith(\"0x\"):\n continue\n\n start, dash, end, str_is, memtype = line.split(maxsplit=4)\n assert(dash == '-' and str_is == 'is')\n start = int(start, 16)\n end = int(end, 16)\n new_range = MemoryRange(start, end-start, source, memtype)\n startoverlap = self.get_range(start)\n endoverlap = self.get_range(end)\n\n if endoverlap == startoverlap:\n endoverlap = None\n\n #TODO: splitup and punch holes/replace\n if memtype.startswith('.'):\n # gdb reports loadXXX sections on top of file-backed sections of the binary\n # probably because the kernel maps writeable pages on top of them\n # Therefore, keep the more accurate description from the file-backed section\n if startoverlap is not None and startoverlap.memtype == MemoryType.General:\n previous, current = self.split_range_at(start)\n self.ranges.remove(current)\n startoverlap = None\n if endoverlap is not None and endoverlap.memtype == MemoryType.General:\n current, end = self.split_range_at(end)\n 
self.ranges.remove(current)\n endoverlap = None\n\n if startoverlap is not None and endoverlap is not None:\n print(\"Overlapping memory ranges: %s in %s -> %s\" %\n (new_range, str(startoverlap), str(endoverlap)))\n bisect.insort(self.ranges, new_range)\n\n # live target: run-time allocated memory and some file-backed sections\n # There typically is overlap with the 'info target' output, so give precedence\n # to the previously added ranges\n mappinginfo = gdb.execute(\"info proc mappings\", False, True)\n for line in mappinginfo.splitlines():\n line = line.strip()\n if not line.startswith(\"0x\"):\n continue\n\n items = line.split()\n if len(items) == 4:\n start, end, size, offset = items\n source = \"unknown\"\n elif len(items) == 5:\n start, end, size, offset, source = items\n else:\n print(\"Unexpected line when parsing 'info proc mappings': %s\" % line)\n continue\n\n start = int(start, 16)\n size = int(size, 16)\n end = int(end, 16)\n\n new_range = MemoryRange(start, size, source, source)\n self.tentative_add_range(new_range)", "def procInfoParser (regions):\n\tmslines=regions.split('\\n')\n\tretarray=[]\n\tfor s in mslines:\n\t\tif (s.find(\"0x\") > -1):\n\t\t\taddresses=s.split()\n\t\t\tstartaddress=int(addresses[0], 16)\n\t\t\tendaddress=int(addresses[1],16)\n\t\t\tsize=endaddress-startaddress\n\t\t\tretarray.append([startaddress, endaddress, size])\n\treturn retarray", "def length_n_frags(mol, initial):\n frags = []\n current_frag = initial\n if len(current_frag) >= 4:\n return [current_frag]\n\n neighbor_indices = mol.graph.neighbors[current_frag[-1]]\n for neighbor_ind in neighbor_indices:\n if neighbor_ind not in current_frag:\n new_frag = current_frag + (neighbor_ind, )\n frags += length_n_frags(mol, new_frag)\n return frags", "def getSegments(self) -> List[int]:\n ...", "def find_segment(bv: binaryninja.binaryview.BinaryView, name: str) -> List[Tuple[int, int]]:\n result = []\n for sn in bv.sections:\n sec = bv.get_section_by_name(sn)\n if sec.name == name:\n result.append((sec.start, sec.end))\n return result", "def get_genomic_range( self ):\n return self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]" ]
[ "0.6541962", "0.6333344", "0.6251593", "0.6038683", "0.59697324", "0.5958482", "0.5906588", "0.5755165", "0.54627466", "0.5343313", "0.5290266", "0.5272229", "0.5192584", "0.51895595", "0.5129478", "0.51206946", "0.5109758", "0.50958526", "0.5066217", "0.5060956", "0.5030133", "0.50035876", "0.5001329", "0.49677482", "0.49606332", "0.49222243", "0.49203494", "0.48994377", "0.4897036", "0.48898602" ]
0.707599
0
Returns all the free fragments in a VSN Range. [Arguments]
def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None): return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_vwwn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return _calculate_fragmentation(buddyinfo_output)", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def fragments(self):\n return self.fragments_tree.vchildren_not_empty", "def run_fragments(options):\n fragments.fragments(\n bam=options.bam,\n fragment_path=options.fragments,\n min_mapq=options.min_mapq,\n nproc=options.nproc,\n cellbarcode=options.barcodetag,\n readname_barcode=options.barcode_regex,\n chromosomes=options.use_chrom,\n 
cells=options.cells,\n max_distance=options.max_distance,\n chunksize=options.chunksize,\n )", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def fragments(self):\n return len(self.byteruns())", "def select_vertices_free(self):\n _filter = compas_rhino.rs.filter.point\n guids = compas_rhino.rs.GetObjects(message=\"Select Free Vertices.\", preselect=True, select=True, group=False, filter=_filter)\n if guids:\n keys = [self.guid_vertex_free[guid] for guid in guids if guid in self.guid_vertex_free]\n else:\n keys = []\n return keys", "def free(range_lst, range_start, range_end, user_start, user_end):\n \n # Attempt to calculate range to subtract times from\n minute_range = []\n # range_start = arrow.get(range_start, \"MM/DD/YYYY hh:mm A\")\n # range_start_format = range_start.format(\"MM/DD/YYYY hh:mm A\")\n # range_end = arrow.get(range_end, \"MM/DD/YYYY hh:mm A\")\n # range_end_format = range_end.format(\"MM/DD/YYYY hh:mm A\")\n\n # Calculate range of minutes between potential start and end given by event creator\n minute_range = []\n for r in arrow.Arrow.range(\"minute\", range_start, range_end):\n minute_range.append(r)\n\n # Attempt to calculate user range of busy times\n try:\n user_start = arrow.get(user_start, \"MM/DD/YYYY hh:mm A\")\n user_end = arrow.get(user_end, \"MM/DD/YYYY hh:mm A\")\n\n user_range = arrow.Arrow.range(\"minute\", user_start, user_end)\n except:\n logger.info(\"MODULE 'free_times' FUNCTION 'free' -- Can't calculate USER range using {} - {}\".format(user_start, user_end))\n # Return empty list on fail\n return []\n\n # Subtract times from user_range from the general minute_range\n for time in user_range:\n if time in minute_range:\n index = minute_range.index(time)\n # None type will be used to generate range in flask_main find_busy_times\n minute_range[index] = None\n \n return minute_range", "def fusion_api_get_vsn_range(self, uri=None, param='', api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param=param)", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def list_fragments(self):\n return list(self.data.fragments)", "def fragmentation(free_resources_gaps, p=2):\n f = free_resources_gaps\n frag = pd.Series()\n for i, fi in enumerate(f):\n if fi.size == 0:\n frag_i = 0\n else:\n frag_i = 1 - (sum(fi**p) / sum(fi)**p)\n frag.set_value(i, frag_i)\n return frag", "def length_n_frags(mol, initial):\n frags = []\n current_frag = initial\n if len(current_frag) >= 4:\n return [current_frag]\n\n neighbor_indices = mol.graph.neighbors[current_frag[-1]]\n for neighbor_ind in neighbor_indices:\n if neighbor_ind not in current_frag:\n new_frag = current_frag + (neighbor_ind, )\n frags += length_n_frags(mol, new_frag)\n return frags", "def sm_get_matching_blocks(s1,s2,min_length=1):\n anslist= list(SM(None, s1, s2).get_matching_blocks())\n\n\n anslist = [ l for l in 
anslist if l.size>=min_length]\n\n anslist=[ (s1[l.a:l.a+l.size], l.a, l.b, l.size) for l in anslist]\n return anslist", "def get_fragments_for_mdv_calculation(self):\n return list(self.fragments_for_mdv_calculation)", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]", "def list(self, start=0, end=10):\n print(\"GBTIDL> \")", "def read_fragments(filename: str):\n with open(filename, \"r\") as fd:\n # Read fragments and remove linebreaks from string\n fragments = [frag.strip() for frag in fd.readlines()]\n return fragments", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0" ]
[ "0.68656284", "0.66968524", "0.6687345", "0.66764784", "0.64627624", "0.5977098", "0.5810105", "0.57575476", "0.54598117", "0.54573166", "0.52383304", "0.51793027", "0.5142573", "0.5138762", "0.5129117", "0.5112262", "0.498917", "0.49610236", "0.49381578", "0.49207553", "0.4831903", "0.48044276", "0.47601378", "0.47378582", "0.4735213", "0.47266632", "0.47232008", "0.46952236", "0.46668613", "0.4607212" ]
0.73796713
0
Creates a VWWN Range. [Arguments]
def fusion_api_create_vwwn_range(self, body, api=None, headers=None): return self.vwwnrange.create(body, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, range):\n raise NotImplementedError", "def fusion_api_create_vsn_range(self, body, api=None, headers=None):\n return self.vsnrange.create(body, api, headers)", "def Range(self, from: int, to: int) -> BaseVector:", "def make_voig(w,minZ,maxZ,m=mz0,fixw=False):\n cmds = []\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('width[2.49,0,5.0]')\n cmds.append('sigma[1,0,5.0]')\n cmds.append(\"RooVoigtian::voig(x,m,width,sigma)\")\n [w.factory(cmd) for cmd in cmds]\n if fixw:\n w.var('width').setConstant(kTRUE) if w.var('width') else None\n return w.pdf('voig'), kFALSE", "def fusion_api_allocate_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.allocate(body, uri, api, headers)", "def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)", "def computerange(lyrindex):\n for i in range(len(lyrindex)):\n if i != len(lyrindex) - 1:\n if lyrindex[i][0].find('.') > 0: # special case where inventory files have two records\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+2][1]) - 1) )\n else:\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+1][1]) - 1) )\n else:\n lyrindex[-1].append( 'range=%s' % ( lyrindex[-1][1] ) ) \n return lyrindex", "def boundaries_new(*args):\n return _ida_hexrays.boundaries_new(*args)", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def create_range(range_class):\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n return Range.objects.create(\n name=range_class.name,\n proxy_class=_class_path(range_class))", "def fusion_api_get_vwwn_range(self, uri=None, param='', api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param=param)", "def vrange(starts, stops):\n stops = np.asarray(stops)\n l = stops - starts # Lengths of each range.\n return np.repeat(stops - l.cumsum(), l) + np.arange(l.sum()), l.cumsum()", "def create_grism_waverange(outname=\"\",\n history=\"Ground NIRCAM Grismwavelengthrange\",\n author=\"STScI\",\n filter_range=None):\n ref_kw = common_reference_file_keywords(reftype=\"wavelengthrange\",\n title=\"NIRCAM Grism wavelenghtrange\",\n description=\"NIRCAM Grism+Filter Wavelength Ranges\",\n exp_type=\"NRC_GRISM\",\n author=author,\n model_type=\"WavelengthrangeModel\",\n filename=outname,\n )\n\n if filter_range is None:\n # These numbers from Nor Pirzkal, in microns\n filter_range = {1: {'F250M': [2.500411072, 4.800260833],\n 'F277W': [2.500411072, 3.807062006],\n 'F300M': [2.684896869, 4.025318456],\n 'F322W2': [2.5011293930000003, 4.215842089],\n 'F335M': [3.01459734, 4.260432726],\n 'F356W': [3.001085025, 4.302320901],\n 'F360M': [3.178096344, 4.00099629],\n 'F410M': [3.6267051809999997, 4.5644598],\n 'F430M': [4.04828939, 4.511761774],\n 'F444W': [3.696969216, 4.899565197],\n 'F460M': [3.103778615, 4.881999188],\n 'F480M': [4.5158154679999996, 4.899565197]},\n 2: {'F250M': [2.500411072, 2.667345336],\n 'F277W': [2.500411072, 3.2642254050000004],\n 'F300M': [2.6659796289999997, 3.2997071729999994],\n 'F322W2': [2.5011293930000003, 4.136119434],\n 'F335M': [2.54572003, 3.6780519760000003],\n 'F356W': [2.529505253, 4.133416971],\n 
'F360M': [2.557881113, 4.83740855],\n 'F410M': [2.5186954019999996, 4.759037127],\n 'F430M': [2.5362614100000003, 4.541488865],\n 'F444W': [2.5011293930000003, 4.899565197],\n 'F460M': [2.575447122, 4.883350419],\n 'F480M': [2.549773725, 4.899565197]}}\n\n # array of integers\n orders = list(filter_range.keys())\n orders.sort()\n\n # same filters for every order, array of strings\n wrange_selector = list(filter_range[orders[0]].keys())\n wrange_selector.sort()\n\n # The lists below need\n # to remain ordered to be correctly referenced\n wavelengthrange = []\n for order in orders:\n o = []\n for fname in wrange_selector:\n o.append(filter_range[order][fname])\n wavelengthrange.append(o)\n\n ref = wcs_ref_models.WavelengthrangeModel()\n ref.meta.update(ref_kw)\n ref.meta.exposure.p_exptype = \"NRC_GRISM|NRC_TSGRISM\"\n ref.meta.input_units = u.micron\n ref.meta.output_units = u.micron\n ref.wrange_selector = wrange_selector\n ref.wrange = wavelengthrange\n ref.order = orders\n\n entry = HistoryEntry({'description': history, 'time': datetime.datetime.utcnow()})\n sdict = Software({'name': 'nircam_reftools.py',\n 'author': author,\n 'homepage': 'https://github.com/spacetelescope/jwreftools',\n 'version': '0.7.1'})\n entry['sofware'] = sdict\n ref.history['entries'] = [entry]\n ref.to_asdf(outname)\n ref.validate()", "def build_W(points):\n return None", "def create_from_bounds(self, lbs, ubs):\n self.base_vertices = (np.array([lbs])+np.array([ubs])).T/2\n self.base_vectors = np.diag((np.array(ubs)-np.array(lbs))/2)", "def make_voigbg(w,minZ,maxZ,m=mz0,fixw=False):\n cmds = []\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('width[2.49,0,5.0]')\n cmds.append('sigma[1,0,5.0]')\n cmds.append('expar[-0.1,-1,0]')\n cmds.append(\"RooVoigtian::voig(x,m,width,sigma)\")\n cmds.append(\"RooExponential::exp(x,expar)\")\n cmds.append('nvoig[1,0,1000000]')\n cmds.append('nexp[1,0,1000000]')\n cmds.append(\"SUM::voigbg(nvoig*voig,nexp*exp)\")\n [w.factory(cmd) for cmd in cmds]\n if fixw:\n w.var('width').setConstant(kTRUE) if w.var('width') else None\n return w.pdf('voigbg'), kTRUE", "def location_bounds(glimpse_w, input_w):\n offset = float(glimpse_w) / input_w\n lower = (-1 + offset)\n upper = (1 - offset)\n\n assert lower >= -1 and lower <= 1, 'lower must be in (-1,1), is {}'.format(lower)\n assert upper >= -1 and upper <= 1, 'upper must be in (-1,1), is {}'.format(upper)\n\n return lower, upper", "def build_range(identity: str, type_uri: str = SBOL_RANGE):\n start = 1\n end = 1\n obj = Range(PYSBOL3_MISSING, start, end, identity=identity, type_uri=type_uri)\n # Remove the placeholder values\n obj._properties[SBOL_SEQUENCES] = []\n obj._properties[SBOL_START] = []\n obj._properties[SBOL_END] = []\n return obj", "def v(w,s):\n return w", "def _get_sight_range(self):\n raise NotImplementedError", "def range(*args:List[str], negate:bool=False) -> str:\n character_set = \"\"\n for arg in args:\n try:\n start, end = arg\n character_set += f\"{start}-{end}\"\n except:\n raise\n\n negate = \"^\" if negate else \"\"\n return f\"[{negate}{character_set}]\"", "def build_range_entry(var, win):\n\tentry = Gtk.Scale.new(Gtk.Orientation.HORIZONTAL,\n\t\tGtk.Adjustment(var.get(), var.type.low, var.type.up))\n\tentry.set_value(var.get())\n\tentry.set_digits(0)\n\tentry.set_hexpand(True)\n\tobs = RangeEntryObserver(var)\n\tentry.connect(\"change-value\", obs.on_change_value)\n\treturn entry", "def create_follicle_uv(source_object, u_pos, v_pos):\n result = _create_follicle(source_object, uv_position = [u_pos, 
v_pos])\n return result", "def xpointerNewRange(self, startindex, end, endindex):\n if end is None: end__o = None\n else: end__o = end._o\n ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)\n if ret is None:raise treeError('xmlXPtrNewRange() failed')\n return xpathObjectRet(ret)", "def define_windows(w, data):\n data_w1 = data[0:w, :]\n data_w2 = data[w:w * 2, :]\n data_w3 = data[w * 2:w * 3, :]\n data_w4 = data[w * 3:w * 4, :]\n data_w5 = data[w * 4:w * 5, :]\n data_w6 = data[w * 5:, :]\n\n return data_w1, data_w2, data_w3, data_w4, data_w5, data_w6", "def create_bounds(dict, number_of_nodes):\n\n x_min = dict[\"x_min\"]\n x_max = dict[\"x_max\"]\n u_min = dict[\"u_min\"]\n u_max = dict[\"u_max\"]\n\n v_min = []\n v_max = []\n for k in range(number_of_nodes - 1):\n v_min += x_min\n v_max += x_max\n v_min += u_min\n v_max += u_max\n\n if \"tf_min\" in dict:\n if \"tf_max\" in dict:\n tf_min = dict[\"tf_min\"]\n tf_max = dict[\"tf_max\"]\n v_min.append(tf_min)\n v_max.append(tf_max)\n\n v_min += x_min\n v_max += x_max\n\n return vertcat(*v_min), vertcat(*v_max)", "def window_bounds(i, n, wn):\n\n at_end = False\n hw = wn // 2\n\n start = i - hw\n stop = i + hw + 1\n\n if start < 0:\n at_end = True\n start = 0\n elif stop > n:\n at_end = True\n stop = n\n\n return start, stop, at_end", "def open_range(start, stop, step):\n return np.arange(start, stop+step/2, step)", "def make_2d_butterworth_window(width, height, u, v, n):\n xcenter = np.ceil(width / 2.0) - 1.0\n ycenter = np.int16(np.ceil(height / 2.0) - 1)\n xlist = np.arange(width) - xcenter\n window = 1.0 / (1.0 + np.power(xlist / u, 2 * n))\n row1 = ycenter - np.int16(v)\n row2 = ycenter + np.int16(v) + 1\n window_2d = np.ones((height, width), dtype=np.float32)\n window_2d[row1:row2] = window\n return window_2d" ]
[ "0.57634646", "0.55075175", "0.5502798", "0.5441266", "0.5416495", "0.5335062", "0.5298344", "0.52322584", "0.5223062", "0.5213523", "0.519298", "0.51765895", "0.5167295", "0.5154946", "0.5150673", "0.5148411", "0.513757", "0.5063334", "0.50298923", "0.49978057", "0.49857804", "0.49742234", "0.491213", "0.48411384", "0.48240712", "0.4820495", "0.48174798", "0.48049322", "0.47988898", "0.4782836" ]
0.6669603
0
Updates a VWWN Range. [Arguments]
def fusion_api_edit_vwwn_range(self, body, uri, api=None, headers=None): return self.vwwnrange.update(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_upw(self, arg):\n self.do_timesheet('update week')", "def update_velocities(self, wx, wy):\r\n self.wx = wx\r\n self.wy = wy", "def fusion_api_edit_vsn_range(self, body, uri, api=None, headers=None):\n return self.vsnrange.update(body, uri, api, headers)", "def fusion_api_create_vwwn_range(self, body, api=None, headers=None):\n return self.vwwnrange.create(body, api, headers)", "def update(self, x: int, y: float, w: float) -> None:\n while len(self.x) < x - 1:\n self.x.append(len(self.x))\n self.y.append(y)\n self.w.append(w)\n if self.w[x - 1] <= w:\n self.x[x - 1] = x\n self.y[x - 1] = y\n self.w[x - 1] = w", "def update(self, v, r):\n pass", "def setWeights(self, w):\n raise NotImplementedError", "def update(*args):", "def update(self, **kwargs):\n for kwarg in kwargs:\n if kwarg not in self._ALLOWED_KEYWORDS:\n raise NNDCInputError(f'Unknown keyword: \"{kwarg}\"')\n if \"nuc\" in kwargs:\n self._data[\"spnuc\"] = \"name\"\n self._data[\"nuc\"] = kwargs[\"nuc\"]\n for x in [\"z\", \"a\", \"n\"]:\n # handle Z, A, and N settings\n if x in kwargs:\n self._data[\"spnuc\"] = \"zanrange\"\n self._data[x + \"min\"], self._data[x + \"max\"] = _format_range(\n (kwargs[x], kwargs[x])\n )\n # handle *_range, *_any, *_odd, *_even\n elif x + \"_range\" in kwargs:\n self._data[\"spnuc\"] = \"zanrange\"\n self._data[x + \"min\"], self._data[x + \"max\"] = _format_range(\n kwargs[x + \"_range\"]\n )\n if self._data[x + \"min\"] == \"\":\n self._data[x + \"min\"] = \"0\"\n if self._data[x + \"max\"] == \"\":\n self._data[x + \"max\"] = \"300\"\n if x + \"_any\" in kwargs:\n self._data[\"even\" + x] = \"any\"\n elif x + \"_even\" in kwargs:\n self._data[\"even\" + x] = \"even\"\n elif x + \"_odd\" in kwargs:\n self._data[\"even\" + x] = \"odd\"\n # handle half-life range condition\n if \"t_range\" in kwargs:\n self._data[\"tled\"] = \"enabled\"\n self._data[\"tlmin\"], self._data[\"tlmax\"] = _format_range(kwargs[\"t_range\"])", "def do_upm(self, arg):\n self.do_timesheet('update week')", "def update(self):\n self._sync_ranges()\n self._update_params()", "def do_up(self, arg):\n self.do_timesheet('update %s' % arg)", "def update_frame(self, key, ranges=None):", "def ir_vol_update():\n wb = xw.Book.caller()\n ws = wb.sheets(\"IR_VOL\")\n\n # Declare connection object \n swaption_vol = ws.range(\"SwaptionVol\").value\n cap_vol = ws.range(\"CapVol\").value\n engine = utils.db_engine(database = 'otcora', schema = 'OTCUSER', password = 'otcuser')\n Session = sessionmaker(bind=engine)\n session = Session()\n # The code below inputs swaption vol data\n updater.updater(data = swaption_vol, table_name = 'ficc_swaption_atm' ,\n head_nullable_data=4, date_index = 0, factor = 0.0001,\n data_name = 'swaption vol/premium data',\n engine = engine, session = session)\n # The code below inputs cap vol data\n updater.updater(data = cap_vol, table_name = 'ficc_cap_atm',\n head_nullable_data=3, date_index = 0, factor = 0.0001,\n data_name = 'cap vol/premium data',\n engine = engine, session = session)\n session.close()\n engine.dispose() \n \n #utils.Mbox(\"\", \"swaption & cap vol done\", 0)", "def update(self, v_input):\n\n self.v = v_input", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n raise NotImplementedError", "def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to 
verify limits", "def _modify_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n modifier: Callable[[str, int, str], Any],\n datacenter_id: int,\n datacenter_name: str,\n) -> List[Any]:\n if (not start_letter.isalpha) or (not stop_letter.isalpha):\n raise InvalidRangeError\n\n if start_number < 1 or start_number > stop_number:\n raise InvalidRangeError\n\n if start_letter.upper() > stop_letter.upper():\n raise InvalidRangeError\n\n alphabet: str = string.ascii_uppercase\n letters: str = alphabet[\n alphabet.index(start_letter.upper()) : alphabet.index(stop_letter.upper()) + 1\n ]\n\n results: List[Any] = []\n try:\n for letter in letters:\n for number in range(start_number, stop_number + 1):\n label = f\"{letter}{number}\"\n results.append(modifier(label, datacenter_id, datacenter_name))\n except (\n DBWriteException,\n InvalidRangeError,\n RackNotEmptyError,\n RackDoesNotExistError,\n ):\n raise\n\n return results", "def updateRPC(loc,weight): #status: Done, not tested\r\n pass", "def setWeight(self, w):\n self._W = w", "def UpgradeWeapon(self):\n label = self.wepSpin.get()\n for index in range(min(self.squad.current_size, self.unitToWeap[label][1])):\n upgradedUnit = next(x for x in self.squad.units if x.name == self.unitToWeap[label][3])\n upgradedUnit.armRangedWeapon(weapon.ranged_weapons[self.unitToWeap[label][0]])\n self.squad.point_cost += self.unitToWeap[label][2] \n self.pointLabel['text'] = self.squad.point_cost", "def update_view(self, w: Wrapper) -> None:\n\n w.setStyleSheet(\"/* */\") # forces visual update", "def weight_update(u_ff, u_wc, alpha, beta, w, fan_all):\r\n mult_wc = np.matmul(np.reshape(hard_sigmoid_array(u_wc), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_wc), (1, fan_all)))\r\n mult_ff = np.matmul(np.reshape(hard_sigmoid_array(u_ff), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_ff), (1, fan_all)))\r\n delta_w = alpha * (1 / beta) * (mult_wc - mult_ff)\r\n delta_w[np.diag_indices(fan_all)] = 0\r\n w = w + delta_w\r\n return w", "def update_V(self, Vs, Vs_next, reward, alpha, gamma):\r\n return Vs + alpha * (reward + gamma * Vs_next - Vs)", "def update(self, *args, **kw):\n pass", "def update_w(self, w):\n # Need to update the scaled weights\n if self.scale_weights:\n self._scale_weights_to_degree(w)\n self._generate_weighted_adj_matrices()\n # once we get new DW matrices, multiply by weights\n super().update_w(w)\n self._degree_weight_weighted_matrices()", "def inplace_update(x, indices, v, y, kernel_name=\"inplace_update\"):\n output_reslut = InplaceUpdate(x, indices, v)\n return output_reslut.tik_instance_function(kernel_name)", "def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)", "def computerange(lyrindex):\n for i in range(len(lyrindex)):\n if i != len(lyrindex) - 1:\n if lyrindex[i][0].find('.') > 0: # special case where inventory files have two records\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+2][1]) - 1) )\n else:\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+1][1]) - 1) )\n else:\n lyrindex[-1].append( 'range=%s' % ( lyrindex[-1][1] ) ) \n return lyrindex" ]
[ "0.5747974", "0.5369474", "0.5360797", "0.53398865", "0.5231402", "0.51893294", "0.5051157", "0.5046109", "0.5045378", "0.5031286", "0.500662", "0.49857685", "0.49686584", "0.49669436", "0.49450877", "0.49248904", "0.48962656", "0.48891562", "0.48767418", "0.4846663", "0.48253828", "0.4800413", "0.47929573", "0.47846517", "0.47819123", "0.4778705", "0.47694305", "0.47679013", "0.47591913", "0.47457165" ]
0.64988935
0
Deletes a VWWN range based on name OR uri. [Arguments]
def fusion_api_delete_vwwn_range(self, name=None, uri=None, api=None, headers=None): return self.vwwnrange.delete(name, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vsnrange.delete(name, uri, api, headers)", "def delete_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> None:\n _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_delete_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def fusion_api_delete_ipv4_range(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4range.delete(name, uri, api, headers)", "def fusion_api_delete_vmac_range(self, name=None, uri=None, api=None, headers=None):\n return self.vmacrange.delete(name, uri, api, headers)", "def deleteAddressRange(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def DeleteRange(self, r):\n self.__context.builder.DocumentDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end)\n left = self._blip_data.content[:r.start]\n right = self._blip_data.content[r.end + 1:]\n self._blip_data.content = left + right", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def DeleteAnnotationsInRange(self, r, name):\n self.__context.builder.DocumentAnnotationDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end,\n name)\n # TODO(davidbyttow): split local annotations.", "def delete(fits: Optional[str], start: Optional[str], end: Optional[str], out: Optional[str]):\n delete_in_ssda(fits=fits, start=start, end=end, out=out)", "def delete_UI_transaction_range(account):\n\t_day1 = read_day_range('start')\n\t_day2 = read_day_range('end')\n\tif (_day1 > _day2):\n\t\tprint('Perioada invalida.')\n\telse:\n\t\tdeleted = delete_transaction_range(account, _day1, _day2)\n\t\tif (not deleted):\n\t\t\tprint('Nu s-a efectuat nici o stergere.')\n\t\telse:\n\t\t\tprint('Stergere finalizata.')", "def delete(self, uri, where, selectionArgs):\n pass", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def Multi_vm_delete(self, vm_name, s_range, e_range):\n try:\n for i in range(s_range, e_range):\n new_name = vm_name + \"%s\" % i\n self.destroy_vm(new_name)\n except Exception as error:\n print(error.message)\n raise error", "def remove(name):", "def delete_this_region(self):", "def UnsafeDestroyRange(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = 
wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def delete_sense_relation(wn, source, target, change_list=None):\n delete_sense_rel(wn, source, target, change_list)\n delete_sense_rel(wn, target, source, change_list)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n wn_source = wn\n entry = wn_source.entry_by_id(source_entry)\n if entry:\n sense = [sense for sense in entry.senses if sense.id == source][0]\n if not any(r for r in sense.sense_relations if r.target == target):\n print(\"No sense relations deleted\")\n else:\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]\n if change_list:\n change_list.change_entry(wn, entry)\n else:\n print(\"No entry for \" + source_entry)", "def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % volname\n return g.run(mnode, cmd)", "def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)", "def doDelete(self, upperRow, upperCol, lowerRow, lowerCol):\n if app.config.strict_debug:\n assert isinstance(upperRow, int)\n assert isinstance(upperCol, int)\n assert isinstance(lowerRow, int)\n assert isinstance(lowerCol, int)\n assert upperRow <= lowerRow\n assert upperRow != lowerRow or upperCol <= lowerCol\n if self.selectionMode == kSelectionBlock:\n self.parser.deleteBlock(upperRow, upperCol, lowerRow, lowerCol)\n elif (self.selectionMode == kSelectionNone or\n self.selectionMode == kSelectionAll or\n self.selectionMode == kSelectionCharacter or\n self.selectionMode == kSelectionLine or\n self.selectionMode == kSelectionWord):\n self.parser.deleteRange(upperRow, upperCol, lowerRow, lowerCol)", "def delete_book(code: str):\n pass", "def DeleteRange(self, rangeText, silent=False):\n startTaxon, stopTaxon, startColumn, stopColumn = self.ParseIndex(rangeText)\n if (self.translated == True):\n startColumn = startColumn * 3\n stopColumn = (stopColumn * 3) + 2\n if (startTaxon >= 0): #Make sure we had a valid range\n changeLength = 0\n deleteTaxon = False\n if ((startColumn == 0) & (stopColumn == len(self.alignment[0]) - 1)):\n deleteTaxon = True\n if ((startTaxon > 0) | (stopTaxon < len(self.alignment) - 1)):\n changeLength = (stopColumn - startColumn) + 1\n taxon = 0\n newSequences = []\n for Sequence in self.alignment:\n if (taxon in range(startTaxon, stopTaxon + 1)):\n if (not deleteTaxon):\n if (startColumn > 0):\n Sequence.seq = Sequence.seq[:startColumn] + Sequence.seq[stopColumn + 1:]\n else:\n Sequence.seq = 
Sequence.seq[stopColumn + 1:]\n if (changeLength):\n Sequence.seq = Sequence.seq + Seq('-' * changeLength)\n newSequences.append(Sequence)\n else:\n newSequences.append(Sequence)\n taxon += 1\n self.alignment = MultipleSeqAlignment(newSequences)\n if (not silent):\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def deleteAttributeRange(self, startKey=None, endKey=None, limit=None):\n self.graph.deleteExtendedAttributeRange(entityId, startKey, endKey, limit)", "def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)", "def fusion_api_delete_ipv4_subnet(self, name=None, uri=None, api=None, headers=None):\n return self.ipv4subnet.delete(name, uri, api, headers)", "def snap_delete(mnode, snapname):\n\n cmd = \"gluster snapshot delete %s --mode=script\" % snapname\n return g.run(mnode, cmd)" ]
[ "0.76495445", "0.6708078", "0.6698595", "0.66175586", "0.6400217", "0.63573074", "0.58918494", "0.5551983", "0.5468459", "0.54335", "0.539991", "0.53929645", "0.5274481", "0.5232686", "0.51956296", "0.5192239", "0.51262724", "0.5093625", "0.5067205", "0.50319296", "0.5022588", "0.5009807", "0.49966523", "0.49730557", "0.4959366", "0.49489254", "0.4927398", "0.49231625", "0.49137002", "0.48698887" ]
0.80351084
0
Gets a default or paginated collection of VWWN Ranges. [Arguments]
def fusion_api_get_vwwn_range(self, uri=None, param='', api=None, headers=None): return self.vwwnrange.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def _get_page_range(self):\r\n return list(range(1, self.num_pages + 1))", "def test_get_range(self):\n pass", "def fusion_api_collect_vwwn_range(self, body, uri, api=None, headers=None):\n return self.vwwnrange.collect(body, uri, api, headers)", "def get_rack_range(\n start_letter: str,\n stop_letter: str,\n start_number: int,\n stop_number: int,\n datacenter_id: int,\n datacenter_name: str,\n) -> List[JSON]:\n return _modify_rack_range(\n start_letter=start_letter,\n stop_letter=stop_letter,\n start_number=start_number,\n stop_number=stop_number,\n modifier=_get_rack_modifier,\n datacenter_id=datacenter_id,\n datacenter_name=datacenter_name,\n )", "def range_inclusive(start, stop):\n return range(start, stop + 1)", "def ranges(self):\n return self._ranges", "def zrange(self, name, start, end, desc=False, withscores=False):\r\n if desc:\r\n return self.zrevrange(name, start, end, withscores)\r\n pieces = ['ZRANGE', name, start, end]\r\n if withscores:\r\n pieces.append('withscores')\r\n return self.format_inline(*pieces, **{'withscores': withscores})", "def get_range(n0: int, n1: int, ns: int) -> List[int]:\n # Return a range as a list\n def lrange(a, b, n=1) -> List[int]:\n return list(range(a, b, n))\n # Get the in-bounds part of the range\n n_range = lrange(max(0, n0), min(ns, n1))\n # Handle out-of-bounds indices by reflection across boundaries\n if n0 < 0:\n # Underflow\n n_range = lrange(-n0, 0, -1) + n_range\n if n1 > ns:\n # Overflow\n n_range = n_range + lrange(ns - 1, 2 * ns - n1 - 1, -1)\n\n return n_range", "def get_rangelist(start, end, count):\n if start is not None and end is not None:\n if count != 0 and not (start == 0 and count < end):\n start = int(start)\n end = int(end)\n cnt = end - start\n rangelist = []\n div = int(start) / count + 1\n multiple = round(div, 0)\n start_range = int(count * multiple)\n n = 1\n for itr in range(0, start_range + count, (end - start)):\n if itr < count:\n rangelist.append([itr, itr + cnt, n])\n n += 1\n return rangelist\n return []", "def lrange(self, name, start, end):\n self.connect()\n self._write('LRANGE %s %s %s\\r\\n' % (name, start, end))\n return self._get_multi_response()", "def change_default_range(networks, number_excluded_ips,\n cut_from_start=True):\n for default_network in filter(\n lambda x: ((x['name'] != 'fuelweb_admin')and\n (x['name'] != 'private')),\n networks):\n default_range = [netaddr.IPAddress(str(ip)) for ip\n in default_network[\"ip_ranges\"][0]]\n if cut_from_start:\n new_range = [default_range[0],\n default_range[0] + number_excluded_ips]\n else:\n new_range = [default_range[0] + number_excluded_ips + 1,\n default_range[1]]\n default_network[\"ip_ranges\"][0] = [str(ip)\n for ip in new_range]", "def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums", "def range(self):\n return self.range_array", "def byrange(self, start, stop):\n\t\treturn ElementsByRange(self.AEM_want, self, (start, stop))", "def getPageRange(base_url, node, page_range, page_size, from_date=None, to_date=None, delay=None):\n\n\tdocs = None\n\n\tfor p in page_range:\n\t\tprint \"Getting page %d\" % (p)\n\n\t\tpage_result = getPage(base_url, node, p, from_date=from_date, to_date=to_date)\n\t\tif docs is None:\n\t\t\tdocs = page_result\n\t\telse:\n\t\t\tdocs = docs.append(page_result)\n\n\t\tif delay is not None:\n\t\t\ttime.sleep(delay)\n\n\treturn docs", "def 
range_callback(data):\n global D\n D.ranges = data.ranges", "def ranges(self) -> List[Range]:\n return list(iter(self._ranges))", "def _get_sight_range(self):\n raise NotImplementedError", "def lrange(self, name, start, end):\r\n return self.format_inline('LRANGE', name, start, end)", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def get_range(start, stop):\n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n print(nums)", "def getRange(self):\n return self.range", "def fusion_api_create_vwwn_range(self, body, api=None, headers=None):\n return self.vwwnrange.create(body, api, headers)", "def range() -> List[int]:\n pass", "def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end", "def __pages_range(self):\n return range(1, self.total_pages + 1)" ]
[ "0.5967996", "0.581678", "0.57551146", "0.57523656", "0.57495195", "0.5730547", "0.5714912", "0.5713446", "0.5653016", "0.564608", "0.5578608", "0.552246", "0.5521656", "0.55034316", "0.5494489", "0.54812235", "0.5470905", "0.54666096", "0.54648674", "0.54303795", "0.54171914", "0.54171914", "0.54171914", "0.54171914", "0.54140747", "0.53998494", "0.53993994", "0.537964", "0.5368252", "0.53679836" ]
0.6412068
0
Returns all fragments that have been allocated from a VWWN Range. [Arguments]
def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None): return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_vwwn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def getMemrange(self, fromadr, toadr):\n res = b''\n toadr = toadr + 1 # python indxes are excluding end, so include it\n while fromadr < toadr:\n # print(\"fromto: %04x %04x\" % (fromadr, toadr))\n for seg in self.segments:\n # print(seg)\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr < segend:\n # print(\"startok 0x%04x %d\" % (seg.startaddress, len(seg.data)))\n # print((\"0x%04x \"*3) % (segend, fromadr, toadr))\n if toadr > segend: # not all data in segment\n # print(\"out of segment\")\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n # print(toadr-fromadr)\n # print(catchlength)\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength # adjust start\n if len(res) >= toadr-fromadr:\n break # return res\n else:\n res += b'\\xff'\n fromadr = fromadr + 1 # adjust start\n #print(\"fill FF\")\n # print(\"res: %r\" % res)\n return res", "def get_fragments_for_mdv_calculation(self):\n return list(self.fragments_for_mdv_calculation)", "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fragments(self):\n return len(self.byteruns())", "def getMemrange(self, fromadr, toadr):\n res = ''\n toadr = toadr + 1 #python indxes are excluding end, so include it\n while fromadr < toadr:\n for seg in self.segments:\n segend = seg.startaddress + len(seg.data)\n if seg.startaddress <= fromadr and fromadr < segend:\n if toadr > segend: #not all data in segment\n catchlength = segend-fromadr\n else:\n catchlength = toadr-fromadr\n res = res + seg.data[fromadr-seg.startaddress : fromadr-seg.startaddress+catchlength]\n fromadr = fromadr + catchlength #adjust start\n if len(res) >= toadr-fromadr:\n break #return res\n else: #undefined memory is filled with 0xff\n res = res + chr(255)\n fromadr = fromadr + 1 #adjust start\n return res", "def list_fragments(self):\n return list(self.data.fragments)", "def 
fragments(self):\n return self.fragments_tree.vchildren_not_empty", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def populate_ranges(self,):\n self.ranges = list()\n # coredump: info target shows all sections in full detail\n # live debug: only file-backed sections are shown\n targetinfo = gdb.execute(\"info target\", False, True)\n for line in targetinfo.splitlines():\n line = line.strip()\n if line.startswith('`'):\n line = line.split(\"'\")[1]\n source = line[1:]\n continue\n if not line.startswith(\"0x\"):\n continue\n\n start, dash, end, str_is, memtype = line.split(maxsplit=4)\n assert(dash == '-' and str_is == 'is')\n start = int(start, 16)\n end = int(end, 16)\n new_range = MemoryRange(start, end-start, source, memtype)\n startoverlap = self.get_range(start)\n endoverlap = self.get_range(end)\n\n if endoverlap == startoverlap:\n endoverlap = None\n\n #TODO: splitup and punch holes/replace\n if memtype.startswith('.'):\n # gdb reports loadXXX sections on top of file-backed sections of the binary\n # probably because the kernel maps writeable pages on top of them\n # Therefore, keep the more accurate description from the file-backed section\n if startoverlap is not None and startoverlap.memtype == MemoryType.General:\n previous, current = self.split_range_at(start)\n self.ranges.remove(current)\n startoverlap = None\n if endoverlap is not None and endoverlap.memtype == MemoryType.General:\n current, end = self.split_range_at(end)\n self.ranges.remove(current)\n endoverlap = None\n\n if startoverlap is not None and endoverlap is not None:\n print(\"Overlapping memory ranges: %s in %s -> %s\" %\n (new_range, str(startoverlap), str(endoverlap)))\n bisect.insort(self.ranges, new_range)\n\n # live target: run-time allocated memory and some file-backed sections\n # There typically is overlap with the 'info target' output, so give precedence\n # to the previously added ranges\n mappinginfo = gdb.execute(\"info proc mappings\", False, True)\n for line in mappinginfo.splitlines():\n line = line.strip()\n if not line.startswith(\"0x\"):\n continue\n\n items = line.split()\n if len(items) == 4:\n start, end, size, offset = items\n source = \"unknown\"\n elif len(items) == 5:\n start, end, size, offset, source = items\n else:\n print(\"Unexpected line when parsing 'info proc mappings': %s\" % line)\n continue\n\n start = int(start, 16)\n size = int(size, 16)\n end = int(end, 16)\n\n new_range = MemoryRange(start, size, source, source)\n self.tentative_add_range(new_range)", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def getChunks():", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return _calculate_fragmentation(buddyinfo_output)", "def find_all(v):\n screen = G.DEVICE.snapshot(quality=ST.SNAPSHOT_QUALITY)\n return v.match_all_in(screen)", "def get_claimed_objects_in_range(start, stop):\n return RawPlantActivity.objects.filter(\n TS_LOAD__gte=start,\n TS_LOAD__lte=stop,\n POOL_CD__exact='03',\n )", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] 
> right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def between_blocks(self, frame):\n return []", "def range_table(self):\n range_table_base = []\n if self.block_mask != None:\n range_table_length = len(self.block_mask)\n else:\n range_table_length = self.block_num\n\n for i in range(range_table_length):\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.filter_num))\n range_table_base.append(len(self.k_size))\n range_table_base.append(len(self.pool_type))\n\n return range_table_base", "def fragment_length_filter(fragment_anno_dic):\n out_list = []\n total_fragment = 0\n for key in fragment_anno_dic.keys():\n #print fragment_anno_dic[key]\n fragments_flag = []\n fragments_length = []\n fragments_region = []\n total_fragment += int(fragment_anno_dic[key][0][-3])\n reads_coverage = [x[-3] for x in fragment_anno_dic[key]]\n if len(list(set(reads_coverage))) != 1:\n print (fragment_anno_dic[key])\n if len(fragment_anno_dic[key]) == 1:\n fragment_anno_dic[key][0] = list(fragment_anno_dic[key][0])\n fragment_anno_dic[key][0][-2] = str(fragment_anno_dic[key][0][-2])\n out_list.append('\\t'.join(fragment_anno_dic[key][0]))\n else:\n for i in range(0,len(fragment_anno_dic[key])):\n fragment_anno_dic[key][i] = list(fragment_anno_dic[key][i])\n iso = fragment_anno_dic[key][i]\n iso_length = sum([int(x) for x in iso[10].split(',')])\n fragments_length.append(iso_length)\n fragments_flag.append(iso[-2])\n fragments_region.append(iso[8])\n #print fragment_anno_dic[key]\n#---------------------------------------------------------------- complete fragments (Set region preference)\n region_complete = [''] * len(fragments_flag)\n max_flag = max(fragments_flag)\n #print fragments_length,fragments_region,fragments_flag\n if max_flag == 3:\n for x in range(0,len(fragments_flag)):\n if fragments_flag[x] == max_flag:\n fragment_anno_dic[key][x][-2] = str(fragment_anno_dic[key][x][-2])\n region_complete[x] = fragments_region[x]\n # Set preference\n if 'CDS' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('CDS')]))\n elif '5UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('5UTR')]))\n elif '3UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('3UTR')]))\n elif '5UTR-CDS' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('5UTR-CDS')]))\n elif 'CDS-3UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('CDS-3UTR')]))\n elif 'intron' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron')]))\n elif 'intron-containing' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron-containing')]))\n elif 'Null' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('Null')]))\n else:\n print (fragment_anno_dic[key])\n print ('Gene type error!')\n#----------------------------------------------------------------- incomplete 
fragments (choose the longest fragments)\n elif max_flag == 2:\n max_length_list = [0] * len(fragments_length)\n max_region_list = [''] * len(fragments_length)\n for y in range(0,len(fragments_flag)):\n if fragments_flag[y] == max_flag:\n max_length_list[y] = fragments_length[y]\n #print max_length_list\n max_length = max(max_length_list)\n #print max_length\n for z in range(0,len(max_length_list)):\n if max_length_list[z] == max_length:\n fragment_anno_dic[key][z][-2] = str(fragment_anno_dic[key][z][-2])\n max_region_list[z] = fragments_region[z]\n #print max_region_list\n # Set preference\n if 'CDS' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('CDS')]))\n elif '5UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('5UTR')]))\n elif '3UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('3UTR')]))\n elif '5UTR-CDS' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('5UTR-CDS')]))\n elif 'CDS-3UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('CDS-3UTR')]))\n elif 'intron' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('intron')]))\n elif 'intron-containing' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron-containing')]))\n elif 'Null' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('Null')]))\n elif max_flag == 1: #Not annotated to exon region\n fragment_anno_dic[key][fragments_flag.index(1)][-2] = str(fragment_anno_dic[key][fragments_flag.index(1)][-2])\n # print (fragment_anno_dic[key])\n out_list.append('\\t'.join(fragment_anno_dic[key][fragments_flag.index(1)]))\n elif max_flag == 0: #Not annotated to intragenic region\n fragment_anno_dic[key][0][-2] = str(fragment_anno_dic[key][0][-2])\n out_list.append('\\t'.join(fragment_anno_dic[key][0]))\n else:\n print (fragment_anno_dic[key])\n print ('Please check flag information')\n print ('Total fragments after filtering 1: ' + str(total_fragment))\n return out_list", "def compute_fragments(self):\n self.fragments = []\n for part in self.parts:\n for fragment in self.compute_digest(part):\n # The part is not a fragment if it hasn't been cut at all and\n # therefore doesn't have sticky ends. Exclude from fragments.\n if not hasattr(fragment.seq, \"left_end\"):\n continue\n fragment.original_part = part\n self.annotate_fragment_with_part(fragment)\n self.fragments.append(fragment)", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]", "def getSegments(self) -> List[int]:\n ...", "def _window(region, start_index, end_index):\n last_index = len(region) + 1\n selected_tokens = region[max(start_index, 0):min(end_index, last_index) + 1]\n return selected_tokens" ]
[ "0.63622516", "0.6262221", "0.60084426", "0.5877402", "0.5860668", "0.5755313", "0.557767", "0.5523148", "0.5457254", "0.5449253", "0.54406774", "0.5383746", "0.5366585", "0.52960277", "0.528034", "0.51783717", "0.5175531", "0.513877", "0.51113105", "0.51074564", "0.5065128", "0.5054756", "0.5024139", "0.5019665", "0.50048304", "0.499126", "0.4984162", "0.49829817", "0.49829414", "0.4971994" ]
0.6855869
0
Returns all the free fragments in a VWWN Range. [Arguments]
def fusion_api_get_vwwn_range_free_fragments(self, uri, api=None, headers=None): return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_get_vsn_range_free_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vmac_range_free_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def get_free_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/free-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def fusion_api_get_vwwn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vwwnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def fusion_api_get_ipv4_range_free_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/free-fragments')", "def fusion_api_get_vsn_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vsnrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def fusion_api_get_vmac_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.vmacrange.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_allocated_fragments(self, uri, count=-1, start=0):\n uri = uri + \"/allocated-fragments?start={0}&count={1}\".format(start, count)\n return self._helper.get_collection(uri)", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def fragments(self):\n return self.fragments_tree.vchildren_not_empty", "def free(range_lst, range_start, range_end, user_start, user_end):\n \n # Attempt to calculate range to subtract times from\n minute_range = []\n # range_start = arrow.get(range_start, \"MM/DD/YYYY hh:mm A\")\n # range_start_format = range_start.format(\"MM/DD/YYYY hh:mm A\")\n # range_end = arrow.get(range_end, \"MM/DD/YYYY hh:mm A\")\n # range_end_format = range_end.format(\"MM/DD/YYYY hh:mm A\")\n\n # Calculate range of minutes between potential start and end given by event creator\n minute_range = []\n for r in arrow.Arrow.range(\"minute\", range_start, range_end):\n minute_range.append(r)\n\n # Attempt to calculate user range of busy times\n try:\n user_start = arrow.get(user_start, \"MM/DD/YYYY hh:mm A\")\n user_end = arrow.get(user_end, \"MM/DD/YYYY hh:mm A\")\n\n user_range = arrow.Arrow.range(\"minute\", user_start, user_end)\n except:\n logger.info(\"MODULE 'free_times' FUNCTION 'free' -- Can't calculate USER range using {} - {}\".format(user_start, user_end))\n # Return empty list on fail\n return []\n\n # Subtract times from user_range from the general minute_range\n for time in user_range:\n if time in minute_range:\n index = minute_range.index(time)\n # None type will be used to generate range in flask_main find_busy_times\n minute_range[index] = None\n \n return minute_range", "def calculate_fragmentation():\n \n with open(\"/proc/buddyinfo\", 'r') as buddyinfo_output:\n return _calculate_fragmentation(buddyinfo_output)", "def fragments(self):\n return len(self.byteruns())", "def fusion_api_get_ipv4_range_allocated_fragments(self, uri, api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param='/allocated-fragments')", "def get_fragments_for_mdv_calculation(self):\n return list(self.fragments_for_mdv_calculation)", "def select_vertices_free(self):\n _filter = compas_rhino.rs.filter.point\n guids = 
compas_rhino.rs.GetObjects(message=\"Select Free Vertices.\", preselect=True, select=True, group=False, filter=_filter)\n if guids:\n keys = [self.guid_vertex_free[guid] for guid in guids if guid in self.guid_vertex_free]\n else:\n keys = []\n return keys", "def list_fragments(self):\n return list(self.data.fragments)", "def get_free_standins(group):", "def get_free_indices(program, program_len):\n used = get_used_indices(program)\n total = set(range(program_len + len(program.input_types)))\n return total - used", "def fragmentation(free_resources_gaps, p=2):\n f = free_resources_gaps\n frag = pd.Series()\n for i, fi in enumerate(f):\n if fi.size == 0:\n frag_i = 0\n else:\n frag_i = 1 - (sum(fi**p) / sum(fi)**p)\n frag.set_value(i, frag_i)\n return frag", "def find_free(self):\n\n free_position = np.where(self.block == 0)\n free_position = np.array(free_position).flatten()\n return free_position", "def getChunks():", "def free_slots(self, day_bounds: Slot):\n free_slots: List[Slot] = []\n time_ptr = day_bounds.start\n for meeting in self.meetings:\n if meeting.start > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, meeting.start.time_str))\n time_ptr = meeting.end\n if day_bounds.end > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, day_bounds.end.time_str))\n return free_slots", "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def _fragment_seq(self, seq):\r\n num_fragments = self.Params['num_fragments']\r\n results = []\r\n start = 0\r\n for i in range(num_fragments):\r\n # My notes:\r\n # len(seq[i::n]) gives the number of even multiples of\r\n # num_fragments exist between i (inclusive) and the end of the seq.\r\n stop = start + len(seq[i::num_fragments])\r\n results.append(seq[start:stop])\r\n start = stop\r\n return results", "def get_free_games(self) -> List[Game]:", "def get_fragments(fragment_size, offset, chunk_size):\n if is_fragmentable(fragment_size, offset, chunk_size):\n return [tokens[x:x + fragment_size] for x in xrange(0, len(chunk_size), offset)]", "def print_fragmentation():\n\n frag_dict = calculate_fragmentation()\n \n _print_fragmentation(frag_dict, sys.stdout)", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result" ]
[ "0.6687681", "0.660191", "0.64491975", "0.64270025", "0.63058704", "0.5867157", "0.57165587", "0.54287314", "0.54160684", "0.5410601", "0.5342903", "0.5289316", "0.52690816", "0.5241172", "0.5237342", "0.51737046", "0.51662946", "0.5020625", "0.49955836", "0.49786228", "0.4969614", "0.490738", "0.48687643", "0.48425514", "0.48145136", "0.47990113", "0.47934648", "0.47638264", "0.47632426", "0.47542888" ]
0.72093993
0
Gets a default or paginated collection of Interconnect Types. [Arguments]
def fusion_api_get_interconnect_types(self, param='', api=None, headers=None): return self.ictypes.get(api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)", "def get_integrations_types(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'sort_by', 'expand', 'next_page', 'previous_page']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_types\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/types'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='IntegrationTypeEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def get_types(self):\n return self.types", "async def incidentTypes(self, includeHidden: bool = False) -> Iterable[str]:", "def get_types(self) :\n\n return list(self.types)[1:]", "def getPrimaryTypes() -> List[int]:\n ...", "def fusion_api_get_sas_interconnect_types(self, uri=None, param='', api=None, headers=None):\n return self.sasictypes.get(uri=uri, api=api, headers=headers, param=param)", "def ntypes(self): # -> list[None]:\n ...", "def get_activities(self, type=None):\n return flattrclient._get_query_dict(type=type)", "def types_clients_view(request):\n query = request.dbsession.query(ClientType).all()\n return Utils.serialize_many(query)", "def get_all_by_incident_type(\n *, db_session, incident_type: str, skip=0, limit=100\n) -> List[Optional[Incident]]:\n return (\n db_session.query(Incident)\n .filter(Incident.incident_type.name == incident_type)\n .offset(skip)\n .limit(limit)\n .all()\n )", "def ntypes(self): # -> list[str]:\n ...", "def get_informatieobjecttypen(\n clients: List[Client] = None,\n) -> List[InformatieObjectType]:\n if clients is None:\n clients = _get_ztc_clients()\n\n catalogi = {cat.url: cat for cat in get_catalogi(clients=clients)}\n iots = _fetch_list(\"informatieobjecttype\", clients, InformatieObjectType)\n\n # resolve relations\n for iot in iots:\n iot.catalogus = catalogi[iot.catalogus]\n\n return iots", "def Institutions(self, default=[{}]):\n tmp = self.data.get('institutions', default)\n return [HEP.InstitutionObject(i) for i in tmp]", "def types():\n types = session.query(Type).all()\n return jsonify(types=[t.name for t in 
types])", "def get_items(self):\n return self._internal_type_mapping", "def iterate_types(self) -> Iterator[FakeAnnotation]:\n yield from self.client.iterate_types()\n if self.service_resource:\n yield from self.service_resource.iterate_types()\n for waiter in self.waiters:\n yield from waiter.iterate_types()\n for paginator in self.paginators:\n yield from paginator.iterate_types()", "def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}", "def getTypes():\n\n\t\tquery = \"\\\n\t\t\tSELECT\\\n\t\t\t\tid_item_container_type,\\\n\t\t\t\tlabel\\\n\t\t\tFROM\\\n\t\t\t\titem_container_type\\\n\t\t\"\n\n\t\treturn {t['id_item_container_type']: t['label'] for t in Model.fetchAllRows(query)}", "def getImmediatelyAddableTypes(self, context=None):\n return self.getLocallyAllowedTypes()", "def InspireCategories(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('inspire_categories', default)\n return [HEP.InspireFieldObject(i) for i in tmp]", "def get_catalog_search_record_types(self):\n return TypeList([])", "def get_objects_by_type(self, *types) -> List[TgnObject]:\n if not types:\n return list(self.objects.values())\n types_l = [o.lower() for o in types]\n return [o for o in self.objects.values() if o.type.lower() in types_l]", "def listAffiliationType(self):\n return self.get_json('/affiliationType')", "def document_types(db: Session = Depends(get_db)):\n return get_document_types(db)", "def by_type(self, types=None):\n return self.describe(only_type=types)", "def get_all_documents(self, type: Type) -> List[DocumentReference]:\n runners = []\n collection = self.client.collection(type.value).list_documents()\n for document in collection:\n runners.append(document)\n\n return runners", "def ntypes(self): # -> None:\n ...", "def get(self):\n records = None\n with open_session() as session:\n try:\n records = session.query(BiometricType).all()\n except Exception as error:\n logger.exception(\"Exeption: %s\" % (str(error)))\n return gen_response(\"Internal server error\")\n\n logger.debug(records)\n rlist = [to_dict(record) for record in records]\n return gen_response(rlist)" ]
[ "0.60877544", "0.5867347", "0.5823513", "0.5725122", "0.5676197", "0.5670475", "0.5616632", "0.5583125", "0.5488655", "0.5477379", "0.5417193", "0.5391023", "0.5375417", "0.5344877", "0.5328458", "0.5319178", "0.5217222", "0.52147555", "0.51954097", "0.5195361", "0.5157867", "0.51427597", "0.51103616", "0.51059645", "0.51000154", "0.5059992", "0.5035712", "0.5033409", "0.50079656", "0.5006569" ]
0.66850644
0
Gets the port statistics details for givenInterconnect [Arguments]
def fusion_api_get_interconnect_port_statistics(self, uri, param='', api=None, headers=None): param = '/statistics/%s' % param return self.ic.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PortStatistics(self):\n return self._get_attribute('portStatistics')", "def traffic_stats(self, *args, **kwargs):\n if 'port_handle' not in kwargs:\n kwargs.setdefault('port_handle', self.ports)\n # If mode has not been defined, use default value\n kwargs.setdefault(\"mode\", \"aggregate\")\n res = self.hltapi.traffic_stats(**kwargs)\n self.check_res(res)\n return {x: res[x] for x in kwargs['port_handle']}", "def portstats64show(obj, content):\n global _portstats_to_api\n\n i, x, chassis_obj = 0, len('portstats64show'), obj.r_chassis_obj()\n while len(content) > i:\n\n # Get the port object\n buf = gen_util.remove_duplicate_char(content[i].replace('\\t', ' '), ' ')\n if len(buf) == 0:\n i += 1\n continue\n if len(buf) < x or buf[0:x] != 'portstats64show':\n break\n index = int(buf.split(' ')[1])\n port_obj = brcddb_port.port_obj_for_index(chassis_obj, int(buf.split(' ')[1]))\n if port_obj is None:\n brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log\n raise Exception('Could not find port matching: ' + buf)\n port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)\n if port_stats_d is None:\n port_stats_d = dict()\n port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)\n\n # Parse the port statistics\n i += 1\n while len(content) > i and len(content[i]) > 0:\n buf = gen_util.remove_duplicate_char(content[i].replace('\\t', ' '), ' ')\n cl = buf.split(' ')\n key = _portstats_to_api.get(cl[0])\n if key is not None:\n if 'top_int :' in buf:\n i += 1\n lv = int(gen_util.remove_duplicate_char(content[i].replace('\\t', ' ').strip().split(' ')[0], ' '))\n v = int('{:x}'.format(int(cl[1])) + '{:08x}'.format(lv), 16)\n else:\n v = int(cl[1])\n port_stats_d.update({key: v})\n i += 1\n\n return i", "def fusion_api_get_interconnect_ports(self, uri, api=None, param='', headers=None):\n param = '/ports%s' % param\n return self.ic.get(uri=uri, api=api, headers=headers, param=param)", "def netstat(self):\n \n command = 'netstat -utn'\n lines = subprocess.check_output(command, shell=True).split('\\n')[2:]\n \n\tports = {'tcp':[], 'udp':[]}\n\tfor line in lines:\n\t if len(line) < 4:\n\t continue\n\t\t\n\t words = line.split()\n\t port = int(words[3].split(':')[-1])\n\t lst = ports[words[0]]\n\t if port in lst:\n\t continue\n\t lst.append(port)\n\t \n\tports['tcp'].sort()\n\tports['udp'].sort()\n\t\n\treturn ports", "def get_port_counts(ssh):\r\n cmd02='netstat -na'\r\n retry_number=3\r\n try:\r\n while True:\r\n if retry_number == 0:\r\n logger.writeLog(\"get port counts fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd02)\r\n data02=(stdout.read().decode('gbk').strip().replace(' ','').replace('\\t','').replace('\\r','').replace('\\n',''))\r\n print(data02)\r\n if data02 == \"\":\r\n retry_number -= 1\r\n logger.writeLog(\"port counts data is null\",level='error')\r\n continue\r\n else:\r\n pattern=re.compile('1.*?:22',re.S)\r\n match_list=re.findall(pattern,data02)\r\n print(match_list)\r\n port_count=len(match_list)\r\n logger.writeLog(\"get port counts success\",level='info')\r\n print(\"port connected counts:\",port_count)\r\n return port_count\r\n break\r\n except:\r\n logger.writeLog(\"get port counts error\",level='error')\r\n return None", "def probe_ports( self, ):\r\n ports = self.com_driver.list_available()\r\n self.gui.print_info_string( \"\" )\r\n self.gui.print_info_string( \"Reported Ports from driver:\" )\r\n self.gui.print_info_string( \"\" )\r\n if len( ports ) == 0:\r\n self.gui.print_info_string( \"None 
\\n\" )\r\n else:\r\n for i_port in ports:\r\n self.gui.print_info_string( i_port[0] )\r\n #self.gui.print_info_string( \"\\n\" )\r\n\r\n self.close_driver()\r\n\r\n self.gui.print_info_string( \"\\nProbe Ports from parameters:\\n\" )\r\n ports = self.com_driver.probe_available( self.parameters.port_list )\r\n ix_line = 0 # what is this ??\r\n for i_port in ports:\r\n ix_line += 1\r\n self.gui.print_info_string( str( i_port ) )\r\n if ix_line == 10:\r\n ix_line = 0\r\n self.gui.print_info_string( \"\\n\" )\r\n #logger.log( fll, a_str )\r\n\r\n return", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def describe_connections_on_interconnect(interconnectId=None):\n pass", "def connection_stats(self, host=\"localhost\", port=9090):\n\t\tmgr = NetworkConnectionsManager(self.db_uri_)\n\t\treturn mgr.connection_stats(host, port)", "def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList", "def list_ports(state):\n\tstate.report()", "def print_polling_traffic_stats(device_int):\n print \"previous counter {}\".format(device_int[\"previous_counter\"])\n print \"current_counter {}\".format(device_int[\"current_counter\"])\n print \"bits_out {}\".format(device_int[\"bits_out\"])\n print \"time_of poll {}\".format(device_int[\"update_time\"])\n print \"previous_update {}\".format(device_int[\"previous_update\"])\n print \"secounds since {}\".format(device_int[\"seconds_since\"])\n print \"bits_per_sec {}\".format(device_int[\"bits_per_sec\"])\n print \"speed {}\".format(device_int[\"speed\"])\n print \"util_percentage {}\".format(device_int[\"util_percentage\"])\n print \"util_percentage after round {}\".format(device_int[\"util_percentage\"])", "def head_port_monitoring(self):\n return self.head_args.port_monitoring if self.head_args else None", "def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature", "def net_if_stats():\n 
ret = {}\n rawdict = cext.net_if_stats()\n for name, items in rawdict.items():\n if not PY3:\n assert isinstance(name, unicode), type(name)\n name = py2_strencode(name)\n isup, duplex, speed, mtu = items\n if hasattr(_common, 'NicDuplex'):\n duplex = _common.NicDuplex(duplex)\n ret[name] = _common.snicstats(isup, duplex, speed, mtu, '')\n return ret", "def get_host_stats(self, refresh=False):", "def get_ports(self) -> tuple:\n raise NotImplementedError", "def port(self) -> int:", "def get_network(isamAppliance, application_interface, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving the Application Interface Statistics\",\n \"/analysis/interface_statistics.json{0}\".format(\n tools.create_query_string(prefix=application_interface,\n timespan=statistics_duration)),requires_model=requires_model)", "def describe_connections(connectionId=None):\n pass", "def port():", "def Port(self) -> int:", "def cmd_port(args):", "def portstatsshow(obj, content):\n global _portstats_to_api\n\n port_obj, port_stats_d, switch_obj = None, None, obj.r_switch_obj()\n\n for buf in content:\n buf = buf.replace('er_single_credit_loss', 'er_single_credit_loss ')\n buf = buf.replace('er_multi_credit_loss', 'er_multi_credit_loss ')\n buf = buf.replace('fec_corrected_rate', 'fec_corrected_rate ')\n buf = buf.replace('latency_dma_ts', 'latency_dma_ts ')\n tl = gen_util.remove_duplicate_char(buf.replace('\\t',' '), ' ').split(' ')\n if len(tl) < 2:\n continue\n\n if tl[0] == 'port:':\n port_obj = brcddb_port.port_obj_for_index(switch_obj, int(tl[1].strip()))\n if port_obj is None:\n brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log\n raise Exception('Could not find port matching: ' + buf)\n port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)\n if port_stats_d is None:\n port_stats_d = dict(name=port_obj.r_obj_key())\n port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)\n\n elif tl[0] in _portstatsshow_special:\n _portstatsshow_special[tl[0]](port_obj)\n\n else:\n key = _portstats_to_api.get(tl[0])\n if key is not None:\n port_stats_d.update({key: int(tl[1])})", "def ShowPort(cmd_args=None, cmd_options={}):\n show_kmsgs = True\n if \"-K\" in cmd_options:\n show_kmsgs = False\n if not cmd_args:\n print \"Please specify the address of the port whose details you want to print\"\n print ShowPort.__doc__\n return\n port = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_port *')\n print PrintPortSummary.header\n PrintPortSummary(port, show_kmsgs)", "def get(self, *args, **kwargs):\n output = self._base_stats()\n output['connections'] = dict()\n for key in self.application.rabbitmq.keys():\n output['connections'][key] = self.application.rabbitmq[key].stats\n self.write(output)", "def get_device_traffic(context,target):\n\n result = context.get_operation('get_interfaces_traffic')\n return result", "def stats(self, **kwargs):\n return stats.stats(self._host, self._session, **kwargs)", "def _get_data(self):\n raw_data = self._get_raw_data()\n if not raw_data:\n return None\n result = {}\n for line in raw_data:\n if 'tcp' in line:\n parts = line.split()\n proto = parts[0]\n local_addr = parts[3]\n state = parts[5]\n ip, port = local_addr.rsplit(':', 1)\n port = str(port)\n result[port] = 1\n if state == 'LISTEN':\n if port not in self.charts['ports']:\n self.charts['ports'].add_dimension([port, port, 'absolute'])\n return result" ]
[ "0.6443213", "0.63886684", "0.5857132", "0.57869655", "0.5770844", "0.57610476", "0.5745559", "0.5716668", "0.57158166", "0.56988394", "0.56756365", "0.5665839", "0.56434876", "0.5636329", "0.5626529", "0.5593384", "0.55802625", "0.55275863", "0.548358", "0.5459376", "0.54384977", "0.543275", "0.5420952", "0.54050905", "0.5375758", "0.5324166", "0.5323968", "0.5313449", "0.53102577", "0.5283048" ]
0.7066244
0
Issues an Patch Interconnect request for Potash\Potassium modules [Arguments]
def fusion_api_patch_interconnect(self, body, uri, param='', api=None, headers=None): return self.ic.patch(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def patch(self, *args, **kwargs):\n self.request(\"patch\", *args, **kwargs)", "def simulate_patch(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PATCH', path, **kwargs)", "def _patch(self, path=None, version=None, params=None,\n data=None, json=None, header=None):\n return self.client.patch(module='mam', path=path, version=version,\n params=params, data=data,\n json=json, header=header)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, 
request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(\n config, request, global_params=global_params)" ]
[ "0.61423445", "0.5874639", "0.5815449", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945", "0.5758945" ]
0.63652766
0
Updates the port details for given Interconnect [Arguments]
def fusion_api_edit_interconnect_ports(self, body, uri, api=None, param='', headers=None): param = '/update-ports%s' % param return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def update(clients, context, name=None):\n port_id = context['port_id']\n logger.info(\"Taking action port.update {}.\".format(port_id))\n neutron = clients.get_neutron()\n body = {'port': {}}\n if name is not None:\n body['port']['name'] = name\n neutron.update_port(port_id, body=body)", "def cmd_port(args):", "def modify_ports(self, ports, **kwargs):\n pass", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def update(self, oid, name, network_id, fixed_ips, host_id=None, \n profile=None, vnic_type=None, device_owner=None, device_id=None,\n security_groups=None):\n data = {\n \"port\": {\n }\n }\n if network_id is not None:\n data['port']['network_id'] = network_id\n if name is not None:\n data['port']['name'] = name\n if fixed_ips is not None:\n data['port']['fixed_ips'] = fixed_ips\n if host_id is not None:\n data['port']['binding:host_id'] = host_id\n if profile is not None:\n data['port']['binding:profile'] = profile\n if host_id is not None:\n data['port']['binding:vnic_type'] = vnic_type\n if device_owner is not None:\n data['port']['device_owner'] = device_owner\n if device_id is not None:\n data['port']['device_id'] = device_id\n if security_groups is not None:\n data['port']['security_groups'] = security_groups\n \n path = '%s/ports/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack port: %s' % truncate(res))\n return res[0]['port']", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def modify_rstp_ports(self, ports, **kwargs):\n pass", "def update_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.update_port(port)\n except:\n pass", "def fusion_api_update_li_port_monitor_configuration(self, body=None, uri=None, api=None, headers=None):\n param = '/port-monitor'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def port_configure(self,port,**config):\n if not port in self.ports:\n self.ports[port] = {}\n\n for k,v in config.items():\n self.ports[port][k] = v", "def ConnectPort(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('connectPort', payload=payload, response_object=None)", "def fill_port(self, data):\n self.port = get_optional_value(data, self.PORT, None)\n if self.port:\n self.port = \":\" + str(self.port)\n else:\n self.port = \"\"", "def activate(self, ext_ip, ext_port):\n self.sql_manager.port_update(self.id, external_ip=ext_ip, external_port=ext_port)\n self.external_port = ext_port\n self.external_ip = ext_ip", "def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)", "def 
port():", "def run(self, port_update_args, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n self.neutron.update_port(port[\"id\"], **port_update_args)", "def connect_port(self, iface):\n self.iface_config(iface, adminMode='Up')", "def before_update(self, introspection_data, node_info, **kwargs):\n inventory = utils.get_inventory(introspection_data)\n\n ironic_ports = node_info.ports()\n\n for iface in inventory['interfaces']:\n if iface['name'] not in introspection_data['all_interfaces']:\n continue\n\n mac_address = iface['mac_address']\n port = ironic_ports.get(mac_address)\n if not port:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, matching port not found in Ironic.\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n # Determine the physical network for this port.\n # Port not touched in here.\n physnet = self.get_physnet(port, iface['name'], introspection_data)\n if physnet is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no physical network mapping\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n patch = self._get_physnet_patch(physnet, port)\n if patch is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no update required\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n try:\n node_info.patch_port(port, [patch])\n except exceptions.BadRequestException as e:\n LOG.warning(\"Failed to update port %(uuid)s: %(error)s\",\n {'uuid': port.id, 'error': e},\n node_info=node_info)", "def __init__(__self__, *,\n from_port: pulumi.Input[int],\n to_port: pulumi.Input[int]):\n pulumi.set(__self__, \"from_port\", from_port)\n pulumi.set(__self__, \"to_port\", to_port)", "def dummy_set_comm_port(port):\n pass", "def _update_external_port(openstack_resource):\n # Get the external port using the resource id provided via port node\n external_port = openstack_resource.get()\n # Check if the current port node has allowed_address_pairs as part of\n # resource_config\n addresses_to_add = openstack_resource.config.get('allowed_address_pairs')\n if addresses_to_add:\n old_addresses = external_port.get('allowed_address_pairs') or []\n\n # Get the old ips from the each pair\n old_ips = \\\n [\n old_address['ip_address']\n for old_address\n in old_addresses if old_address.get('ip_address')\n ]\n # Get the ips need to be added to the external port\n ips_to_add = \\\n [\n address_to_add['ip_address']\n for address_to_add\n in addresses_to_add if address_to_add.get('ip_address')\n ]\n\n # Check if there are a common ips between old ips and the one we\n # should add via node\n common_ips = set(old_ips) & set(ips_to_add)\n if common_ips:\n raise NonRecoverableError(\n 'Ips {0} are already assigned to {1}'\n ''.format(common_ips, external_port.id))\n\n # Update port for allowed paris\n updated_port = openstack_resource.update(\n {'allowed_address_pairs': addresses_to_add})\n # Update runtime properties\n update_runtime_properties(\n {\n 'fixed_ips': updated_port.fixed_ips,\n 'mac_address': updated_port.mac_address,\n 'allowed_address_pairs': updated_port.allowed_address_pairs,\n }\n )\n\n # Get the networks from relationships if they are existed\n rel_network_ids = 
find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n if rel_network_id:\n port = openstack_resource.get()\n if port['network_id'] != rel_network_id:\n raise NonRecoverableError(\n 'Expected external resources port {0} and network {1} '\n 'to be connected'.format(port.id, rel_network_id))", "def update_endpoint_in_sipserver(self, endpoint: str, password: str) -> None:", "def connect_icache(self, port: Port) -> None:\n self.port_end.req_ports = port", "def _update_port_config(port_config):\n\n # Update network config for port node\n _update_network_config(port_config)\n\n # Update network fixed ips config\n _update_fixed_ips_config(port_config)\n\n # Update security groups config for port node\n _update_security_groups_config(port_config)", "def Port(self) -> int:", "def add_in_port(self, m: int, content: str, **opts) -> None:", "def setport(self, port):\n self.__port = port", "def _set_port(self, ip, port_name):\n\n inst_args = getattr(self, ip.top_name)\n try:\n name = [key for key in inst_args.keys() if key[2:] == port_name][0]\n except IndexError:\n raise ValueError(f'port: \"{port_name}\" does not exist in ip: '\n f'{ip.top_name}')\n sig = inst_args[name]\n sig.name = port_name\n setattr(self, port_name, sig)\n self._ports.append(sig)", "def _update_port_association(client_config, port_id, device_id=''):\n # Check if the port is provided or not\n if not port_id:\n raise NonRecoverableError(\n 'Unable to attach port to device {0},'\n ' `port_id` is missing'.format(\n device_id)\n )\n # Prepare the port instance to attach/detach server from/to the current\n # port\n port_resource = OpenstackPort(client_config=client_config,\n logger=ctx.logger)\n\n # Set port id\n port_resource.resource_id = port_id\n\n # Update port\n port_resource.update({'device_id': device_id})" ]
[ "0.6771649", "0.65053034", "0.6453406", "0.6331727", "0.6161114", "0.61439186", "0.6083497", "0.5743908", "0.57372636", "0.5715563", "0.5684003", "0.5654657", "0.562856", "0.5612382", "0.55877507", "0.5551602", "0.5522054", "0.5505052", "0.54860455", "0.5481624", "0.54763967", "0.5473575", "0.5456739", "0.5451075", "0.5436208", "0.54174346", "0.54160374", "0.54128116", "0.53983796", "0.53767556" ]
0.6886794
0
Clear the port counter details for given Interconnect [Arguments]
def fusion_api_clear_interconnect_ports(self, body, uri, api=None, param='', headers=None): param = '/statistics/reset%s' % param return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_statistics(self, sniff_port_list):\n pass", "def reset_counter(self) -> None:", "def ClearPortsAndTrafficStats(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"clearPortsAndTrafficStats\", payload=payload, response_object=None\n )", "def Clear(port):\n\tport.write(\"C\")", "def reset(self):\n self.sql_manager.port_update(self.id, external_ip=None, external_port=None)\n self.external_port = None\n self.external_ip = None", "def reset(self) -> None:\n self.memory = self.intcode.copy()\n self.ip = 0\n self.stdout.clear()", "def ClearProtocolStats(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"clearProtocolStats\", payload=payload, response_object=None\n )", "def port_revert(switch, port):\n print client.port.port_revert(switch, port)", "def clear_ironic_port_internalinfo(db, port_id):\n sql = '''\\\n UPDATE ironic.ports\n SET internal_info = '{}'\n WHERE uuid = %s;\n '''\n return db.query(sql, args=[port_id], no_rows=True)", "def port_delete(switch, port):\n client.port.delete(switch, port)", "def teardown_logical_port_connectivity(self, context, port_db):\n pass", "def __clear_telnet_port(self, console_ip, port):\n logging.info(\"Clearing console with ip=%s and ports=%s\", console_ip, port)\n pwdList = ['cisco123', 'lab', 'nbv123']\n pwdList.remove(self.console_pwd)\n pwdTry = 0\n console = pexpect.spawn('telnet %s'%(console_ip))\n console.logfile = self.log\n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, r'Bad', r'(?i)incorrect', PWD_PROMPT, CONSOLE_PROMPT, EN_CONSOLE_PROMPT], 5)\n while i >= 0:\n if i == 0:\n console.close()\n raise TimeoutError('Clear Console Timeout error')\n if i == 1:\n console.close()\n raise EofError('Clear Console EOF error')\n if i == 2 or i == 3:\n console.close()\n raise PasswordError('Clear Console, Password error')\n if i == 4:\n logging.info(\"pwd %s\", self.console_pwd)\n if pwdTry == 0:\n console.sendline(self.console_pwd)\n elif pwdTry > 0 and pwdTry <= len(pwdList):\n console.sendline(pwdList[pwdTry - 1])\n self.console_pwd = pwdList[pwdTry-1]\n else:\n console.close()\n raise PasswordError('Clear Console, Password error')\n pwdTry = pwdTry + 1\n if i == 5:\n logging.info(\"console prompt\")\n Utils.update_console_login_details(self.switch_name, self.console_user, self.console_pwd)\n console.sendline('en')\n console.expect(PWD_PROMPT)\n console.sendline(self.console_pwd)\n if i == 6:\n logging.info(\"en console prompt\")\n Utils.update_console_login_details(self.switch_name, self.console_user, self.console_pwd)\n break \n i = console.expect([pexpect.TIMEOUT, pexpect.EOF, r'Bad', r'(?i)incorrect', PWD_PROMPT, CONSOLE_PROMPT, EN_CONSOLE_PROMPT], 5)\n \n po = int(port)%100\n console.sendline('clear line %d'%(po))\n console.sendline('\\r')\n console.expect('confirm')\n console.sendline('\\r')\n console.expect(EN_CONSOLE_PROMPT)\n console.sendline('exit')\n time.sleep(1)\n console.close()\n return", "def clear_ic(self): # -> None:\n ...", "def reset (self):\n self.counter = 0", "def reset(self):\n self.counter = 0", "def reset(*args):", "def reset(*args):", "def reset(*args):", "def clear_node():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"metric\", \"clear\")\n else:\n cmd = 
_traffic_line(\"-c\")\n\n return _subprocess(cmd)", "def _serial_clear(self):\n self.serial.reset_input_buffer()", "def clear_break_cmd(cmd, cnt, args):\n if cnt == 1:\n log(\"Clear break command needs an address\")\n return \n log(\"clear break\"+\" {:08x}\".format(int(args[1], 16)))\n cpu.clear_break(int(args[1],16))", "def _set_unconnected_ports(self):\n for name, ip in self._ips.items():\n count = 0\n inst_args = getattr(self, name)\n ports = ip.get_ports()\n for port in ports:\n full_name = (port_direction_to_prefix(port.direction)\n + port.name)\n if full_name not in inst_args.keys():\n sig = Signal(len(port), name=full_name)\n inst_args[full_name] = sig\n count += 1", "def clear_lldp_counters(device):\r\n try:\r\n device.execute('clear lldp counters')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not clear LLDP counters\"\r\n \"Error: {error}\".format(error=e)\r\n )", "def delete_interconnect(interconnectId=None):\n pass", "def reset(self):\n\n # Issue the reset command\n try:\n self.crate_resetting = True\n # Reset the FRU init status to stop attempts to read the sensors\n self.frus_inited = False\n # Wait a few seconds to allow any existing ipmitool requests to complete\n print(\"reset: Short wait before resetting (2 s)\")\n time.sleep(2.0)\n # Force the records to invalid\n print(\"reset: Force sensor read to set invalid\")\n self.read_sensors()\n print(\"reset: Triggering records to scan\")\n self.scan_list.interrupt()\n self.mch_comms.connected = False\n # Stop the ipmitool session. System will reconnect on restart\n self.mch_comms.ipmitool_shell.terminate()\n time.sleep(2.0)\n #print(\"reset: Killing ipmitool shell process\")\n self.mch_comms.ipmitool_shell.kill()\n self.mch_comms.ipmitool_shell = None\n # Stop the reader thread\n #print(\"reset: Stopping thread\")\n self.mch_comms.stop = True\n # Wait for the thread to stop\n self.mch_comms.t.join()\n #print(\"reset: Thread stopped\")\n self.mch_comms.t = None\n # Allow the thread to restart\n self.mch_comms.stop = False\n #print(\"reset: Exiting \")\n # Reset the crate\n print(\"reset: Resetting crate now\")\n self.mch_comms.call_ipmitool_direct_command([\"raw\", \"0x06\", \"0x03\"])\n\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n # Be silent. We expect this command to timeout.\n print('reset: reset command sent')\n pass\n\n # Reconnect to the crate\n print('reset: reconnecting')\n self.mch_comms.ipmitool_shell_reconnect()", "def reset(self) -> None:\n self.counterpoint = self.counterpoint[0:1]\n self.__initialize_piano_roll()\n self.__set_defaults_to_runtime_variables()", "def cancel_port_forward_request(self, address, port):\n pass", "def resetCounters(self):\n self.chain.zero_counters()\n counters = self.session.query(Counter).all()\n self.session.query(Counter).delete()", "def map_clear_to(self, src_port, dst_port, command_logger=None):\r\n\r\n #Isolate source port number from list provided by Cloudshell\r\n source = src_port[2]\r\n #Define URI to delete rules via REST\r\n uri = 'http://' + self.address + '/rest/rules?'\r\n #Create the parameters for the rule to be deleted from the Packetmaster\r\n params = {'priority': 32768,\r\n 'match[in_port]': source}\r\n #Make REST delete request for the rule to be deleted\r\n try:\r\n response = requests.delete(uri, params=params, auth=(self.username, self.password))\r\n except ConnectionError as e:\r\n raise e", "def reset():" ]
[ "0.64994836", "0.60892713", "0.60308844", "0.6021455", "0.5777132", "0.57342356", "0.57211506", "0.56562513", "0.55634856", "0.54906595", "0.5454911", "0.54512906", "0.5441213", "0.54364866", "0.5391103", "0.53831506", "0.53831506", "0.53831506", "0.5365972", "0.53523785", "0.53455365", "0.53413624", "0.53218144", "0.5306663", "0.5303396", "0.52987397", "0.5297946", "0.5264616", "0.525669", "0.5230972" ]
0.68146515
0
Gets the nameServers details for given Interconnect. This info is for DirectAttach connection info [Arguments]
def fusion_api_get_interconnect_nameservers(self, uri=None, api=None, param='', headers=None): param = '/nameServers%s' % param return self.ic.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_servers_info(self):\n return self.mrr_obj.get('/info/servers')", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def describe_connections_on_interconnect(interconnectId=None):\n pass", "def nameservers(self, irc, msg, args):\n irc.reply('Our Nameservers are NS1.CHEMICALSERVERS.COM and NS2.CHEMICALSERVERS.COM')", "def get_connection_info(self):\n return [(c.fullname, [u[1] for u in c.objects])\n for c in self._connections]", "def name_servers(self) -> Sequence[str]:\n return pulumi.get(self, \"name_servers\")", "def get_server_info_list(self):\n # TODO: 不要では?特に理由がなければ削除する\n result = []\n if self._server_sock is not None:\n result.append(\"Sever address: %s\" %\n str(self._server_sock.getsockname()))\n else:\n result.append(\"Sever address: Not initialized yet.\")\n result.append(\"Handler: %s\" %\n str(self._data_handler.__class__))\n result.append(\"Sessions: %d\" % len(self._sessions))\n for idx, session_thread in enumerate(self._sessions):\n result.append(\"Session[%d]: %s\" % (\n idx, str(session_thread.client_address)))\n return result", "def details(self):\n return self.sock.getsockname()", "def describe_connections(connectionId=None):\n pass", "def server_names(self):\n return self._server_names", "def server_info(ctx):\n data = ctx.obj.get_server_info()\n output_json_data(data)", "def list_conns(self):\n\t\tres = []\n\t\tself.AL.acquire()\n\t\tfor ls in self.ls.keys():\n\t\t\tinfo = self.ls[ls]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Relay\", \"LOCAL\", info[\"local\"], info[\"peer\"],\n\t\t\t\t\tinfo[\"port\"], info[\"got\"], None,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tfor s in self.s2i.keys():\n\t\t\tinfo = self.s2i[s]\n\t\t\tif info[\"creator\"] == self.cid:\n\t\t\t\tfai = \"LOCAL\"\n\t\t\t\ttai = info[\"peer\"]\n\t\t\telse:\n\t\t\t\tfai = info[\"creator\"]\n\t\t\t\ttai = info[\"peer\"]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Conn\", fai, info[\"local\"], tai, info[\"port\"],\n\t\t\t\t\tinfo[\"recv\"], info[\"send\"]\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tself.AL.release()\n\t\treturn res", "def get_interneurons_names(self):\n\t\treturn self._interNeuronsNames", "def __getLocalAndRemoteMachineNames(self):\n hostNameMapping = {}\n ## collect the qualified hostnames for each remote node\n for nodeId in list(set(self.runInfoDict['Nodes'])):\n hostNameMapping[nodeId.strip()] = socket.gethostbyname(nodeId.strip())\n self.raiseADebug('Host \"'+nodeId.strip()+'\" identified with IP: ', hostNameMapping[nodeId.strip()])\n\n return hostNameMapping", "def get_connections(self, id, connection_name, **args):\n return self.request(id + \"/\" + connection_name, args)", "def get_info(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETINFO %s\\r\\n\"%name)\r\n d = {}\r\n for _,msg,more in lines:\r\n if msg == \"OK\":\r\n break\r\n try:\r\n k,rest = msg.split(\"=\",1)\r\n except ValueError:\r\n raise ProtocolError(\"Bad info line %r\",msg)\r\n if more:\r\n d[k] = more\r\n else:\r\n d[k] = rest\r\n return d", "def fetch_info():\n global JOLOKIA_CONNECTIONS\n for connection in JOLOKIA_CONNECTIONS.keys():\n try:\n data = JOLOKIA_CONNECTIONS[connection]['j4p'].getRequests()\n for ele in data:\n parse_info(ele, JOLOKIA_CONNECTIONS[connection]['instance'])\n except Exception, e:\n collectd.error('jolokia plugin: Error at jolokia endpoint %s - %r' % (connection, e))", "def describe_interconnects(interconnectId=None):\n pass", "def lsinfo(name):", "def _get_names(self, item_type):\n data = self.get_json('Get-{} 
-VMMServer $scvmm_server'.format(item_type))\n if data:\n return [item['Name'] for item in data] if isinstance(data, list) else [data[\"Name\"]]\n else:\n return None", "def _get_servername(self):\n #recuperation objet bdd tango\n db = PyTango.Database()\n #recuperation de la liste des servers dans la bdd\n server_list = db.get_server_list()\n server_name = ''\n #pour chaque servers de la liste\n for server in server_list:\n #recuperation de la liste des noms des devices\n lst_devices_address = db.get_device_class_list(server).value_string\n #mise de la liste en lower case\n lst_devices_address_lower = [ i.lower() for i in lst_devices_address]\n #si le nom du device est dans la liste, alors on retourne le nom du serveur\n if self.device_name.lower() in lst_devices_address_lower:\n server_name = server\n return server_name", "def getServerInterfaces(self):\n return self.servers", "def mmo_mongos_servers(self, mmo_connection):\n mongos_servers = []\n c = mmo_connection[\"config\"].mongos.find({}, { \"_id\": 1 } )\n for doc in c:\n hostname, port = doc[\"_id\"].split(\":\")\n mongos_servers.append({ \"hostname\": hostname, \"port\": int(port) })\n return mongos_servers", "def get_server_info(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_GetServerInfo', self.handle))", "def get_servers(self):\n\t\treturn self.__servers", "def get_server_metadata(self, name):\n raise NotImplementedError", "def NamesGet(self):\n ## Make Header\n hex_rep = self.NanonisTCP.make_header('Signals.NamesGet', body_size=0)\n \n self.NanonisTCP.send_command(hex_rep)\n \n response = self.NanonisTCP.receive_response()\n \n # signals_names_size = self.NanonisTCP.hex_to_int32(response[0:4])\n signals_names_num = self.NanonisTCP.hex_to_int32(response[4:8])\n \n idx = 8\n signal_names = []\n for n in range(signals_names_num):\n size = self.NanonisTCP.hex_to_int32(response[idx:idx+4])\n idx += 4\n signal_name = response[idx:idx+size].decode()\n idx += size\n signal_names.append(signal_name)\n \n return signal_names", "def info(self, name=None):\n data = self.cloudman.list_servers(filters={'name': name})\n\n \"\"\"\n vms = self.list()\n print (\"VMS\", vms)\n data = None\n for entry in vms:\n print (\"FFF\", entry['name'])\n if entry['name'] == name:\n data = entry\n break\n \"\"\"\n\n if data is None:\n raise ValueError(f\"vm not found {name}\")\n\n r = self.update_dict(data, kind=\"vm\")\n return r", "def rpc_info():", "def do_serverinfo(self, server):\n print('QManager server:', self._server)\n server_info = self._qm.get_server_info()\n for k, v in server_info.items():\n print(' %s: %s' % (k, v))" ]
[ "0.5718423", "0.5709898", "0.5665074", "0.54669225", "0.54476196", "0.5404155", "0.5365162", "0.53490686", "0.523069", "0.5230685", "0.51507515", "0.5106366", "0.5093724", "0.50623184", "0.500052", "0.4969527", "0.49592245", "0.49448416", "0.49219626", "0.49218157", "0.49121836", "0.49121097", "0.48742092", "0.48640952", "0.48418254", "0.48185357", "0.47975215", "0.47958252", "0.47946334", "0.47903776" ]
0.66284454
0
Gets the certificate details for givenInterconnect [Arguments]
def fusion_api_get_certificate_info(self, uri=None, api=None, param='', headers=None): param = '/certificates/https/' return self.ic.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ssl_certificate():", "def get_ssl_certificate() :", "def certificate(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> str:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"certificate\")", "def Certificate(self) -> _n_8_t_0:", "def Certificate(self) -> _n_8_t_0:", "def DescribeCertificateDetail(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeCertificateDetail\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeCertificateDetailResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def certificate(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"certificate\")", "def _extract_certificate_chain(connection):\n logger = getLogger(__name__)\n cert_data = {}\n logger.debug(\n \"# of certificates: %s\",\n len(connection.get_peer_cert_chain()))\n\n for cert in connection.get_peer_cert_chain():\n logger.debug(\n u'subject: %s, issuer: %s', cert.get_subject(),\n cert.get_issuer())\n data = _extract_values_from_certificate(cert)\n logger.debug('is_root_ca: %s', data[u'is_root_ca'])\n cert_data[cert.get_subject().der()] = data\n return _create_pair_issuer_subject(cert_data)", "def info_from_args(args):\n return CertInfo(\n subject=parse_dn(args.subject),\n usage=parse_list(args.usage),\n alt_names=parse_list(args.san),\n ocsp_nocheck=args.ocsp_nocheck,\n ocsp_must_staple=args.ocsp_must_staple,\n ocsp_must_staple_v2=args.ocsp_must_staple_v2,\n ocsp_urls=parse_list(args.ocsp_urls),\n crl_urls=parse_list(args.crl_urls),\n issuer_urls=parse_list(args.issuer_urls),\n permit_subtrees=parse_list(args.permit_subtrees),\n exclude_subtrees=parse_list(args.exclude_subtrees),\n ca=args.CA,\n path_length=args.path_length)", "def fetch_x509_context(self) -> X509Context:", "def get_certinfo(doc):\n\n #set a two second default timeout to recieve a cert\n socket.setdefaulttimeout(2)\n doc['ssl'] = {} \n\n try:\n cert = ssl.get_server_certificate((doc['hostname'], 443))\n #sometimes certs come back as unicode so cast to str() aka ascii\n cert = M2Crypto.X509.load_cert_string(str(cert))\n\n except:\n syslog.syslog('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n print('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n #lets remove the ssl key and return the doc untouched\n doc.pop('ssl')\n return doc\n\n\n #get creation date\n doc['ssl']['created'] = cert.get_not_before().get_datetime().isoformat()\n #get not valid after, aka expiration data\n doc['ssl']['expire'] = cert.get_not_after().get_datetime().isoformat()\n #get issuer information\n doc['ssl']['issuer'] = cert.get_issuer().as_text()\n #get subject information\n doc['ssl']['subject'] = cert.get_subject().as_text()\n #get keysize, size() returns in bytes, so we multiply * 8 to get the number of bits\n doc['ssl']['keysize'] = cert.get_pubkey().size() * 8\n #get cert fingerprint for comparison\n doc['ssl']['fingerprint'] = cert.get_fingerprint()\n\n return doc", "def 
certificate_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"certificate_id\")", "def certificate_id(self) -> str:\n return pulumi.get(self, \"certificate_id\")", "def find_certificate(p): # find_certificate(props, /)\n\n for page in acm.get_paginator('list_certificates').paginate():\n for certificate in page['CertificateSummaryList']:\n log_info(certificate)\n\n if p['DomainName'].lower() == certificate['DomainName']:\n tags = {tag['Key']: tag['Value'] for tag in\n acm.list_tags_for_certificate(**{'CertificateArn': certificate['CertificateArn']})['Tags']}\n\n if (tags.get('cloudformation:' + 'logical-id') == e['LogicalResourceId'] and\n tags.get('cloudformation:' + 'stack-id') == e['StackId'] and\n tags.get('cloudformation:' + 'properties') == hash_func(p)\n ):\n return certificate['CertificateArn']", "def getCertifications(self):\n return [c for c in self.objectValues('InstrumentCertification') if c]", "def credential_get(uniqueID: str):\n\n cert = safeisland.certificate(uniqueID)\n return {\"payload\": cert}", "def get_certificate_transparency_connection(self):\n return self.m_connection.ct_certs", "def cert_info(user, course):\r\n if not course.may_certify():\r\n return {}\r\n\r\n return _cert_info(user, course, certificate_status_for_student(user, course.id))", "def get_certificate(self, cert_id):\r\n return self.ssl.getObject(id=cert_id)", "def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")", "def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")" ]
[ "0.64243686", "0.6397666", "0.60701257", "0.60701257", "0.60701257", "0.5937687", "0.5876968", "0.5876968", "0.5870272", "0.5870272", "0.58246243", "0.58152014", "0.5779792", "0.5753348", "0.5748844", "0.57136863", "0.5678284", "0.56662375", "0.5600036", "0.5583016", "0.5573438", "0.55653685", "0.55623275", "0.5558074", "0.5532256", "0.5532256", "0.5532256", "0.5532256", "0.5532256", "0.5532256" ]
0.6490048
0
Uploads the certificate details for the given Interconnect [Arguments]
def fusion_api_upload_certificate_info(self, body, uri=None, api=None, param='', headers=None):
    param = '/certificates/https/'
    return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload(name, certfile, keyfile, chainfile):\n try:\n cmd = client().certificates.upload\n job, data = cmd(name, certfile, keyfile, chainfile)\n handle_job(job)\n except Exception as e:\n raise CLIException(str(e))", "def Run(self, args):\n holder = base_classes.ComputeApiHolder(self.ReleaseTrack())\n client = holder.client\n\n ssl_certificate_ref = self.SSL_CERTIFICATE_ARG.ResolveAsResource(\n args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL)\n\n certificate = files.ReadFileContents(args.certificate)\n private_key = files.ReadFileContents(args.private_key)\n\n if ssl_certificates_utils.IsRegionalSslCertificatesRef(ssl_certificate_ref):\n request = client.messages.ComputeRegionSslCertificatesInsertRequest(\n sslCertificate=client.messages.SslCertificate(\n name=ssl_certificate_ref.Name(),\n certificate=certificate,\n privateKey=private_key,\n description=args.description),\n region=ssl_certificate_ref.region,\n project=ssl_certificate_ref.project)\n collection = client.apitools_client.regionSslCertificates\n else:\n request = client.messages.ComputeSslCertificatesInsertRequest(\n sslCertificate=client.messages.SslCertificate(\n name=ssl_certificate_ref.Name(),\n certificate=certificate,\n privateKey=private_key,\n description=args.description),\n project=ssl_certificate_ref.project)\n collection = client.apitools_client.sslCertificates\n\n return client.MakeRequests([(collection, 'Insert', request)])", "def Certificate(self) -> _n_8_t_0:", "def Certificate(self) -> _n_8_t_0:", "def _Run(args, holder, ssl_certificate_ref):\n client = holder.client\n\n certificate_type, self_managed, managed = _ParseCertificateArguments(\n client, args)\n\n if ssl_certificates_utils.IsRegionalSslCertificatesRef(ssl_certificate_ref):\n request = client.messages.ComputeRegionSslCertificatesInsertRequest(\n sslCertificate=client.messages.SslCertificate(\n type=certificate_type,\n name=ssl_certificate_ref.Name(),\n selfManaged=self_managed,\n managed=managed,\n description=args.description),\n region=ssl_certificate_ref.region,\n project=ssl_certificate_ref.project)\n else:\n request = client.messages.ComputeSslCertificatesInsertRequest(\n sslCertificate=client.messages.SslCertificate(\n type=certificate_type,\n name=ssl_certificate_ref.Name(),\n selfManaged=self_managed,\n managed=managed,\n description=args.description),\n project=ssl_certificate_ref.project)\n\n if ssl_certificates_utils.IsRegionalSslCertificatesRef(ssl_certificate_ref):\n collection = client.apitools_client.regionSslCertificates\n else:\n collection = client.apitools_client.sslCertificates\n\n return client.MakeRequests([(collection, 'Insert', request)])", "def sign_handler(args):\n if not args.issuer_key and not args.issuer_cert:\n key = _get_key(args)\n subject = get_subject_arguments()\n\n cert = selfsigned_certificate_for_key(\n key,\n subject=subject,\n serial_number=int(args.serial_number),\n length=args.duration,\n file_name=args.cert_out\n )\n\n else:\n req = _get_request(args)\n issuer_cert = load_certificate(args.issuer_cert)\n issuer_key = load_key(args.issuer_key)\n cert = sign_request(\n req,\n issuer_cert=issuer_cert,\n issuer_key=issuer_key,\n length=args.duration,\n file_name=args.cert_out\n )\n\n if not args.cert_out:\n print(print_certificate(cert))", "def sign_command(args):\n if args.files:\n die(\"Unexpected positional arguments\")\n\n # Load certificate request\n if not args.request:\n die(\"Need --request\")\n subject_csr = load_req(args.request)\n\n reset_info = None\n if args.reset:\n 
reset_info = info_from_args(args)\n\n # Load CA info\n if not args.ca_info:\n die(\"Need --ca-info\")\n if args.ca_info.endswith('.csr'):\n issuer_obj = load_req(args.ca_info)\n else:\n issuer_obj = load_cert(args.ca_info)\n\n # Load CA private key\n issuer_key = load_key(args.ca_key, load_password(args.password_file))\n if not same_pubkey(issuer_key, issuer_obj):\n die(\"--ca-private-key does not match --ca-info data\")\n\n # Certificate generation\n cert = do_sign(subject_csr, issuer_obj, issuer_key, args.days, args.path_length, args.request, reset_info=reset_info)\n\n # Write certificate\n do_output(cert_to_pem(cert), args, 'x509')", "def CreateRequests(self, args):\n\n ssl_certificate_ref = self.SSL_CERTIFICATE_ARG.ResolveAsResource(\n args, self.resources)\n certificate = file_utils.ReadFile(args.certificate, 'certificate')\n private_key = file_utils.ReadFile(args.private_key, 'private key')\n\n request = self.messages.ComputeSslCertificatesInsertRequest(\n sslCertificate=self.messages.SslCertificate(\n name=ssl_certificate_ref.Name(),\n certificate=certificate,\n privateKey=private_key,\n description=args.description),\n project=self.project)\n\n return [request]", "def fusion_api_generate_certificate_signing_request(self, body, api=None, headers=None):\n return self.wsc.post(body, api=api, headers=headers)", "def fusion_api_create_certificate_request(self, body, uri=None, api=None, param='', headers=None):\n param = '/certificates/https/certificaterequest'\n return self.ic.post(uri=uri, body=body, api=api, headers=headers, param=param)", "def upload(pcap_file, fingerprint_path, key):\r\n files = {\r\n \"json\": open(fingerprint_path, \"rb\"),\r\n \"pcap\": open(pcap_file, \"rb\")\r\n }\r\n headers = {\r\n \"X-Username\": \"<USER>\",\r\n \"X-Password\": \"<PASSWORD>\",\r\n \"X-Filename\": key\r\n }\r\n ddosdb_url = \"https://ddosdb.org/\"\r\n r = requests.post(ddosdb_url+\"upload-file\", files=files, headers=headers)\r\n\r\n print(r.status_code)", "def enroll_certificate(self, kwargs):\n return self.__query(\"certificateEnroll\", kwargs)", "def get_ssl_certificate() :", "def put_certificate(self, target, who, args, _files, _user_path):\n name = self.arg_get(args, 'name', str)\n if not commonl.verify_str_safe(name, do_raise = False):\n raise ValueError(\n f\"{name}: invalid certificate name, only [-_a-zA-Z0-9] allowed\")\n\n with target.target_owned_and_locked(who):\n target.timestamp()\n\n cert_path = os.path.join(target.state_dir, \"certificates\")\n cert_client_path = os.path.join(target.state_dir, \"certificates_client\")\n self._setup_maybe(target, cert_path, cert_client_path)\n\n client_key_path = os.path.join(cert_client_path, name + \".key\")\n client_req_path = os.path.join(cert_client_path, name + \".req\")\n client_cert_path = os.path.join(cert_client_path, name + \".cert\")\n\n if os.path.isfile(client_key_path) \\\n and os.path.isfile(client_cert_path):\t# already made?\n with open(client_key_path) as keyf, \\\n open(client_cert_path) as certf:\n return dict({\n \"name\": name,\n \"created\": False,\n \"key\": keyf.read(),\n \"cert\": certf.read(),\n })\n\n try:\n subprocess.run(\n f\"openssl genrsa -out {client_key_path} {self.key_size}\".split(),\n stdin = None, timeout = 5,\n capture_output = True, cwd = cert_path, check = True)\n allocid = target.fsdb.get(\"_alloc.id\", \"UNKNOWN\")\n subprocess.run(\n f\"openssl req -new -key {client_key_path} -out {client_req_path}\"\n f\" -subj 
/C=LC/ST=Local/L=Local/O=TCF-Signing-Authority-{target.id}-{allocid}/CN=TCF-{name}\".split(),\n check = True, cwd = cert_path,\n stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n target.log.debug(f\"{name}: created client's certificate\")\n\n # Issue the client certificate using the cert request and the CA cert/key.\n # note we run in the cert_path directory, so the ca.*\n # files are there\n subprocess.run(\n f\"openssl x509 -req -in {client_req_path} -CA ca.cert\"\n \" -CAkey ca.key -set_serial 101 -extensions client\"\n f\" -days 365 -outform PEM -out {client_cert_path}\".split(),\n stdin = None, timeout = 5,\n capture_output = True, cwd = cert_path, check = True)\n except subprocess.CalledProcessError as e:\n target.log.error(f\"command {' '.join(e.cmd)} failed: {e.output}\")\n self._client_wipe(name, cert_client_path)\t# don't leave things half there\n raise\n\n with open(client_key_path) as keyf, \\\n open(client_cert_path) as certf:\n return dict({\n \"name\": name,\n \"created\": True,\n \"key\": keyf.read(),\n \"cert\": certf.read(),\n })", "def __init__(__self__, *,\n certificate: Optional[pulumi.Input[str]] = None,\n certificate_id: Optional[pulumi.Input[str]] = None,\n certificate_name: Optional[pulumi.Input[str]] = None,\n domain: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None):\n if certificate is not None:\n pulumi.set(__self__, \"certificate\", certificate)\n if certificate_id is not None:\n pulumi.set(__self__, \"certificate_id\", certificate_id)\n if certificate_name is not None:\n pulumi.set(__self__, \"certificate_name\", certificate_name)\n if domain is not None:\n pulumi.set(__self__, \"domain\", domain)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if private_key is not None:\n pulumi.set(__self__, \"private_key\", private_key)", "def __init__(__self__,\n resource_name: str,\n args: CertificateArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def main():\n # This is used to store the certificate filename\n cert = \"\"\n\n # Setup a signal handler to catch control-c and clean up the cert temp file\n # No way to catch sigkill so try not to do that.\n # noinspection PyUnusedLocal\n def sigint_handler(sig, frame): # pylint:disable=unused-argument\n \"\"\"Handle interrupt signals.\"\"\"\n if not args.cert:\n try:\n os.unlink(cert)\n except OSError: # pylint:disable=pointless-except\n pass\n print \"Exiting...\"\n sys.exit(0)\n\n parser = ArgumentParser('Remote APIC API Inspector and GUI Log Server')\n\n parser.add_argument('-a', '--apicip', required=False, default='8.8.8.8',\n help='If you have a multihomed system, where the ' +\n 'apic is on a private network, the server will ' +\n 'print the ip address your local system has a ' +\n 'route to 8.8.8.8. 
If you want the server to ' +\n 'print a more accurate ip address for the ' +\n 'server you can tell it the apicip address.')\n\n parser.add_argument('-c', '--cert', type=str, required=False,\n help='The server certificate file for ssl ' +\n 'connections, default=\"server.pem\"')\n\n parser.add_argument('-d', '--delete_imdata', action='store_true',\n default=False, required=False,\n help='Strip the imdata from the response and payload')\n\n parser.add_argument('-e', '--exclude', action='append', nargs='*',\n default=[], choices=['subscriptionRefresh',\n 'aaaRefresh',\n 'aaaLogout',\n 'HDfabricOverallHealth5min-0',\n 'topInfo', 'all'],\n help='Exclude certain types of common noise queries.')\n\n parser.add_argument('-i', '--indent', type=int, default=2, required=False,\n help='The number of spaces to indent when pretty ' +\n 'printing')\n\n parser.add_argument('-l', '--location', default='/apiinspector',\n required=False,\n help='Location that transaction logs are being ' +\n 'sent to, default=/apiinspector')\n\n parser.add_argument('-n', '--nice-output', action='store_true',\n default=False, required=False,\n help='Pretty print the response and payload')\n\n parser.add_argument('-p', '--port', type=int, required=False, default=8987,\n help='Local port to listen on, default=8987')\n\n parser.add_argument('-s', '--sslport', type=int, required=False,\n default=8443,\n help='Local port to listen on for ssl connections, ' +\n 'default=8443')\n\n parser.add_argument('-r', '--requests-log', action='store_true',\n default=False, required=False,\n help='Log server requests and response codes to ' +\n 'standard error')\n\n parser.add_argument('-t', '--title', default='SimpleAciUiLogServer',\n required=False,\n help='Change the name shown for this application ' +\n 'when accessed with a GET request')\n\n parser.add_argument('-ty', '--type', action='append', nargs='*',\n default=['all'], choices=['POST', 'GET', 'undefined',\n 'EventChannelMessage'],\n help='Limit logs to specific request types.')\n\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s - \\n%(message)s')\n if args.exclude:\n # Flatten the list\n args.exclude = [val for sublist in args.exclude for val in sublist]\n\n if not args.location.startswith(\"/\"):\n args.location = \"/\" + str(args.location)\n\n if args.type:\n # Flatten the list\n args.type = [val for sublist in args.type for val in sublist]\n\n ThreadingSimpleAciUiLogServer.prettyprint = args.nice_output\n ThreadingSimpleAciUiLogServer.indent = args.indent\n ThreadingSimpleAciUiLogServer.strip_imdata = args.delete_imdata\n\n # Instantiate a http server\n http_server = ThreadingSimpleAciUiLogServer((\"\", args.port),\n log_requests=args.requests_log,\n location=args.location,\n excludes=args.exclude,\n app_name=args.title)\n\n if not args.cert:\n # Workaround ssl wrap socket not taking a file like object\n cert_file = tempfile.NamedTemporaryFile(delete=False)\n cert_file.write(SERVER_CERT)\n cert_file.close()\n cert = cert_file.name\n print(\"\\n+++WARNING+++ Using an embedded self-signed certificate for \" +\n \"HTTPS, this is not secure.\\n\")\n else:\n cert = args.cert\n\n # Instantiate a https server as well\n https_server = ThreadingSimpleAciUiLogServer((\"\", args.sslport),\n cert=cert,\n location=args.location,\n log_requests=args.requests_log,\n excludes=args.exclude,\n app_name=args.title)\n\n signal.signal(signal.SIGINT, sigint_handler) # Or whatever signal\n\n # Example of registering a function for a specific 
method. The funciton\n # needs to exist of course. Note: undefined seems to be the same as a\n # GET but the logging facility on the APIC seems to get in a state where\n # instead of setting the method properly it sets it to undefined.\n # These registered functions could then be used to take specific actions or\n # be silent for specific methods.\n # http_server.register_function(GET)\n # http_server.register_function(POST)\n # http_server.register_function(HEAD)\n # http_server.register_function(DELETE)\n # http_server.register_function(undefined)\n # http_server.register_function(EventChannelMessage)\n\n # This simply sets up a socket for UDP which has a small trick to it.\n # It won't send any packets out that socket, but this will allow us to\n # easily and quickly interogate the socket to get the source IP address\n # used to connect to this subnet which we can then print out to make for\n # and easy copy/paste in the APIC UI.\n ip_add = [(s.connect((args.apicip, 80)), s.getsockname()[0], s.close()) for\n s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]\n\n print(\"Servers are running and reachable via:\\n\")\n print(\"http://\" + str(ip_add) + \":\" + str(args.port) + args.location)\n print(\"https://\" + str(ip_add) + \":\" + str(args.sslport) + args.location +\n \"\\n\")\n print(\"Make sure your APIC(s) are configured to send log messages: \" +\n \"welcome username -> Start Remote Logging\")\n print(\"Note: If you connect to your APIC via HTTPS, configure the \" +\n \"remote logging to use the https server.\")\n serve_forever([http_server, https_server])", "def cat_int_pay():\n print(colors.Color.BLUE + \"Make the payment with digital certificate\" + colors.Color.END)\n pay_and_certificate = urllib.parse.quote(\n 'identitats.aoc.cat/o/oauth2/auth?response_type=code&client_id=tramits.'\n 'transit.cat&redirect_uri=https'\n '://multestransit.gencat.cat/sctPagaments/AppJava/loginIdCat&scope='\n 'autenticacio_usuari&access_type=online'\n '&approval_pompt=false&state=ca_ES')\n print('https://' + pay_and_certificate)\n print(colors.Color.BLUE + \"Make the payment without digital certificate\"\n + colors.Color.END)\n pay_without_certificate = urllib.parse.quote(\n 'multestransit.gencat.cat/sctPagaments/AppJava/views/expedients/cerca.'\n 'xhtml?set-locale=ca_ES')\n print('https://' + pay_without_certificate)", "def req_handler(args):\n key = _get_key(args)\n subject = get_subject_arguments()\n req = create_certificate_request(key, subject=subject, file_name=args.req_out)\n if not args.req_out:\n print(print_certificate_request(req))\n return req", "def __init__(__self__,\n resource_name: str,\n args: ServerCertificateArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def main():\n ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'\n #cert_file_name = os.path.join(os.path.dirname(__file__), \"testcert.pem\")\n\n parser = argparse.ArgumentParser(description='Parse a certificate and show days left')\n parser.add_argument('-v', '--verbose', action='store_true', help='show full certificate')\n parser.add_argument('cert', nargs='+', help='certifcate file(s)')\n args = parser.parse_args()\n for cert_file_name in args.cert:\n try:\n cert_dict = ssl._ssl._test_decode_cert(cert_file_name)\n serial = cert_dict['serialNumber']\n subject = dict(x[0] for x in cert_dict['subject'])\n issued_to = subject['commonName']\n time_left = datetime.datetime.strptime(cert_dict['notAfter'], ssl_date_fmt) - datetime.datetime.utcnow()\n if args.verbose:\n pp(cert_dict)\n ssl_expires_in(issued_to, 
serial, time_left)\n\n except Exception as error:\n print(\"Error decoding certificate: {:}\".format(error))", "def req_command(args):\n if args.files:\n die(\"Unexpected positional arguments\")\n\n subject_info = info_from_args(args)\n\n if subject_info.ca:\n msg('Request for CA cert')\n else:\n msg('Request for end-entity cert')\n subject_info.show(msg_show)\n\n # Load private key, create signing request\n key = load_key(args.key, load_password(args.password_file))\n req = create_x509_req(key, subject_info)\n do_output(req_to_pem(req), args, 'req')", "def upload_signing_cert(self, cert_body, user_name=None):\r\n params = {'CertificateBody' : cert_body}\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('UploadSigningCertificate', params,\r\n verb='POST')", "def fusion_api_import_appliance_certificate(self, body, api=None, headers=None, param=''):\n return self.wsc.put(body, api=api, headers=headers, param=param)", "def create_ssl_cert_request ( ssl_hostnames ) :\n first_hostname = ssl_hostnames[ 0 ]\n csr_filename = get_ssl_csr_filename( first_hostname )\n key_filename = get_ssl_key_filename( first_hostname )\n openssl_cnf = \"\"\"\n[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = san_ext\n\n[req_distinguished_name]\ncountryName_default = US\nstateOrProvinceName_default = New York\nlocalityName_default = New York\norganizationalUnitName_default = Home Box Office, Inc\ncommonName_default = \"\"\" + first_hostname + \"\"\"\n\n[san_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @sans\n\n[sans]\n\"\"\"\n counter = 0\n for hostname in ssl_hostnames :\n counter += 1\n openssl_cnf += 'DNS.' + str( counter ) + ' = ' + hostname + '\\n'\n\n with open( first_hostname, 'w' ) as f :\n f.write( openssl_cnf )\n cmd = 'openssl req -new -newkey rsa:2048 -nodes -out ' + csr_filename + ' -keyout ' + key_filename\n cmd += ' -config ' + first_hostname + ' -subj \"/C=US/ST=New York/L=New York/O=Home Box Office Inc/CN=' + first_hostname + '\"'\n keygen = subprocess.call( cmd, shell = True )\n os.remove( first_hostname )\n if keygen != 0 :\n print \"Generation of SSL request failed!\"\n return None\n\n return { 'csr-filename' : csr_filename, 'key-filename' : key_filename }", "def request_cert():\n\n api_request = shallow_copy(props)\n\n for key in ['ServiceToken', 'Region', 'Tags', 'Route53RoleArn']:\n api_request.pop(key, None)\n\n if 'ValidationMethod' in props:\n if props['ValidationMethod'] == 'DNS':\n\n # Check that we have all the hosted zone information we need to validate\n # before we create the certificate\n for name in set([props['DomainName']] + props.get('SubjectAlternativeNames', [])):\n get_zone_for(name)\n\n del api_request['DomainValidationOptions']\n\n e['PhysicalResourceId'] = acm.request_certificate(\n IdempotencyToken=i_token,\n **api_request\n )['CertificateArn']\n add_tags()", "def create_selfsigned_certificates(name):\n pass", "def get_ssl_certificate():", "def fusion_api_import_client_certificate(self, body, api=None, headers=None):\n return self.client_certificate.post(body, api, headers)", "def _sign_cert(self, cert):\n with open(self._get_key_link(self.commonname), 'r') as private_file:\n data = private_file.read()\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM,\n data)\n cert.sign(pkey, 'sha256')" ]
[ "0.61991155", "0.5405356", "0.5383243", "0.5383243", "0.5336044", "0.5322777", "0.5262495", "0.5231196", "0.52188206", "0.5192744", "0.5103612", "0.5070024", "0.50543654", "0.5051316", "0.5041267", "0.4959368", "0.4957495", "0.49389854", "0.49315566", "0.4930307", "0.4893738", "0.4892272", "0.4891393", "0.48912954", "0.48756087", "0.48664245", "0.485084", "0.48473966", "0.48447135", "0.48438826" ]
0.6003757
1
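For context, a minimal usage sketch of the fusion_api_upload_certificate_info document above; the fusion client handle and the certificate body fields are assumptions for illustration, not taken from the source:

# Hypothetical sketch -- 'fusion' and the body shape are assumed, not from the source.
cert_body = {'base64Data': '-----BEGIN CERTIFICATE-----\n...'}  # placeholder payload
resp = fusion.fusion_api_upload_certificate_info(body=cert_body,
                                                 uri='/rest/interconnects/<id>')
# The helper PUTs to <uri>/certificates/https/, so resp carries that PUT's status.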
GET on /rest/interconnectlinktopologies [Example] ${resp} = Fusion Api Get Interconnect Link Topology | |
def fusion_api_get_interconnect_link_topology(self, api=None, headers=None):
    return self.ilt.get(api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_topology(odl_url, odl_usr, odl_pass):\n if odl_url.endswith('/'):\n odl_url = odl_url[:-1]\n topology_url = odl_url + '/network-topology:network-topology/'\n topology_json = call_odl_api(odl_usr, odl_pass, topology_url)\n return topology_json", "def showtopologies():\n middleware.protocolObj.showTopologies()", "def select_all_topologies(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM topologies_topology\")\n \n rows = cur.fetchall()\n \n for row in rows:\n print(row)", "def obtain_port_correspondence(self):\n try:\n of_response = requests.get(self.url + \"restconf/operational/opendaylight-inventory:nodes\",\n headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"obtain_port_correspondence \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)\n self.logger.debug(\"obtain_port_correspondence \" + error_text)\n info = of_response.json()\n\n if not isinstance(info, dict):\n self.logger.error(\"obtain_port_correspondence. Unexpected response not a dict: %s\", str(info))\n raise OpenflowConnUnexpectedResponse(\"Unexpected openflow response, not a dict. Wrong version?\")\n\n nodes = info.get('nodes')\n if not isinstance(nodes, dict):\n self.logger.error(\"obtain_port_correspondence. Unexpected response at 'nodes', \"\n \"not found or not a dict: %s\", str(type(nodes)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes',not found or not a dict. \"\n \"Wrong version?\")\n\n node_list = nodes.get('node')\n if not isinstance(node_list, list):\n self.logger.error(\"obtain_port_correspondence. Unexpected response, at 'nodes':'node', \"\n \"not found or not a list: %s\", str(type(node_list)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, at 'nodes':'node', not found or not a list.\"\n \" Wrong version?\")\n\n for node in node_list:\n node_id = node.get('id')\n if node_id is None:\n self.logger.error(\"obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', \"\n \"not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'id', not found. \"\n \"Wrong version?\")\n\n if node_id == 'controller-config':\n continue\n\n # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value\n # of the dpid\n # In case this is not the desired switch, continue\n if self.id != node_id:\n continue\n\n node_connector_list = node.get('node-connector')\n if not isinstance(node_connector_list, list):\n self.logger.error(\"obtain_port_correspondence. Unexpected response at \"\n \"'nodes':'node'[]:'node-connector', not found or not a list: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'node-connector', \"\n \"not found or not a list. Wrong version?\")\n\n for node_connector in node_connector_list:\n self.pp2ofi[str(node_connector['flow-node-inventory:name'])] = str(node_connector['id'])\n self.ofi2pp[node_connector['id']] = str(node_connector['flow-node-inventory:name'])\n\n node_ip_address = node.get('flow-node-inventory:ip-address')\n if node_ip_address is None:\n self.logger.error(\"obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found. 
Wrong version?\")\n\n # If we found the appropriate dpid no need to continue in the for loop\n break\n\n # print self.name, \": obtain_port_correspondence ports:\", self.pp2ofi\n return self.pp2ofi\n except requests.exceptions.RequestException as e:\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"obtain_port_correspondence \" + error_text)\n raise OpenflowConnConnectionException(error_text)\n except ValueError as e:\n # ValueError in the case that JSON can not be decoded\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"obtain_port_correspondence \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def describe_interconnects(interconnectId=None):\n pass", "def test_get_topology_template(self):\n pass", "def GetTopologyStatus(self, *args, **kwargs):\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"getTopologyStatus\", payload=payload, response_object=None)", "def __get_topologies(self):\n return etree.tostring(self.__topologies, pretty_print=True)", "def get_switch_interface(pop_url, pop_id, uuid):\n odl_info = _get_odl_info(pop_url, pop_id)\n topology_json = get_topology(odl_info[0], odl_info[1], odl_info[2])\n results = {}\n for topology in topology_json['network-topology']['topology']:\n nodes = topology['node']\n for node in nodes:\n node_features_json = get_node_features(odl_info[0], odl_info[1], odl_info[2], node['node-id'])\n if 'node' in node_features_json:\n for node_json in node_features_json['node']:\n if 'flow-node-inventory:serial-number' in node_json \\\n and node_json['flow-node-inventory:serial-number'].strip() != 'None':\n if 'node-connector' in node_json:\n for connector in node_json['node-connector']:\n if connector['id'] == uuid:\n results['name'] = connector.get('flow-node-inventory:name', '')\n results['attributes'] = {}\n results['attributes']['port-number'] = \\\n connector.get('flow-node-inventory:port-number', '')\n results['attributes']['current-speed'] = \\\n connector.get('flow-node-inventory:current-speed', '')\n results['attributes']['flow-capable-node-connector-statistics'] = \\\n connector.get(\n 'opendaylight-port-statistics:flow-capable-node-connector-statistics', '')\n results['attributes']['advertised-features'] = \\\n connector.get('flow-node-inventory:advertised-features', '')\n results['attributes']['configuration'] = \\\n connector.get('flow-node-inventory:configuration', '')\n results['attributes']['hardware-address'] = \\\n connector.get('flow-node-inventory:hardware-address', '')\n results['attributes']['maximum-speed'] = \\\n connector.get('flow-node-inventory:maximum-speed', '')\n results['attributes']['state'] = \\\n connector.get('flow-node-inventory:state', '')\n results['attributes']['supported'] = \\\n connector.get('flow-node-inventory:supported', '')\n results['attributes']['current-feature'] = \\\n connector.get('flow-node-inventory:current-feature', '')\n results['attributes']['peer-features'] = \\\n connector.get('flow-node-inventory:peer-features', '')\n return results", "def get_of_switches(self):\n try:\n of_response = requests.get(self.url + \"restconf/operational/opendaylight-inventory:nodes\",\n 
headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"get_of_switches \" + error_text)\n raise OpenflowConnUnexpectedResponse(\"Error get_of_switches \" + error_text)\n\n self.logger.debug(\"get_of_switches \" + error_text)\n info = of_response.json()\n\n if not isinstance(info, dict):\n self.logger.error(\"get_of_switches. Unexpected response, not a dict: %s\", str(info))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, not a dict. Wrong version?\")\n\n nodes = info.get('nodes')\n if type(nodes) is not dict:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s\",\n str(type(info)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes', not found or not a dict.\"\n \" Wrong version?\")\n\n node_list = nodes.get('node')\n if type(node_list) is not list:\n self.logger.error(\"get_of_switches. Unexpected response, at 'nodes':'node', \"\n \"not found or not a list: %s\", str(type(node_list)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, at 'nodes':'node', not found \"\n \"or not a list. Wrong version?\")\n\n switch_list = []\n for node in node_list:\n node_id = node.get('id')\n if node_id is None:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s\",\n str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'id', not found. \"\n \"Wrong version?\")\n\n if node_id == 'controller-config':\n continue\n\n node_ip_address = node.get('flow-node-inventory:ip-address')\n if node_ip_address is None:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:\"\n \"ip-address', not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found. Wrong version?\")\n\n node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)\n switch_list.append((':'.join(a+b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])),\n node_ip_address))\n return switch_list\n\n except requests.exceptions.RequestException as e:\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_switches \" + error_text)\n raise OpenflowConnConnectionException(error_text)\n except ValueError as e:\n # ValueError in the case that JSON can not be decoded\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_switches \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)", "def init_topology():\n\n instances = gm.invoke_graph_instance_list()\n logger.info(\"instances %s\", instances)\n if \"error\" in instances.keys():\n logger.warning(\n \"[HttpOperationError] Probably caused by invalid IoTHub connection string. 
The server will terminate in 10 seconds.\"\n )\n time.sleep(10)\n sys.exit(-1)\n if instances[\"status\"] != 200:\n logger.warning(\"Failed to invoke direct method: %s\",\n instances[\"payload\"])\n return -1\n logger.info(\n \"========== Deleting %s instance(s) ==========\",\n len(instances[\"payload\"][\"value\"]),\n )\n\n for i in range(len(instances[\"payload\"][\"value\"])):\n gm.invoke_graph_instance_deactivate(\n instances[\"payload\"][\"value\"][i][\"name\"])\n gm.invoke_graph_instance_delete(\n instances[\"payload\"][\"value\"][i][\"name\"])\n\n topologies = gm.invoke_graph_topology_list()\n if instances[\"status\"] != 200:\n logger.warning(\"Failed to invoker direct method: %s\",\n instances[\"payload\"])\n return -1\n logger.info(\n \"========== Deleting %s topology ==========\",\n len(topologies[\"payload\"][\"value\"]),\n )\n\n for i in range(len(topologies[\"payload\"][\"value\"])):\n gm.invoke_graph_topology_delete(\n topologies[\"payload\"][\"value\"][i][\"name\"])\n\n logger.info(\"========== Setting default grpc/http topology ==========\")\n ret = gm.invoke_topology_set(\"grpc\")\n ret = gm.invoke_topology_set(\"http\")\n\n return 1", "def get_device_traffic(context,target):\n\n result = context.get_operation('get_interfaces_traffic')\n return result", "def get_node_features(odl_url, odl_usr, odl_pass, node_id):\n if odl_url.endswith('/'):\n odl_url = odl_url[:-1]\n inventory_url = odl_url + '/opendaylight-inventory:nodes/node/'\n node_url = inventory_url + node_id\n topology_json = call_odl_api(odl_usr, odl_pass, node_url)\n return topology_json", "def __operations(self, conf):\n result = \"\"\"## Operations [back to top](#toc)\nThe operations that this API implements are:\n\"\"\"\n ops = \"\\n\"\n\n for op in conf[\"conf_json\"][1:]:\n params = []\n for p in findall(PARAM_NAME, op[\"url\"]):\n p_type = \"str\"\n p_shape = \".+\"\n if p in op:\n p_type, p_shape = findall(\"^\\s*([^\\(]+)\\((.+)\\)\\s*$\", op[p])[0]\n\n params.append(\n \"<em>%s</em>: type <em>%s</em>, regular expression shape <code>%s</code>\"\n % (p, p_type, p_shape)\n )\n result += \"\\n* [%s](#%s): %s\" % (\n op[\"url\"],\n op[\"url\"],\n op[\"description\"].split(\"\\n\")[0],\n )\n ops += \"\"\"<div id=\"%s\">\n<h3>%s <a href=\"#operations\">back to operations</a></h3>\n\n%s\n\n<p class=\"attr\"><strong>Accepted HTTP method(s)</strong> <span class=\"attr_val method\">%s</span></p>\n<p class=\"attr params\"><strong>Parameter(s)</strong> <span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Result fields type</strong><span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Example</strong><span class=\"attr_val\"><a target=\"_blank\" href=\"%s\">%s</a></span></p>\n<p class=\"ex attr\"><strong>Exemplar output (in JSON)</strong></p>\n<pre><code>%s</code></pre></div>\"\"\" % (\n op[\"url\"],\n op[\"url\"],\n markdown(op[\"description\"]),\n \", \".join(split(\"\\s+\", op[\"method\"].strip())),\n \"</li><li>\".join(params),\n \", \".join(\n [\n \"%s <em>(%s)</em>\" % (f, t)\n for t, f in findall(FIELD_TYPE_RE, op[\"field_type\"])\n ]\n ),\n conf[\"website\"] + conf[\"base_url\"] + op[\"call\"],\n op[\"call\"],\n op[\"output_json\"],\n )\n return markdown(result) + ops", "def connectOntology(ontology, endpoint=None):\r\n world = World()\r\n world.get_ontology(ontology).load()\r\n graph = world.as_rdflib_graph()\r\n if graph:\r\n return graph\r\n else:\r\n print(\"connection failed\")\r\n return", "def stoptopology(topologyName='all'):\n if topologyName == 'all':\n 
middleware.protocolObj.stopAllProtocols()\n return\n\n queryData = {'from': '/',\n 'nodes': [{'node': 'topology', 'properties': ['name'], 'where': [{'property': 'name', 'regex': topologyName}]}]\n }\n queryResponse = middleware.ixn.query(data=queryData)\n try:\n topologyObj = queryResponse.json()['result'][0]['topology'][0]['href']\n except:\n print('\\nError: Verify the topologyName', topologyName)\n middleware.protocolObj.stopTopology([topologyObj])", "def describe_interconnect_loa(interconnectId=None, providerName=None, loaContentType=None):\n pass", "def checkOntoSimilarity(ontology_id):\n # print('checkOntoSimilarity() =>', ontology_id)\n url = cfg.ontology_sim + '/status'\n res = requests.post(url, json={'ontologyId': ontology_id})\n resp = res.json()\n resp['statusCode'] = res.status_code\n return resp #resp['statusCode'] = 200 if ontology exists and 404 otherwise", "def openie(text: str) -> List[Tuple[str, str, str]]:\n\n client = env.resolve('servers.java')\n verbose.info('Extracting triples using OpenIE at: ' + client['address'], caller=openie)\n return requests.get('%s/openie/triples' % client['address'], params={'text': text}).json()", "def show_mpls_traffic_eng_tunnels_brief(\n self, full_output: bool = False, to_json: bool = False,\n ):\n resp = self.get(self.get_filter())\n\n if full_output:\n return resp\n resp = xmltodict.parse(resp.xml)\n resp = resp[\"rpc-reply\"][\"data\"]\n\n if to_json:\n return json.dumps(resp)\n return resp", "def _get_odl_info(pop_url, pop_id):\n try:\n graph_db = neo4j.Graph(pop_url)\n index = ('pop', 'uuid', pop_id)\n pop = neo_resource.get_node(graph_db, index)\n if pop:\n properties = dict(pop.properties)\n if 'occi.epa.pop.odl_url' in properties and 'occi.epa.pop.odl_name' in properties \\\n and 'occi.epa.pop.odl_password' in properties:\n return properties['occi.epa.pop.odl_url'], properties['occi.epa.pop.odl_name'],\\\n properties['occi.epa.pop.odl_password']\n\n except Exception:\n raise HTTPError(404, 'Error connecting to graph_url: ' + str(pop_url))\n raise HTTPError(404, 'Resource not found: Epa-Pop-Id: ' + str(pop_id))", "def gmaps_optical_nodes(request):\n # Cypher query to get all cables with cable type fiber that are connected\n # to two optical node.\n q = \"\"\"\n MATCH (cable:Cable)\n WHERE cable.cable_type = \"Dark Fiber\"\n MATCH (cable)-[Connected_to]->(port)\n WITH cable, port\n MATCH (port)<-[:Has*0..]-(equipment)\n WHERE (equipment:Optical_Node) AND NOT equipment.type =~ \"(?i).*tss.*\"\n WITH cable, port, equipment\n MATCH p2=(equipment)-[:Located_in]->()<-[:Has*0..]-(loc)\n WHERE (loc:Site)\n RETURN cable, equipment, loc\n \"\"\"\n result = nc.query_to_list(nc.graphdb.manager, q)\n nodes = {}\n edges = {}\n for item in result:\n node = {\n 'name': item['equipment']['name'],\n 'url': helpers.get_node_url(item['equipment']['handle_id']),\n 'lng': float(str(item['loc'].get('longitude', 0))),\n 'lat': float(str(item['loc'].get('latitude', 0)))\n }\n coords = {\n 'lng': float(str(item['loc'].get('longitude', 0))),\n 'lat': float(str(item['loc'].get('latitude', 0)))\n }\n edge = {\n 'name': item['cable']['name'],\n 'url': helpers.get_node_url(item['cable']['handle_id']),\n 'end_points': [coords]\n }\n nodes[item['equipment']['name']] = node\n if item['cable']['name'] in edges:\n edges[item['cable']['name']]['end_points'].append(coords)\n else:\n edges[item['cable']['name']] = edge\n response = HttpResponse(content_type='application/json')\n json.dump({'nodes': list(nodes.values()), 'edges': list(edges.values())}, response)\n 
return response", "def test_get_hyperflex_cluster_by_moid(self):\n pass", "def _query_api(\n master_url=settings.OPENSHIFT_API['NP']['OPENSHIFT_MASTER'],\n api_token=settings.OPENSHIFT_API['NP']['API_TOKEN'],\n endpoint='/oapi/v1/buildconfigs'):\n\n openshift_api_url = 'https://' + master_url\n openshift_api_get_endpoint = openshift_api_url + endpoint\n bearer_token_header = {'Authorization': 'Bearer ' + api_token }\n\n try:\n response = requests.get(openshift_api_get_endpoint,headers=bearer_token_header, timeout=2.0)\n except requests.ConnectTimeout as e:\n logger.error(e)\n return None\n except requests.ConnectionError as e:\n logger.error(e)\n return None\n\n if not response.ok:\n logger.error(response.status_code)\n return None\n else:\n return response", "def test_christiandoctrines_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/christiandoctrines',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def get_switch_interfaces(pop_url, pop_id):\n odl_info = _get_odl_info(pop_url, pop_id)\n topology_json = get_topology(odl_info[0], odl_info[1], odl_info[2])\n results = []\n for topology in topology_json['network-topology']['topology']:\n nodes = topology['node']\n for node in nodes:\n node_features_json = get_node_features(odl_info[0], odl_info[1], odl_info[2], node['node-id'])\n if 'node' in node_features_json:\n for node_json in node_features_json['node']:\n if 'flow-node-inventory:serial-number' in node_json \\\n and node_json['flow-node-inventory:serial-number'].strip() != 'None':\n if 'node-connector' in node_json:\n for connector in node_json['node-connector']:\n results.append(connector['id'])\n return results", "def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]", "def token_pathway_summary(token, pathway, resource='TOTAL', page='1', page_size='1', by='all'):\n\n if isinstance(page_size, NumberTypes):\n page_size = str(page_size)\n\n if isinstance(page, NumberTypes):\n page = str(page)\n\n headers = {\n 'accept': 'application/json',\n }\n\n if by.lower() in 'all':\n params = (\n ('resource', resource),\n )\n\n url = 'https://reactome.org/AnalysisService/token/%s/found/all/%s' % (token, pathway)\n\n elif by.lower() in 'entities':\n params = (\n ('resource', resource),\n ('page', page),\n ('pageSize', page_size),\n )\n\n url = 'https://reactome.org/AnalysisService/token/%s/found/entities/%s' % (token, pathway)\n\n elif by.lower() in 'interactors':\n params = (\n ('resource', resource),\n ('page', page),\n ('pageSize', page_size),\n )\n\n url = 'https://reactome.org/AnalysisService/token/%s/found/interactors/%s' % (token, pathway)\n\n try:\n response = requests.get(url=url, headers=headers, params=params)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.json()\n else:\n print('Status code returned a value of %s' % response.status_code)" ]
[ "0.60800683", "0.5555795", "0.5415442", "0.52419", "0.5189096", "0.51391387", "0.5115366", "0.505326", "0.5036325", "0.502502", "0.5016021", "0.4979696", "0.49740812", "0.48765007", "0.48370096", "0.48106188", "0.48058784", "0.47977293", "0.4787885", "0.4776432", "0.47754562", "0.4763605", "0.4710641", "0.46959755", "0.4677403", "0.46763209", "0.46754178", "0.4665213", "0.4662034", "0.4658839" ]
0.6132166
0
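A short sketch of calling the topology getter documented above, assuming the same hypothetical fusion client:

# Hypothetical sketch -- 'fusion' is an assumed client; the 'members' key is a guess at the payload shape.
resp = fusion.fusion_api_get_interconnect_link_topology()
for topo in resp.json().get('members', []):
    print(topo.get('name'))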
Gets a default or paginated collection of Interconnect Link Sets [Arguments]
def fusion_api_get_internal_link_sets(self, uri=None, param='', api=None, headers=None):
    return self.ils.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def link_to_set(page):\n #s = set()\n links = Measurements.get_all_links(page)\n s = set(links)\n return s", "def links(self):\n\t\treturn self.list_of_links", "def get_links(self):\r\n return self.links", "def getLinks(self):\n return self.pageLinks", "def get_links(self):\n return (link for link in self.links)", "def limit(self, nodeset):\n sections = nodeset.filter(is_report_issue_section=True).subset()\n links = sections.filter(uri_scheme__exists=True)\n links = links.filter(\n lambda node: node['uri_scheme'].lower() in ['http', 'https'])\n return links", "def iter_links(self):", "def get_all(self, marker=None, limit=None, sort_key='id',\n sort_dir='asc'):\n context = pecan.request.context\n return self._get_endpoints_collection(marker, limit, sort_key, sort_dir)", "def in_collections(self):\n links = []\n for link in self.link:\n if link.rel == PARENT_LINK_REL and link.href:\n links.append(link)\n return links", "def getExpandedLinks():", "def links(self):\n return self.container['links']", "def paginated_call(self) -> global___Snippet.ClientCall:", "def get_site_collection(self, request):\n\n objects = self.get()\n\n groups = [\n ('topics', request.translate(_(\"Topics\"))),\n ('news', request.translate(_(\"Latest news\"))),\n ('imagesets', request.translate(_(\"Photo Albums\"))),\n ('forms', request.translate(_(\"Forms\"))),\n ('directories', request.translate(_(\"Directories\"))),\n ('resources', request.translate(_(\"Resources\"))),\n ]\n\n links = []\n\n for id, label in groups:\n for obj in objects[id]:\n # in addition to the default url/name pairings we use a group\n # label which will be used as optgroup label\n links.append({\n 'group': label,\n 'name': obj.title,\n 'url': request.link(obj)\n })\n\n return links", "def getSets():", "def get_links(self):\r\n return self.__links", "def get_recipe_links(cuisine, pages, collection):\n recipe_links = []\n for page in xrange(0, pages):\n sleep(SCRAPING_REQUEST_STAGGER)\n recipe_links.extend(get_cuisine_search_pages(cuisine, page))\n if collection:\n recipe_links.extend(get_cuisine_collection_page(cuisine))\n cuisine_recipes = get_recipe_details(list(set(recipe_links)))\n return cuisine_recipes", "def URLs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def URLs(self, default=[{}]):\n tmp = self.data.get('urls', default)\n return [HEP.URLObject(i) for i in tmp]", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def getLinks(self):\n\n return self.links", "def References(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('references', default)\n return [HEP.ReferenceObject(i) for i in tmp]", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return urls", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def fetch_urls(client: Client, limit: int = -1) \\\n -> List[Dict]:\n iterator = client.get_urls()\n\n indicators = []\n\n if limit > 0:\n iterator = iterator[:limit]\n for item in iterator:\n value_ = 
item.get('value')\n type_ = item.get('type')\n raw_data = {\n 'value': value_,\n 'type': type_,\n }\n\n # Create indicator object for each value.\n # The object consists of a dictionary with required and optional keys and values, as described blow.\n for key, value in item.items():\n raw_data.update({key: value})\n\n indicator_obj = {\n # The indicator value.\n 'value': value_,\n 'type': type_,\n # The indicator type as defined in Cortex XSOAR.\n # One can use the FeedIndicatorType class under CommonServerPython to populate this field.\n # The name of the service supplying this feed.\n 'service': 'NucleonCyberFeed',\n # A dictionary that maps values to existing indicator fields defined in Cortex XSOAR.\n # One can use this section in order to map custom indicator fields previously defined\n # in Cortex XSOAR to their values.\n 'fields': {\n },\n # A dictionary of the raw data returned from the feed source about the indicator.\n 'rawJSON': raw_data\n }\n indicators.append(indicator_obj)\n\n return indicators", "def issuelinks_all(request, format=None):\n if request.method == 'GET':\n issuelinks = IssueLink.objects.all()\n serializer = IssueLinkSerializer(issuelinks, many=True)\n return Response(serializer.data)", "def get_links(self):\n return self.__data['links']" ]
[ "0.6008201", "0.59382796", "0.58206034", "0.56628245", "0.56290984", "0.5565631", "0.556245", "0.5543598", "0.5481708", "0.54662657", "0.5428031", "0.5412011", "0.5406529", "0.5406306", "0.5398659", "0.53737754", "0.53480864", "0.5305522", "0.5294644", "0.5294644", "0.5294644", "0.5294644", "0.5294644", "0.5243626", "0.52389127", "0.5217437", "0.5197918", "0.5182141", "0.5177849", "0.51758397" ]
0.62790096
0
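A sketch of paginated retrieval with the internal-link-sets getter above; the query-string pagination parameters are assumptions, not confirmed by the source:

# Hypothetical sketch -- start/count pagination is assumed.
resp = fusion.fusion_api_get_internal_link_sets(param='?start=0&count=25')
link_sets = resp.json().get('members', [])  # 'members' key is an assumption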
Adds a license to the appliance [Arguments]
def fusion_api_add_license(self, key=None, license_type='LicenseV500', api=None, headers=None):
    return self.license.add(key, license_type, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def license(*args, borrow: bool=True, info: bool=True, isBorrowed: bool=True, isExported:\n bool=True, isTrial: bool=True, licenseMethod: bool=True, productChoice: bool=True,\n r: bool=True, showBorrowInfo: bool=True, showProductInfoDialog: bool=True, status:\n bool=True, usage: bool=True, **kwargs)->AnyStr:\n pass", "def install_license():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><license><install></install></license></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def add_license(self, contents):\n buf_size = len(contents)\n buf = (ctypes.c_char * (buf_size + 1))(*contents.encode())\n\n res = self._dll.JLINK_EMU_AddLicense(buf)\n\n if res == -1:\n raise errors.JLinkException('Unspecified error.')\n elif res == -2:\n raise errors.JLinkException('Failed to read/write license area.')\n elif res == -3:\n raise errors.JLinkException('J-Link out of space.')\n\n return (res == 0)", "def ibmi_add_license_key(adapter, lpar_uuid, license_key):\n resp = adapter.read(lpar.LPAR.schema_type, root_id=lpar_uuid,\n suffix_type=c.SUFFIX_TYPE_DO,\n suffix_parm=_SUFFIX_PARM_ADD_LICENSE)\n job_wrapper = job.Job.wrap(resp.entry)\n job_parms = [job_wrapper.create_job_parameter('licKey', license_key)]\n try:\n job_wrapper.run_job(lpar_uuid, job_parms=job_parms)\n except Exception:\n LOG.exception(_('IBMi Key Injection Failed'))\n raise", "def add_license(fitsfile, lic):\n try:\n hdulist = pyfits.open(fitsfile, mode=\"update\")\n except:\n print(\"Oops! Something's gone wrong :-(\", file=sys.stderr)\n else:\n prihdr = hdulist[0].header\n prihdr[\"LICENSE\"] = liclist[lic][\"name\"]\n prihdr[\"LICVER\"] = liclist[lic][\"ver\"]\n prihdr[\"LICURL\"] = liclist[lic][\"url\"]\n add_comments(prihdr)\n hdulist.close()", "def license(new_key):\n if new_key is not None:\n # click.echo('Saving key to configuration')\n config.set_license(new_key)\n license_key = config.get_license()\n if license_key:\n click.echo(license_key)\n else:\n click.echo(\"No license found: Use --set to configure the key\")", "def show_license(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(lic)\n ctx.exit()", "def license(self, license):\n\n self._license = license", "def add_fc_licenses(licenses_obj):\n logger.info(\"Add FC License to appliance\")\n navigate()\n total_len = len(licenses_obj)\n failure_cnt = 0\n for n, lic_obj in enumerate(licenses_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total_len, '-' * 14))\n logger.info(\"Adding FC license with type '{0}'\".format(lic_obj.type))\n if lic_obj.type != FusionUIConst.CONST_LICENSE_ONEVIEW_SYNERGY_FCUPGRADE:\n err_msg = \"The license type is not FC licenses, please correct.\"\n ui_lib.fail_test(err_msg)\n if not _add_license_action(lic_obj):\n logger.warn(\"Failed to add license with type {}\".format(lic_obj.type))\n msg = FusionUIBase.get_error_message_from_dialog()\n if msg[0] is True:\n logger.warn(msg[1])\n else:\n logger.warn(\"Failed to get error message in dialog\")\n AddLicense.click_cancel_button()\n failure_cnt += 1\n continue\n if VerifyLicense.verify_oneview_fcupgrade_license_exists(fail_if_false=False):\n expected_text = \"%s license%s available\" % ((n + 1), '' if n == 0 else 's')\n if not VerifyLicense.verify_available_fcupgrade_license(expected_text, timeout=5, fail_if_false=False):\n failure_cnt += 1\n return False if failure_cnt else True", "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and 
os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def create_license(self) -> None:\n # copy the license file from the template to the package folder\n # option : append other license files\n shutil.copy(CONFIG.template_path / \"LICENSE.md\", self.package_path)", "def add(self, arguments):\n url = arguments['<location>']\n if url:\n name = arguments['<name>']\n else:\n url = arguments['<name>']\n name = None\n version = arguments['--box-version']\n force = arguments['--force']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n utils.add_box(url, name=name, version=version, force=force, requests_kwargs=requests_kwargs)", "def putlicensepath(self,licensepath_): # 3\n res = self.__obj.putlicensepath(licensepath_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def register_license(file_path):\n result = mjlib.mj_activate(file_path)\n return result", "def __init__(__self__,\n resource_name: str,\n args: LicenseConfigurationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def set_license_analytics(self, license_params: dict) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.OPT_IN,\n body=license_params,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def putlicensedebug(self,licdebug_): # 3\n res = self.__obj.putlicensedebug(licdebug_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def putlicensecode(self,code): # 3\n if code is None:\n code_ = None\n else:\n try:\n code_ = memoryview(code)\n except TypeError:\n try:\n _tmparr_code = array.array(\"i\",code)\n except TypeError:\n raise TypeError(\"Argument code has wrong type\")\n else:\n code_ = memoryview(_tmparr_code)\n \n else:\n if code_.format != \"i\":\n code_ = memoryview(array.array(\"i\",code))\n \n if code_ is not None and len(code_) != value.license_buffer_length:\n raise ValueError(\"Array argument code has wrong length\")\n res = self.__obj.putlicensecode(code_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def putlicensedebug(self,licdebug_):\n res = __library__.MSK_XX_putlicensedebug(self.__nativep,licdebug_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def main():\n licensify(_parse_args())", "def activate_license(self):\n response = self.client.activate_license()\n if str(response[\"result\"][\"code\"]) == \"0\" and str(response[\"data\"][\"LicenseActiveResult\"]) == \"0\":\n self.module.exit_json(msg=\"Activate license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Activate license file fail.{0}\".format(response['result']['description']),\n status='fail', changed=False)", "def add(self, name, *args):\n\n self._add(False, self.authority, name, *args)", "def AddEdition(parser):\n edition_flag = base.ChoiceArgument(\n '--edition',\n required=False,\n 
choices=['enterprise', 'enterprise-plus'],\n default=None,\n help_str='Specifies the edition of Cloud SQL instance.',\n )\n edition_flag.AddToParser(parser)", "def put(self, license_handler):\n\n full_license = request.data\n return license_handler.upload_license(full_license)", "def accept_license():\r\n msg, status = \"\", True\r\n\r\n try:\r\n sleep(5)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'Click on license accept button'\r\n flag1 = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n \r\n \r\n\r\n status = False if not (flag1) else True\r\n else:\r\n \r\n 'Click on Agree button in EULA page for IOS'\r\n flag = ui_controls.button(get_obj_identifier('license_accept_btn'))\r\n status = flag\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n\r\n return status, msg", "def putlicensepath(self,licensepath_):\n if isinstance(licensepath_,unicode):\n licensepath_ = licensepath_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putlicensepath(self.__nativep,licensepath_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def putlicensewait(self,licwait_):\n res = __library__.MSK_XX_putlicensewait(self.__nativep,licwait_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def qs_license():\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('License', level=1)\r\n lic_metric = ['lef', 'serial', 'name', 'organization', 'product', 'numberOfCores', 'isExpired', 'expiredReason', 'isBlacklisted', 'isInvalid']\r\n qs_lic = get_qlik_sense.get_license()\r\n num_of_metric = len(qs_lic)\r\n table = document.add_table(rows=num_of_metric+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'details'\r\n\r\n for metric in range(len(qs_lic)):\r\n row = table.rows[metric+1]\r\n row.cells[0].text = str(lic_metric[metric])\r\n row.cells[1].text = str(qs_lic[metric][0])\r\n document.add_page_break()", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)" ]
[ "0.67941815", "0.6771292", "0.6694002", "0.6305626", "0.6126136", "0.59274495", "0.591613", "0.5889053", "0.58426124", "0.5789727", "0.5706304", "0.5654645", "0.5591205", "0.55902547", "0.5578288", "0.5553778", "0.55174893", "0.5514362", "0.5492471", "0.5481856", "0.54489183", "0.54481095", "0.5424534", "0.54179734", "0.54166794", "0.5399873", "0.5399552", "0.53853667", "0.53851104", "0.5371016" ]
0.70745283
0
Deletes a License from the appliance based on uri [Arguments]
def fusion_api_remove_license(self, uri=None, api=None, headers=None): return self.license.delete(uri=uri, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)", "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "def catalog_delete(self, args):\n headers = DEFAULT_HEADERS.copy()\n headers.update(args.headers)\n try:\n catalog = self.server.connect_ermrest(args.id)\n catalog.delete(args.path, headers)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def delete(self, uri, where, selectionArgs):\n pass", "def delete(self, uri, **kwargs):\n return self.session.delete(uri, **kwargs)", "def delete_license(key_name=None):\n\n if not key_name:\n return False, \"You must specify a key_name.\"\n else:\n query = {\n \"type\": \"op\",\n \"cmd\": \"<delete><license><key>{}</key></license></delete>\".format(key_name),\n }\n\n return __proxy__[\"panos.call\"](query)", "def _delete(self, uri, headers=None):\n if self.openam_url[-1:] == '/':\n openam_path = self.openam_url + uri\n else:\n openam_path = self.openam_url + \"/\" + uri\n\n try:\n data = requests.delete(openam_path, headers=headers, timeout=self.timeout, verify=self.verify)\n except requests.exceptions.RequestException as e:\n data = {'error': e}\n return data", "def adel(url, **kwargs):\n return requests.delete(url, **kwargs)", "def delete(cls, uri):\n return cls._perform_request(uri, 'DELETE')", "def delete(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to delete')\n\n # Send DELETE request\n return requests.delete(self.REQUEST_URL + str(self.args.id))", "def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def fusion_api_delete_repository(self, uri, api=None, headers=None):\n return self.repository.delete(uri=uri, api=api, headers=headers)", "def fusion_api_delete_resource(self, uri, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers\n uri = 'https://%s%s' % (self.fusion_client._host, uri)\n return self.fusion_client.delete(uri, headers)", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def delete(self, url):\n return self.request(url, \"DELETE\")", "def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def delete(url, **kwargs):\n\n return request('delete', url, **kwargs)", "def delete(\n self, uri, resource=None, logon_required=True, 
renew_session=True):\n try:\n self._urihandler.delete(self._hmc, uri, logon_required)\n except HTTPError as exc:\n new_exc = zhmcclient.HTTPError(exc.response())\n new_exc.__cause__ = None\n raise new_exc # zhmcclient.HTTPError\n except ConnectionError as exc:\n new_exc = zhmcclient.ConnectionError(exc.message, None)\n new_exc.__cause__ = None\n raise new_exc # zhmcclient.ConnectionError", "def delete(self, url):\n return self._request('DELETE', url)", "def _delete(self, url, **kwargs):\n return self._call('DELETE', url, kwargs)", "def delete(self, uri, body=None, headers=None, auth=False):\n return self.send_request('DELETE', uri, body, headers, auth)", "def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()", "def catalog_drop(self, args):\n try:\n catalog = self.server.connect_ermrest(args.id)\n catalog.delete_ermrest_catalog(really=True)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def delete(self, path):\n client = self.connect(VAULT_TOKEN)\n client.delete(path)", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def _delete(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n config = misc_utils.resolve_config(\n kwargs.pop('config', None),\n kwargs.pop('config_file', None),\n required=False\n )\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name),\n method='DELETE',\n config=config\n )", "def catalog_alias_delete(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n alias.delete_ermrest_alias(really=True)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e", "def delete(self, method, uri, query_param, request_param, headers, **kwargs):\n raise NotImplementedError" ]
[ "0.68128294", "0.6762246", "0.66312593", "0.65794665", "0.63812053", "0.6283886", "0.627558", "0.62061", "0.61452", "0.61128074", "0.61114347", "0.60926473", "0.6088915", "0.60815895", "0.6079978", "0.60625523", "0.60364693", "0.60240626", "0.5994457", "0.5975", "0.59584343", "0.5943502", "0.59417003", "0.59365886", "0.591033", "0.58868265", "0.588148", "0.58546615", "0.5837624", "0.5834669" ]
0.7439182
0
Deletes All Licenses from the appliance [Arguments] None [Example] Fusion API Remove All Licenses
def fusion_api_remove_all_licenses(self): all_licenses = self.fusion_api_get_licenses() for lic in all_licenses['members']: response = self.fusion_api_remove_license(uri=lic['uri']) if response.status_code != 204: logger._log_to_console_and_log_file("Unable to delete license with key: %s" % lic['key']) logger._log_to_console_and_log_file("Status code of response: %s" % response.status_code) BuiltIn().fail("Expected status code was 204") else: logger._log_to_console_and_log_file("Successfully deleted license with key: %s" % lic['key'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_remove_license(self, uri=None, api=None, headers=None):\n return self.license.delete(uri=uri, api=api, headers=headers)", "def licensecleanup(): # 3\n res = _msk.Env.licensecleanup()\n if res != 0:\n raise Error(rescode(res),\"\")", "def del_license(fitsfile, keys):\n try:\n for key in keys:\n pyfits.delval(fitsfile, key)\n except KeyError:\n print(\"License information not found.\", file=sys.stderr)", "def erase_licenses(self):\n res = self._dll.JLINK_EMU_EraseLicenses()\n return (res == 0)", "def licensecleanup():\n res = __library__.MSK_XX_licensecleanup()\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def deleteAll(self):\n self.deleteAttributeRange() #Default args = everything", "def _delete_rights(self):\n for right in self.rights:\n right.delete()", "def remove_all(ctx):\n skale = ctx.obj['skale']\n cnt = 0\n for sname in get_all_schains_names(skale):\n skale.manager.delete_schain(sname)\n cnt += 1\n print(f'Success. {cnt} schains were removed')", "def delete_all(pat: str, resource_registration_endpoint: str, secure: bool = False):\n \n all_resources = list(pat,resource_registration_endpoint,secure)\n\n for resource_id in all_resources:\n delete(pat, resource_registration_endpoint, resource_id, secure)", "def delete_all():\n answer = ['YES', 'NO']\n str = rs.GetString(\"Delete all objects?\", 'YES', answer)\n\n if str == 'YES':\n obs = rs.ObjectsByType(0)\n rs.DeleteObjects(obs)\n elif str == 'NO':\n pass\n else:\n sys.exit()", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def RemoveAll(self):\n\t\tcontacts = self.GetContactList()\n\t\t\n\t\tfor contact in contacts:\n\t\t\tself.BatchEnqueue('delete', contact)\n\t\tself.ExecuteBatchQueue()", "def _delete_all_acls(self):\n for acl_ref in self.created_entities['acl']:\n entity_ref = acl_ref.replace(\"/acl\", \"\")\n blank_acl_entity = self.barbicanclient.acls.create(\n entity_ref=entity_ref)\n blank_acl_entity.remove()", "def DeleteAll(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def flask_delete_all_device():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n result = DeviceHandler.delete_all_devices(token)\n\n LOGGER.info('Deleting all devices.')\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n\n return format_response(e.error_code, e.message)", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def do(self):\n\n self.logger.debug(\"Entering RemoveAllReceptors()\")\n\n device=self.target\n\n # For LMC0.6.0: use a helper instead of a command so that it doesn't care about the obsState\n device._remove_receptors_helper(device._receptors[:])\n\n message = \"CBFSubarray RemoveAllReceptors command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK, message)", "def PurgeAll(self):\n\t\tself.acad.ActiveDocument.PurgeAll()", "def delete_all(self):\n raise NotImplementedError()", "def delete_license(key_name=None):\n\n if not key_name:\n return False, \"You must specify a key_name.\"\n else:\n query = {\n \"type\": \"op\",\n \"cmd\": \"<delete><license><key>{}</key></license></delete>\".format(key_name),\n }\n\n return __proxy__[\"panos.call\"](query)", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = 
sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def finalizer():\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)", "def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if search_text(delstr):\n click_textview_by_text(delstr)\n click_checkbox_by_id('select_all_check')\n click_button_by_id('btn_ok')\n click_button_by_index(1)\n else:\n goback()\n\n sleep(2) #take a rest to wait view ...", "def clear_gateways():\n print('#### Removing federated agw from orc8r and deleting certs ####')\n subprocess.check_call(\n 'fab --fabfile=dev_tools.py deregister_federated_agw',\n shell=True, cwd=agw_path,\n )\n print('#### Removing feg gw from orc8r and deleting certs####')\n subprocess.check_call('fab deregister_feg_gw', shell=True, cwd=feg_path)", "def erase_all(self):\n result = self._lib.NRFJPROG_erase_all()\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)", "def delete_all_course_assets(self, course_key):\r\n raise NotImplementedError", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def clear_all_devices():\n adapter = get_adapter()\n for key in devices_by_adr.keys():\n device = get_device(key)\n try:\n adapter.RemoveDevice(device) \n except DBusException:\n print(\"could not remove\", device)", "def delete_all_users():\n\tUser.drop_collection()", "def delete_all(self):\n return self.context.delete(\"/ckks/all\", None,\n \"CKKS:: failed deleting all the CKKS data\"\n )" ]
[ "0.63146925", "0.62015146", "0.5996768", "0.59178025", "0.58467275", "0.57861876", "0.5692768", "0.5643447", "0.56258345", "0.5546763", "0.55434954", "0.54909426", "0.54482716", "0.5426149", "0.5419495", "0.53560823", "0.53535885", "0.5351733", "0.5335338", "0.5301046", "0.5269437", "0.52656895", "0.52469426", "0.52451146", "0.52436304", "0.5243536", "0.52272004", "0.5218941", "0.5198336", "0.5196903" ]
0.8171152
0
Gets a default or paginated collection of Logical Downlinks. [Arguments]
def fusion_api_get_logical_downlink(self, uri=None, api=None, headers=None, param=''): return self.ld.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getExpandedLinks():", "def links(self):\n\t\treturn self.list_of_links", "def get_links(self, records: list = None, collection: bool = False) -> List[dict]:\n if not records:\n records = []\n if not collection:\n for record in records:\n links = [\n self._get_self_record(record).to_dict(),\n self._get_parent_record().to_dict(),\n self._get_root_record().to_dict(),\n ]\n record.update({\"links\": links})\n return records\n else:\n links = [self._get_self_collection().to_dict()]\n return links", "def get_links(self):\r\n return self.links", "def list_links(self, node, dd):\n link_list = dd[node]['links']\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names", "def links(self):\n return self.container['links']", "def __fetch_data(file_spider, links_titles, white_list, target_format, time_out, path,\n recursion_depth, recursion_max_depth, prev_link_size, first_run):\n if file_spider == 'yes':\n Crawler.__check_target_link(links_titles, white_list, target_format,\n time_out, path)\n return [recursion_depth, prev_link_size, first_run]\n\n else:\n url_size = len(str(links_titles[0]).rstrip('/').split('/'))\n recursion_settings = Crawler.__count_recursion_depth(url_size, recursion_depth,\n prev_link_size, first_run)\n recursion_depth = recursion_settings[0]\n prev_link_size = recursion_settings[1]\n first_run = recursion_settings[2]\n\n if recursion_depth > recursion_max_depth and recursion_depth != 0:\n return [recursion_depth, prev_link_size, first_run]\n else:\n Crawler.__download_link_contents(links_titles, time_out, path)\n return [recursion_depth, prev_link_size, first_run]", "def get_links(response: GenericResponse, endpoint: Endpoint, field: str) -> Sequence[Link]:\n responses = endpoint.definition.resolved[\"responses\"]\n if str(response.status_code) in responses:\n response_definition = responses[str(response.status_code)]\n else:\n response_definition = responses.get(\"default\", {})\n links = response_definition.get(field, {})\n return [Link.from_definition(name, definition, endpoint) for name, definition in links.items()]", "def links(self):\r\n return links.RepoLinks(self)", "def getLinks(self):\n return self.pageLinks", "def get_links(self):\n return (link for link in self.links)", "def in_collections(self):\n links = []\n for link in self.link:\n if link.rel == PARENT_LINK_REL and link.href:\n links.append(link)\n return links", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def get_logical_links(self, osdf_config):\n\n config = osdf_config.deployment\n aai_url = config[\"aaiUrl\"]\n aai_req_url = aai_url + config[\"aaiGetLinksUrl\"]\n\n response = requests.get(aai_req_url,headers=self.aai_headers,auth=HTTPBasicAuth(\"AAI\", \"AAI\"),verify=False)\n if response.status_code == 200:\n return response.json()", "def get_links(self):\r\n return self.__links", "def descendants(self, link=None):\n\n return self.relatives(link=link, ancestor=False)", "def get_urls(self):\n urls = []\n params = ['<{}>'.format(x) for x in self.args]\n args_length = len(self.args) - len(self.defaults)\n for i in range(len(self.defaults) + 1):\n index = -i if i > args_length else None\n urls.append(self.get_url(params[:index]))\n return 
urls", "def get_links(self) -> List[str]:\n return self.__links", "def get_drefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_to( ea, xrf )\r\n\treturn ret", "def links (self):\n return (link for src, dst, link in self.network.edges_iter(data=True) if\n link.type == Link.STATIC or link.type == Link.DYNAMIC)", "def get_links(self):\n return self.__data['links']", "def get_all_disks():\n return DISKS_API.get(abs_link=False)", "def get_rev_links(self, model, rel, *item_types):\n return self.storage().get_rev_links(model, rel, *item_types)", "def get_drefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_from( ea, xrf )\r\n\treturn ret", "def find_all_downstream_from_seed(lt_links, s_seed, ls_ends=[], b_downstream_ended_only_ls_ends=False):\n llt_downstreams = []\n dic_s_start_l_links = {}\n for t_link in lt_links:\n dic_s_start_l_links.setdefault(t_link[0],[]).append(t_link)\n \n llt_stack = []\n for t_link in dic_s_start_l_links[s_seed]:\n llt_stack.append([t_link])\n \n while llt_stack:\n lt_downstream = llt_stack.pop()\n if lt_downstream[-1][-1] in ls_ends:\n llt_downstreams.append(lt_downstream)\n continue\n if lt_downstream[-1][-1] not in dic_s_start_l_links.keys():\n if b_downstream_ended_only_ls_ends:\n continue\n else:\n llt_downstreams.append(lt_downstream)\n continue\n for t_link in dic_s_start_l_links[lt_downstream[-1][-1]]:\n llt_stack.append(lt_downstream+[t_link])\n \n return llt_downstreams", "def fusion_api_get_internal_link_sets(self, uri=None, param='', api=None, headers=None):\n return self.ils.get(uri=uri, api=api, headers=headers, param=param)", "def get_urls_command(client: Client,\n args: Dict[str, Any]\n ) -> CommandResults:\n limit = int(args.get('limit', '10'))\n urls = fetch_urls(client, limit)\n human_readable = tableToMarkdown(\n 'URL indicators from NucleonCyberFeed:',\n urls,\n headers=['value', 'type'],\n headerTransform=string_to_table_header,\n removeNull=True\n )\n return CommandResults(\n readable_output=human_readable,\n outputs_prefix='NucleonCyber.Indicators.url',\n outputs_key_field='url',\n raw_response=urls,\n outputs=urls,\n )", "def listRelatives(*args, allDescendents: bool=True, allParents: bool=True, children: bool=True,\n fullPath: bool=True, noIntermediate: bool=True, parent: bool=True, path:\n bool=True, shapes: bool=True, type: Union[AnyStr, List[AnyStr]]=\"\",\n **kwargs)->List[AnyStr]:\n pass", "def links(self):\n return self._links", "def links(self):\n return self._links" ]
[ "0.5378087", "0.53444034", "0.5338815", "0.5180933", "0.5152165", "0.51453227", "0.5125656", "0.50781953", "0.50676686", "0.5048994", "0.5026012", "0.50027025", "0.50002", "0.4970557", "0.49556756", "0.49449378", "0.49409524", "0.4921925", "0.49193707", "0.49180612", "0.49162102", "0.49161625", "0.4903581", "0.4879562", "0.48623237", "0.48614305", "0.4846033", "0.48415408", "0.4835111", "0.4835111" ]
0.60641724
0
Create the LIG payload
def fusion_api_create_lig_payload(self, **kwargs): return self.lig.make_body(**kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def payload(self):", "def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}", "def build_payload(self, **kwargs):\n\n return None", "def fusion_api_create_sas_lig_payload(self, body, api=None):\n return self.saslig.make_body(body=body, api=api)", "def payload_creation(self, id, data):\n\n payload = {\n 'UUID': self.uuid,\n 'ID': id,\n 'RATE': self.rate,\n 'GPIO': data[2],\n 'DDL': self.ddl,\n 'VALUE': data[1],\n 'TIME': data[0]\n }\n return payload", "def __init__(self, rpc, payload):\n super(PayloadModule, self).__init__(rpc, 'payload', payload)", "def LATCH_create(bytes=None, rotationInvariance=None, half_ssd_size=None, sigma=None): # real signature unknown; restored from __doc__\n pass", "def generate_payload(req):\n logging.info(f'Do something with {req}')\n return json.dumps({\n \"msg1\": \"Hello world 1!\",\n \"msg2\": \"Hello world 2!\"\n })", "def create_payload(self):\n doc = etree.fromstring(self.message)\n self.payload = etree.tostring(doc, encoding=\"utf-8\")\n self.payload = urlsafe_b64encode(self.payload).decode(\"ascii\")\n return self.payload", "def make_payload(self):\n return Payload(names=self.names)", "def __init__(self, payload):\n self.payload = payload", "def craft_payload(self):\n\n junk_size = randint(1, 16)\n\n junk_data = bytearray(\n [\n choice([i for i in range(0, 256)])\n for i in range(0, junk_size)\n ])\n\n challenge_request = bytes(b'\\x00' * 0x100)\n\n payload = \\\n struct.pack('B', junk_size) + \\\n junk_data + \\\n struct.pack('B', (junk_size*2 & 0xff)) + \\\n challenge_request\n\n return payload", "def format_payload(enc, **kwargs):\n payload = {\"enc\": enc}\n load = {}\n for key in kwargs:\n load[key] = kwargs[key]\n payload[\"load\"] = load\n return package(payload)", "def generatePayload(\n\t\tself, \n\t\tlistener: str, \n\t\tartifact_type: 'ArtifactType', \n\t\tstaged: bool = False, \n\t\tx64: bool = True\n\t) -> bytes:\n\t\tif x64:\n\t\t\tarch = \"x64\"\n\t\telse:\n\t\t\tarch = \"x86\"\n\n\t\tif staged:\n\t\t\tfunction = \"artifact_stager\"\n\t\telse:\n\t\t\tfunction = \"artifact_payload\"\n\n\t\tcmd = f\"return base64_encode({function}('{listener}', '{artifact_type.value}', '{arch}'))\"\n\t\tencoded_bytes = self.ag_get_object(cmd, timeout=30000)\n\t\t# We converted the bytes to b64 for transferring, so now convert them back\n\t\treturn base64.b64decode(encoded_bytes)", "def package(payload):\n return salt.utils.msgpack.dumps(payload)", "def _create_msg(self, tr_id, payload, confirm, expire_time, encoding):\n tmp = [\"<SSAP_message><transaction_type>INSERT</transaction_type>\",\n \"<message_type>REQUEST</message_type>\"]\n tmp.extend([\"<transaction_id>\", str(tr_id), \"</transaction_id>\"])\n tmp.extend([\"<node_id>\", str(self.node_id), \"</node_id>\"])\n tmp.extend([\"<space_id>\", str(self.targetSS), \"</space_id>\"])\n tmp.extend(['<parameter name=\"insert_graph\" encoding=\"%s\">' % encoding.upper(),\n str(payload), \"</parameter>\"])\n tmp.extend(['<parameter name = \"confirm\">',\n str(confirm).upper(),\n \"</parameter>\",\n \"</SSAP_message>\"])\n return \"\".join(tmp)", "def _generate_payload(self, command, data=None, gwId=None, devId=None, uid=None):\n json_data = command_override = None\n\n if command in payload_dict[self.dev_type]:\n if \"command\" in payload_dict[self.dev_type][command]:\n json_data = payload_dict[self.dev_type][command][\"command\"]\n if \"command_override\" in payload_dict[self.dev_type][command]:\n command_override = 
payload_dict[self.dev_type][command][\n \"command_override\"\n ]\n\n if self.dev_type != \"type_0a\":\n if (\n json_data is None\n and command in payload_dict[\"type_0a\"]\n and \"command\" in payload_dict[\"type_0a\"][command]\n ):\n json_data = payload_dict[\"type_0a\"][command][\"command\"]\n if (\n command_override is None\n and command in payload_dict[\"type_0a\"]\n and \"command_override\" in payload_dict[\"type_0a\"][command]\n ):\n command_override = payload_dict[\"type_0a\"][command][\"command_override\"]\n\n if command_override is None:\n command_override = command\n if json_data is None:\n # I have yet to see a device complain about included but unneeded attribs, but they *will*\n # complain about missing attribs, so just include them all unless otherwise specified\n json_data = {\"gwId\": \"\", \"devId\": \"\", \"uid\": \"\", \"t\": \"\"}\n\n if \"gwId\" in json_data:\n if gwId is not None:\n json_data[\"gwId\"] = gwId\n else:\n json_data[\"gwId\"] = self.id\n if \"devId\" in json_data:\n if devId is not None:\n json_data[\"devId\"] = devId\n else:\n json_data[\"devId\"] = self.id\n if \"uid\" in json_data:\n if uid is not None:\n json_data[\"uid\"] = uid\n else:\n json_data[\"uid\"] = self.id\n if \"t\" in json_data:\n if json_data[\"t\"] == \"int\":\n json_data[\"t\"] = int(time.time())\n else:\n json_data[\"t\"] = str(int(time.time()))\n\n if data is not None:\n if \"dpId\" in json_data:\n json_data[\"dpId\"] = data\n elif \"data\" in json_data:\n json_data[\"data\"] = {\"dps\": data}\n else:\n json_data[\"dps\"] = data\n elif self.dev_type == \"type_0d\" and command == DP_QUERY:\n json_data[\"dps\"] = self.dps_to_request\n\n if json_data == \"\":\n payload = \"\"\n else:\n payload = json.dumps(json_data)\n # if spaces are not removed device does not respond!\n payload = payload.replace(\" \", \"\").encode(\"utf-8\")\n self.debug(\"Sending payload: %s\", payload)\n\n return MessagePayload(command_override, payload)", "def make_message(parsed):\n frame = {\n 'technology': 'LoRa',\n 'freq': parsed[3],\n 'bw': parsed[4],\n 'sf': parsed[5],\n 'snr': parsed[9] / 100.0,\n 'length': parsed[11],\n 'payload': str(parsed[14]).decode('latin-1').encode(\"utf-8\")\n }\n print frame\n return frame", "def format_payload(self):\n # Initializes the default payload structure.\n payload = {}\n embed = {\n 'author': {},\n 'footer': {},\n 'image': {},\n 'thumbnail': {},\n 'fields': []\n }\n\n # Attaches data to the payload if provided.\n if self.content:\n payload['content'] = self.content\n\n if self.title:\n embed['title'] = self.title\n\n if self.description:\n embed['description'] = self.description\n\n if self.url:\n embed['url'] = self.url\n\n if self.color:\n embed['color'] = self.color\n\n if self.timestamp:\n embed['timestamp'] = self.timestamp\n\n if self.author_name:\n embed['author']['name'] = self.author_name\n\n if self.author_url:\n embed['author']['url'] = self.author_url\n\n if self.author_icon:\n embed['author']['icon_url'] = self.author_icon\n\n if self.thumbnail_url:\n embed['thumbnail']['url'] = self.thumbnail_url\n\n if self.image:\n embed['image']['url'] = self.image\n\n if self.fields:\n embed['fields'] = self.fields\n\n if self.footer_icon:\n embed['footer']['icon_url'] = self.footer_icon\n\n if self.footer_text:\n embed['footer']['text'] = self.footer_text\n\n # If the embed object has content it gets appended to the payload\n if embed:\n payload['embeds'] = []\n payload['embeds'].append(embed)\n\n return payload", "def build_payload():\r\n payload = 
json.dumps({\"method\": \"ListActivePairedVolumes\",\r\n \"params\": {}, \"id\": 1})\r\n return payload", "def build(self, data: dict):", "def test_build_payload(self):\n pytrend = TrendReq()\n pytrend.build_payload(kw_list=['pizza', 'bagel'])\n self.assertIsNotNone(pytrend.token_payload)", "def append_payload(self, payload: Payload) -> Payload:\n ...", "def construct_payload(self, **kwargs):\r\n \r\n payload = kwargs.get('parse')\r\n excude = kwargs.get('dele')\r\n\r\n if payload and excude:\r\n payload.pop(excude, None)\r\n return payload", "def _make_payload(self, width, height, depth, text):\n message = text_to_bits(text) + [0] * 32\n\n payload = message\n while len(payload) < width * height * depth:\n payload += message\n\n payload = payload[:width * height * depth]\n\n return torch.FloatTensor(payload).view(1, depth, height, width)", "def create(self):\n self.parser.add_argument('lp_file',\n help=\"Language pack file.\")\n args = self.parser.parse_args()\n with open(args.lp_file) as lang_pack_file:\n try:\n data = json.load(lang_pack_file)\n except ValueError as exc:\n print(\"Error in language pack file: %s\", str(exc))\n sys.exit(1)\n\n json_data = json.dumps(data)\n languagepack = self.client.languagepacks.create(json_data)\n fields = ['uuid', 'name', 'description', 'compiler_versions',\n 'os_platform']\n data = dict([(f, getattr(languagepack, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def fusion_api_create_lig(self, body, api=None, headers=None):\n return self.lig.create(body, api, headers)", "def make_payload(self, cls, style, attrs):\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self.make_style(style) if style else ''\n attr_str = self.make_attrs(attrs) if attrs else ''\n payload = ''.join([attr_str, cls_str, style_str])\n return payload", "def fusion_api_create_lsg(self, body, api=None, headers=None):\n return self.lsg.create(body, api, headers)", "def create(self):\n\t\t\n\t\tflagbyte = 0\n\t\tif self.synf: flagbyte += 1\n\t\tif self.ackf: flagbyte += 2\n\t\t\n\t\tself.header = struct.pack(\">IBIII\", self.connid, flagbyte, self.seqn, self.ackn, self.recv)\n\t\t\n\t\tself.data = self.header+self.payload" ]
[ "0.6448342", "0.6329533", "0.6279789", "0.6268654", "0.6185422", "0.59953004", "0.59722614", "0.596656", "0.59505904", "0.5948355", "0.5798353", "0.5759876", "0.5722105", "0.5707299", "0.5636369", "0.5631015", "0.5616373", "0.56001014", "0.55684084", "0.5548995", "0.5538745", "0.55070144", "0.54928297", "0.5462081", "0.5455599", "0.54364836", "0.53959745", "0.5372841", "0.534675", "0.53395545" ]
0.76806426
0
Deletes an LIG from the appliance based on name OR uri [Arguments]
def fusion_api_delete_lig(self, name=None, uri=None, api=None, headers=None, etag=None): return self.lig.delete(name=name, uri=uri, api=api, headers=headers, etag=etag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)", "def fusion_api_delete_sas_lig(self, name=None, uri=None, api=None, headers=None):\n return self.saslig.delete(name=name, uri=uri, api=api, headers=headers)", "def fusion_api_delete_ls(self, name=None, uri=None, api=None, headers=None):\n return self.ls.delete(name=name, uri=uri, api=api, headers=headers)", "def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)", "def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)", "def fusion_api_delete_sas_li(self, name=None, uri=None, api=None, headers=None):\n return self.sasli.delete(name=name, uri=uri, api=api, headers=headers)", "def do_remove(self, arg):\n jail_destroy('remove', arg)", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def delete(self, name):\n\n pass", "def delete(self, args):\n try:\n db = get_db('intents')\n intents = db.delete_intent(args['intent'])\n resp = jsonify(intents=intents)\n resp.status_code = 200\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error)\n resp.status_code = 400\n return resp", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def remove(name):", "def delete(self, uri, where, selectionArgs):\n pass", "def delete_app(self, name):\n raise NotImplementedError", "def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}", "def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()", "def bdev_uring_delete(client, name):\n params = {'name': name}\n return client.call('bdev_uring_delete', params)", "def delete(self, application_id):", "def delete():", "def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])", "def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)", "def delete(self, name):\n self.backend.delete(name)", "def delete(self, liff_id):\n api_url = 
'https://api.line.me/liff/v1/apps/{0}'.format(liff_id)\n result = requests.delete(api_url, headers={\"Authorization\": self._headers[\"Authorization\"]})\n if result.status_code == 401:\n raise ErrorResponse(\"[401 Error] Certification failed.\")\n elif result.status_code == 404:\n raise ErrorResponse(\"\"\"\\\n[404 Error] The following error reasons are possible.\n・The specified LIFF application does not exist.\n・The specified LIFF application belongs to another channel.\"\"\")", "def delete(self, args, intent):\n if 'all' in args.keys() and args['all'] == True:\n try:\n db = get_db('expressions')\n db_results = db.delete_all_intent_expressions(intent)\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions)\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp \n elif args['expressions']:\n try:\n db = get_db('expressions')\n db_results = db.delete_expressions_from_intent(intent, args['expressions'])\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions, deleted_expressions=args['expressions'])\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp", "def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def fusion_api_delete_directory(self, name=None, uri=None, api=None, headers=None):\n return self.logindomain.delete(name, uri, api, headers)", "def catalog_delete(self, args):\n headers = DEFAULT_HEADERS.copy()\n headers.update(args.headers)\n try:\n catalog = self.server.connect_ermrest(args.id)\n catalog.delete(args.path, headers)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def delete(self):\n self.parser.add_argument('lp_id',\n help=\"Language pack id\")\n args = self.parser.parse_args()\n self.client.languagepacks.delete(lp_id=args.lp_id)", "def fusion_api_delete_logical_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.logical_enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)" ]
[ "0.70391244", "0.6474321", "0.64738065", "0.627349", "0.6271923", "0.6258844", "0.62353176", "0.62022763", "0.61945534", "0.6159453", "0.6140253", "0.613429", "0.6125533", "0.60901445", "0.6080052", "0.6075064", "0.6070363", "0.60287416", "0.60208213", "0.59820336", "0.59742236", "0.59687", "0.59672964", "0.59358823", "0.59176904", "0.5912807", "0.59055126", "0.590017", "0.5899043", "0.5872727" ]
0.7300982
0
Deletes an interconnect from a LI location [Arguments]
def fusion_api_delete_li_interconnect(self, location, api=None, headers=None): return self.li.delete(location=location, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_interconnect(interconnectId=None):\n pass", "def delete_location(self, location_id):", "def removeNeighbor(self, neighborID):", "def delete_loc(lid):\r\n\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n db.execute(\r\n \"DELETE FROM location WHERE location_id = ? AND for_business = ?\", (lid, b_id,)\r\n )\r\n db.commit()\r\n db.execute(\"DELETE FROM warehouse WHERE loc_id = ? AND b_id = ?\", (lid, b_id,))\r\n db.commit()\r\n return redirect(url_for(\"main.locations\"))", "def delete():", "def delete_instigator_state(self, origin_id: str):", "def delete_this_region(self):", "def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")", "def unlink(address):", "def delete_loc(lat, lon):\n\tredis_server = SETTINGS['REDIS_IP']\n\tredis_session = redis.StrictRedis(host=redis_server,\\\n\t\t\t\t\t\t\tport=6379, db=0)\n\tredis_session.zrem(\"all_loc\", str(str(lon), str(lat)))", "def __delitem__(self, i: Route) -> None:\n i = hash(i.addr)\n if i not in self._destinations:\n raise KeyError(\"{} key not found in the RIB\".format(i))\n del self._destinations[i]", "def delete(self, x, y):\n pass", "def remove(self, destination: n):\n try:\n self.connections.pop(destination)\n except KeyError:\n pass", "def delete_ip(ip):\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n\n # Deleting single record now\n sql_delete_query = \"\"\"DELETE from Status where ip = ?\"\"\"\n\n cursor.execute(sql_delete_query, [ip])\n sql.commit()\n\n logging.debug(\"Record deleted successfully \")\n\n cursor.close()\n sql.close()", "def del_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()", "def delete_connection(self, house):\n if house in self.connections:\n print(self.id)\n self.connections.remove(house)\n self.capacity += house.max_output\n house.connected_battery = None\n else:\n print(\"House cannot be disconnected because it is not connected\")", "def del_ip(ip_list, interrupted_ip):\n ip_index = ip_list.index(interrupted_ip)\n del ip_list[ip_index]\n return ip_list", "def office_delete_adjoint_sources_for_iteration(parser, args, params):\n parser.parse_known_args(args)\n control.delete_adjoint_sources_for_iteration(params)", "def removeJoint(*args, **kwargs)->None:\n pass", "def delete_by_local_path(self, list_of_local_paths): # todo: check error handling\n conn = self.create_connection()\n conn.isolation_level = None\n c = conn.cursor()\n c.execute(\"begin\")\n try:\n for lp in list_of_local_paths:\n cmd = 'DELETE FROM %s WHERE %s=\"%s\"' % (self.TABLE_NAME, self.LOC, lp)\n c.execute(cmd)\n # c.execute(\"fnord\") # to check if transaction rollbacks\n conn.commit()\n except sqlite3.Error:\n print(\"Transaction failed!\")\n conn.rollback()\n conn.close()", "def delete():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # The interatomic data.\n for interatom in interatomic_loop():\n # The data.\n if hasattr(interatom, 'j_coupling'):\n del interatom.j_coupling\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n del interatom.j_coupling_err", "def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])", "def unlink(self, link_id):", "def remove_neighbor(self):\n self.fono -= 1", "def delete_conf(src_ip):\n return delete_route(src_ip)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete(self, params=None):\n self.client.delete_vpn_connection_route(**params)", "def 
removeConnection(tagA, tagB): #@NoSelf", "def remove_location(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.remove'\n return self.call(self.options)", "def logbook_delete(lb):\n return IMPL.logbook_delete(lb)" ]
[ "0.70523", "0.6351125", "0.62242943", "0.5983546", "0.5859711", "0.58195263", "0.5752079", "0.5739547", "0.5688578", "0.5688009", "0.56834584", "0.5672571", "0.5653946", "0.5601045", "0.55577123", "0.55562395", "0.55517256", "0.55011714", "0.54998237", "0.54908866", "0.5473528", "0.5473036", "0.5464577", "0.54616624", "0.5457308", "0.54568166", "0.5449031", "0.5443102", "0.5428051", "0.54236287" ]
0.72726476
0
Gets the ethernetSettings for the given LI [Arguments]
def fusion_api_get_li_ethernet_settings(self, uri, api=None, headers=None): param = '/ethernetSettings' return self.li.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_settings(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['NetworkSettings']", "def GetTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('getTapSettings', payload=payload, response_object=None)", "def intGet(): \n macchanger, ip, iwconfig = pathGet()\n interfaces = []\n a = str(subprocess.check_output(\"{} link show\".format(ip), shell=True))\n ints = a.split(': ')\n for i in range(len(ints)):\n if len(ints[i].split()) == 1:\n if ints[i] not in [\"1\", \"lo\", \"b'1\"]:\n interface = {'name':str(ints[i])}\n interfaces.append(interface)\n # Get interface properties\n for interface in interfaces:\n name = interface['name']\n macs = subprocess.check_output(\"{} -s {}\".format(macchanger, name), shell=True).decode(\"utf-8\")\n interface['cMac'] = macs.split()[2]\n interface['cVend'] = macs.split(\"(\")[1].split(\")\")[0]\n interface['pMac'] = macs.split(\"\\n\")[1].split()[2]\n interface['pVend'] = macs.split(\"\\n\")[1].split(\"(\")[1].split(\")\")[0]\n try:\n mon = subprocess.check_output(\"{} {} 2> /dev/null\".format(iwconfig, name), shell=True).split()\n mon1 = mon[3].decode(\"utf-8\").split(':')[1]\n if mon1 == 'off/any':\n mon1 = mon[4].decode(\"utf-8\").split(':')[1]\n interface['mon'] = mon1\n except:\n interface['mon'] = 'Wired'\n return(interfaces)", "def get_network_settings(self, nReserved = 0):\n\t\treturn Job(SDK.PrlVmGuest_GetNetworkSettings(self.handle, nReserved)[0])", "def GetTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"getTapSettings\", payload=payload, response_object=None)", "def read_config(*args):\n\n ret = {}\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"config\", \"get\")\n else:\n cmd = _traffic_line(\"-r\")\n\n try:\n for arg in args:\n log.debug(\"Querying: %s\", arg)\n ret[arg] = _subprocess(cmd + [arg])\n except KeyError:\n pass\n\n return ret", "def network_config(args): # pylint: disable-msg=W0613\n if not NETLOCK.acquire_read(NET_LOCK_TIMEOUT):\n raise HttpReqError(503, \"unable to take NETLOCK for reading after %s seconds\" % NET_LOCK_TIMEOUT)\n try:\n netconf = xivo_config.load_current_configuration()\n return yaml_json.stringify_keys(netconf)\n finally:\n NETLOCK.release()", "def fusion_api_update_li_ethernet_settings(self, body=None, uri=None, api=None, headers=None):\n param = '/ethernetSettings'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def get_settings():\n try:\n branches = database.select(database.QUERY[mn()])\n # QUERY['get_settings'] = \"SELECT number, name, time, intervals, time_wait, start_time, line_type, base_url, pump_enabled from lines where line_type='power_outlet' order by number\"\n for row in branches:\n branch_id = row[0]\n name = row[1]\n time = row[2]\n intervals = row[3]\n time_wait = row[4]\n start_time = row[5]\n line_type = row[6]\n base_url = row[7]\n pump_enabled = row[8]\n\n BRANCHES_SETTINGS[branch_id] = {\n 'branch_id': branch_id,\n 'name': name,\n 'time': time,\n 'intervals': intervals,\n 'time_wait': time_wait,\n 'start_time': start_time,\n 'line_type': line_type,\n 'base_url': base_url,\n 'pump_enabled': True if pump_enabled == 1 else False\n }\n logging.debug(\"{0} added to 
settings\".format(str(BRANCHES_SETTINGS[branch_id])))\n except Exception as e:\n logging.error(\"Exceprion occured when trying to get settings for all branches. {0}\".format(e))", "def get_network_config2():\n interfaces = get_interfaces()\n ips = [get_ip_address2(ip) for ip in interfaces]\n return dict(zip(interfaces,ips))", "def GetNetworkPerformanceConfig(args, client):\n\n network_perf_args = getattr(args, 'network_performance_configs', [])\n network_perf_configs = client.messages.NetworkPerformanceConfig()\n\n for config in network_perf_args:\n total_tier = config.get('total-egress-bandwidth-tier', '').upper()\n if total_tier:\n network_perf_configs.totalEgressBandwidthTier = client.messages.NetworkPerformanceConfig.TotalEgressBandwidthTierValueValuesEnum(\n total_tier)\n\n return network_perf_configs", "def readConfig(file=\"config.ini\"):\n ip_pool = []\n cmd_pool = []\n Config=ConfigParser.ConfigParser()\n Config.read(file)\n machines = Config.items(\"MACHINES\")\n commands = Config.items(\"COMMANDS\")\n for ip in machines:\n ip_pool.append(ip[1])\n for cmd in commands:\n cmd_pool.append(cmd[1])\n print cmd[1]\n return ip_pool,cmd_pool", "def fusion_api_get_ethernet_networks(self, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.get(uri=uri, api=api, headers=headers, param=param)", "def linux():\n command = \"cat /etc/NetworkManager/system-connections/*\"\n networks = subprocess.check_output(command, shell=True).decode(\"utf-8\")\n return networks", "def topo_conf():\n for k in switches.keys():\n switches_ip[k] = IPAddr((192<<24)+int(k))\n switches_mac[k] = EthAddr(\"aa\"+ \"%010d\"%(k))", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def network_settings():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgLanNetworking -o cfgDNSDomainName <Domain Name>\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DNSDomainName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgLanNetworking -o cfgDNSServer1 \"+colo_dns[DEFAULT_COLO ][0])\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DNSServer1 failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgLanNetworking -o cfgDNSServer2 \"+colo_dns[DEFAULT_COLO ][1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DNSServer2 failed \")", "def _network_conf(conf_tuples=None, **kwargs):\n nic = kwargs.get(\"network_profile\", None)\n ret = []\n nic_opts = kwargs.get(\"nic_opts\", {})\n if nic_opts is None:\n # coming from elsewhere\n nic_opts = {}\n if not conf_tuples:\n conf_tuples = []\n old = _get_veths(conf_tuples)\n if not old:\n old = {}\n\n # if we have a profile name, get the profile and load the network settings\n # this will obviously by default look for a profile called \"eth0\"\n # or by what is defined in nic_opts\n # and complete each nic settings by sane defaults\n if nic and isinstance(nic, ((str,), dict)):\n nicp = get_network_profile(nic)\n else:\n nicp = {}\n if DEFAULT_NIC not in nicp:\n nicp[DEFAULT_NIC] = {}\n\n kwargs = copy.deepcopy(kwargs)\n gateway = kwargs.pop(\"gateway\", 
None)\n bridge = kwargs.get(\"bridge\", None)\n if nic_opts:\n for dev, args in nic_opts.items():\n ethx = nicp.setdefault(dev, {})\n try:\n ethx = salt.utils.dictupdate.update(ethx, args)\n except AttributeError:\n raise SaltInvocationError(\"Invalid nic_opts configuration\")\n ifs = [a for a in nicp]\n ifs += [a for a in old if a not in nicp]\n ifs.sort()\n gateway_set = False\n for dev in ifs:\n args = nicp.get(dev, {})\n opts = nic_opts.get(dev, {}) if nic_opts else {}\n old_if = old.get(dev, {})\n disable = opts.get(\"disable\", args.get(\"disable\", False))\n if disable:\n continue\n mac = opts.get(\n \"mac\", opts.get(\"hwaddr\", args.get(\"mac\", args.get(\"hwaddr\", \"\")))\n )\n type_ = opts.get(\"type\", args.get(\"type\", \"\"))\n flags = opts.get(\"flags\", args.get(\"flags\", \"\"))\n link = opts.get(\"link\", args.get(\"link\", \"\"))\n ipv4 = opts.get(\"ipv4\", args.get(\"ipv4\", \"\"))\n ipv6 = opts.get(\"ipv6\", args.get(\"ipv6\", \"\"))\n infos = salt.utils.odict.OrderedDict(\n [\n (\n \"lxc.network.type\",\n {\n \"test\": not type_,\n \"value\": type_,\n \"old\": old_if.get(\"lxc.network.type\"),\n \"default\": \"veth\",\n },\n ),\n (\n \"lxc.network.name\",\n {\"test\": False, \"value\": dev, \"old\": dev, \"default\": dev},\n ),\n (\n \"lxc.network.flags\",\n {\n \"test\": not flags,\n \"value\": flags,\n \"old\": old_if.get(\"lxc.network.flags\"),\n \"default\": \"up\",\n },\n ),\n (\n \"lxc.network.link\",\n {\n \"test\": not link,\n \"value\": link,\n \"old\": old_if.get(\"lxc.network.link\"),\n \"default\": search_lxc_bridge(),\n },\n ),\n (\n \"lxc.network.hwaddr\",\n {\n \"test\": not mac,\n \"value\": mac,\n \"old\": old_if.get(\"lxc.network.hwaddr\"),\n \"default\": salt.utils.network.gen_mac(),\n },\n ),\n (\n \"lxc.network.ipv4\",\n {\n \"test\": not ipv4,\n \"value\": ipv4,\n \"old\": old_if.get(\"lxc.network.ipv4\", \"\"),\n \"default\": None,\n },\n ),\n (\n \"lxc.network.ipv6\",\n {\n \"test\": not ipv6,\n \"value\": ipv6,\n \"old\": old_if.get(\"lxc.network.ipv6\", \"\"),\n \"default\": None,\n },\n ),\n ]\n )\n # for each parameter, if not explicitly set, the\n # config value present in the LXC configuration should\n # take precedence over the profile configuration\n for info in list(infos.keys()):\n bundle = infos[info]\n if bundle[\"test\"]:\n if bundle[\"old\"]:\n bundle[\"value\"] = bundle[\"old\"]\n elif bundle[\"default\"]:\n bundle[\"value\"] = bundle[\"default\"]\n for info, data in infos.items():\n if data[\"value\"]:\n ret.append({info: data[\"value\"]})\n for key, val in args.items():\n if key == \"link\" and bridge:\n val = bridge\n val = opts.get(key, val)\n if key in [\n \"type\",\n \"flags\",\n \"name\",\n \"gateway\",\n \"mac\",\n \"link\",\n \"ipv4\",\n \"ipv6\",\n ]:\n continue\n ret.append({f\"lxc.network.{key}\": val})\n # gateway (in automode) must be appended following network conf !\n if not gateway:\n gateway = args.get(\"gateway\", None)\n if gateway is not None and not gateway_set:\n ret.append({\"lxc.network.ipv4.gateway\": gateway})\n # only one network gateway ;)\n gateway_set = True\n # normally, this won't happen\n # set the gateway if specified even if we did\n # not managed the network underlying\n if gateway is not None and not gateway_set:\n ret.append({\"lxc.network.ipv4.gateway\": gateway})\n # only one network gateway ;)\n gateway_set = True\n\n new = _get_veths(ret)\n # verify that we did not loose the mac settings\n for iface in [a for a in new]:\n ndata = new[iface]\n nmac = ndata.get(\"lxc.network.hwaddr\", 
\"\")\n ntype = ndata.get(\"lxc.network.type\", \"\")\n omac, otype = \"\", \"\"\n if iface in old:\n odata = old[iface]\n omac = odata.get(\"lxc.network.hwaddr\", \"\")\n otype = odata.get(\"lxc.network.type\", \"\")\n # default for network type is setted here\n # attention not to change the network type\n # without a good and explicit reason to.\n if otype and not ntype:\n ntype = otype\n if not ntype:\n ntype = \"veth\"\n new[iface][\"lxc.network.type\"] = ntype\n if omac and not nmac:\n new[iface][\"lxc.network.hwaddr\"] = omac\n\n ret = []\n for val in new.values():\n for row in val:\n ret.append(salt.utils.odict.OrderedDict([(row, val[row])]))\n # on old versions of lxc, still support the gateway auto mode\n # if we didn't explicitly say no to\n # (lxc.network.ipv4.gateway: auto)\n if (\n Version(version()) <= Version(\"1.0.7\")\n and True not in [\"lxc.network.ipv4.gateway\" in a for a in ret]\n and True in [\"lxc.network.ipv4\" in a for a in ret]\n ):\n ret.append({\"lxc.network.ipv4.gateway\": \"auto\"})\n return ret", "def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings", "def _get_lsp_config_adaptive(self):\n return self.__lsp_config_adaptive", "def load_switches(self):\n new_switches = list()\n for site in self.sites:\n switches = self.get_switches_stats(site_id=site['id'])\n for switch in switches:\n if len(switch['name']) < 1:\n switch['name'] = ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)])\n new_switch = {\n \"name\": switch['name'],\n \"site\": site['name'],\n \"site_id\": site['id'],\n \"device_id\": switch['id'],\n \"mac\": switch['mac'],\n \"mac_str\": ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)]),\n \"ip_config\": switch['ip_config'],\n \"ip_actual\": switch['ip_stat'],\n \"net_obj\": get_network(address=switch['ip_config']['ip'], netmask=switch['ip_config']['netmask']) if 'ip' in switch['ip_config'] else None\n }\n for vlan, addr in new_switch['ip_actual']['ips'].items():\n if new_switch['ip_actual']['ip'] == addr:\n new_switch['ip_actual']['vlan'] = vlan.strip('vlan')\n else:\n new_switch['ip_actual']['vlan'] = 0\n if new_switch['ip_config']['network'] and new_switch['ip_config']['network'] != \"default\":\n new_switch['ip_config']['vlan'] = site['network_template']['networks'][new_switch['ip_config']['network']]['vlan_id']\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n elif new_switch['ip_config']['network'] and new_switch['ip_config']['network'] == \"default\":\n new_switch['ip_config']['vlan'] = 1\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n else:\n new_switch['ip_config']['vlan'] = 0\n logger.error(f\"Did not match {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n new_switches.append(new_switch)\n self.switches = new_switches", "def get_snmp_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/snmp-setting\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_switches(self) -> tuple:\n return self.switches", "def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n 
adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)", "def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})", "def get_config():\n\n\n # si prova con dict\n current_cfg = TilestacheConfig.get_cache_config_dict()\n if current_cfg:\n return TilestacheConfig(config_dict=current_cfg)\n else:\n cfg = TilestacheConfig()\n TilestacheConfig.set_cache_config_dict(cfg.config_dict)\n return cfg\n\n\n '''\n logger.debug('-------------- get_config -----------------')\n logger.debug('PID {}'.format(os.getpid()))\n # check if file has exixst\n tilestache_cfg = apps.get_app_config('caching').tilestache_cfg\n logger.debug('CID {}'.format(id(tilestache_cfg)))\n logger.debug('LAYERS {}'.format(tilestache_cfg.config.layers))\n if os.path.exists(tilestache_cfg.file_hash_name):\n cid = tilestache_cfg.read_hash_file()\n if cid != tilestache_cfg.get_cache_hash():\n logger.debug('Reistanzia Tcfg'.format(tilestache_cfg.config.layers))\n tilestache_cfg = TilestacheConfig()\n tilestache_cfg.set_cache_hash(cid)\n apps.get_app_config('caching').tilestache_cfg = tilestache_cfg\n\n return tilestache_cfg\n '''", "def networks(self): # type: () -> t.Optional[t.Dict[str, t.Dict[str, t.Any]]]\n return self.network_settings.get('Networks')", "def get_lldp_config():\n query = {\"type\": \"op\", \"cmd\": \"<show><lldp><config>all</config></lldp></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "def __get_scanning_range(self):\n if self.__network is not None:\n return [self.__network]\n networks = []\n interfaces = netifaces.interfaces()\n for data in interfaces:\n ips = netifaces.ifaddresses(data)\n for key, interface_data in ips.items():\n for item in interface_data:\n if item.get(\"netmask\", None) is not None and \\\n item.get(\"addr\", None) is not None and \\\n self.is_legal_ip(item[\"netmask\"]):\n if item.get(\"addr\") not in [\"127.0.0.1\", \"0.0.0.0\"]:\n network = \"{ip}/{cird}\".format(ip=item[\"addr\"],\n cird=IPAddress(item[\"netmask\"]).netmask_bits())\n if network not in networks:\n networks.append(network)\n return networks" ]
[ "0.5640884", "0.54121983", "0.5334801", "0.5304122", "0.52035016", "0.51531327", "0.5137327", "0.51362133", "0.51358837", "0.51007533", "0.49597058", "0.49322188", "0.48798576", "0.48716736", "0.48688695", "0.48512754", "0.48512754", "0.4835732", "0.48269528", "0.48084754", "0.47821996", "0.47736356", "0.4759858", "0.475483", "0.4740171", "0.4720382", "0.4710826", "0.47084993", "0.47003865", "0.469287" ]
0.6779407
0
Gets the fcoeSettings for the given LI [Arguments]
def fusion_api_get_li_fcoe_settings(self, uri, api=None, headers=None):
    param = '/fcoeSettings'
    return self.li.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settings_f(self):\n\n return self._get_list_field(\"settings\", lambda x: _build_setting(self.settings(), x))", "def get_params(path = 'INPUT/conv_params'):\n\n # cd to Input, read the conv_params file in and pass each line to file reader\n list = file_reader(path)\n\n Ecuts = list[0] # first element returned from filereader is the energies\n start = int(Ecuts[0][0]) # the first element of this is the lower energy to start from. convert to integer for maths\n multiplier = int(Ecuts[1][0]) # middle element is the step size\n end = int(Ecuts[2][0]) # last element is upper bound on energy\n E_range = (end - start)//multiplier +1 # the number of energies you will create\n Es = [i*multiplier for i in range(E_range)] # take steps in the E_range of step size multiplier\n Ecuts = [[str(i+start)] for i in Es] # add the start energy to all these steps to shift them to correct energies\n # convert the numbers to strings for ease of file writing later\n\n kpts = list[1] # kpoints list is first element returned\n def_E = list[2] # default energy\n def_k = list[3] # default kpoints\n params = Settings(Ecuts, kpts, def_E, def_k) # create the settings object\n\n return params # return the object", "def get_fe_params(self):\n return self._params[0:self.k_fe]", "def retrieve_args(argument_flags, tr_files):\n \n global output_graph, output_labels, host_nm, port_nm, box_images, log_image_path\n \n if argument_flags.output_graph:\n output_graph = tr_files.join_path(argument_flags.output_graph,\n file_utils.WEIGHTS_FILE)\n output_labels = tr_files.join_path(argument_flags.output_graph,\n file_utils.LABELS_FILE)\n else:\n output_graph = tr_files.get_or_init_files_path()\n output_labels = tr_files.get_or_init_labels_path()\n print('Output graph path was set as - ' , output_graph)\n print('Output labels path was set as - ' , output_labels)\n \n if argument_flags.box_images:\n box_images = argument_flags.box_images\n else:\n box_images = False\n \n if box_images and argument_flags.log_image_path:\n log_image_path = argument_flags.log_image_path\n else:\n log_image_path = None\n \n host_nm = argument_flags.host\n port_nm = argument_flags.port", "def settings():\r\n\r\n config = cp.ConfigParser()\r\n config.read('settings.ini')\r\n \r\n files = config['files']\r\n model = config['model']\r\n plot = config['plot']\r\n \r\n file_format = files['format']\r\n species_file = r'data/' + files['species file']\r\n reactions_file = r'data/' + files['reactions file']\r\n output_file = 'output/' + files['output file']\r\n model_type = model['model type']\r\n density = model.getfloat('density')\r\n temperature = model.getfloat('temperature')\r\n start_time = model.getfloat('start time')\r\n end_time = model.getfloat('end time')\r\n outfile = plot['outfile for plotting']\r\n\r\n return file_format, species_file, reactions_file, output_file, model_type, density, temperature, start_time, end_time, outfile", "def ffn_runs(env):\n\n # different parameters to vary, first entry is name of parameter, second is possible values\n\n params = [\n ['learning_freq', [1, 10, 100, 1000]],\n ['target_update_freq', [1, 10, 100, 1000]]\n ]\n\n\n constraints = [\n lambda setting: setting[0] <= setting[1] # enforces that learning_freq <= target_update_freq\n ]\n\n\n return get_arg_list(params, constraints, FFN_ARGS)", "def parseL3FormulaWithSettings(*args):\n return _libsbml.parseL3FormulaWithSettings(*args)", "def get_settings(f_obj):\n\n return {setting : getattr(f_obj, setting) for setting in get_obj_desc()['settings']}", "def 
get_sargs(args):\n import sys\n sys.argv = args\n from basis.basis_solve import _parser_options\n return _parser_options()", "def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n energy_predictor_filter_size=args.energy_predictor_filter_size,\n p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)", "def fpp_config(koi, **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n config = ConfigObj(os.path.join(folder,'fpp.ini'))\n\n koi = ku.koiname(koi)\n\n rowefit = jrowe_fit(koi)\n\n config['name'] = koi\n ra,dec = ku.radec(koi)\n config['ra'] = ra\n config['dec'] = dec\n config['rprs'] = rowefit.ix['RD1','val']\n config['period'] = rowefit.ix['PE1', 'val']\n\n config['starfield'] = kepler_starfield_file(koi)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n config['constraints'] = {}\n config['constraints']['maxrad'] = default_r_exclusion(koi)\n try:\n config['constraints']['secthresh'] = pipeline_weaksec(koi)\n except NoWeakSecondaryError:\n 
pass\n\n return config", "def getdefflags(config_nm):\n if config_nm is 'train':\n user_params = user_params_train\n elif config_nm is 'eval':\n user_params = user_params_eval\n elif config_nm is 'tfrecorder':\n user_params = user_params_recorder\n else:\n print('Unrecognized configuration name : %s, exiting ....' % config_nm)\n exit(-1)\n\n\n return mandatory_params+user_params", "def get_opts(args=None):\n\n # Set up argument parser\n parser = ArgParser(add_help=False, usage='%(prog)s [options]',\n description='PSF Deconvolution Script',\n formatter_class=formatter,\n fromfile_prefix_chars='@')\n required = parser.add_argument_group('Required Arguments')\n optional = parser.add_argument_group('Optional Arguments')\n init = parser.add_argument_group(' * Initialisation')\n optimisation = parser.add_argument_group(' * Optimisation')\n lowrank = parser.add_argument_group(' * Low-Rank Aproximation')\n sparsity = parser.add_argument_group(' * Sparsity')\n psfest = parser.add_argument_group(' * PSF Estimation')\n shape = parser.add_argument_group(' * Shape Constraint')\n condat = parser.add_argument_group(' * Condat Algorithm')\n testing = parser.add_argument_group(' * Testing')\n hidden = parser.add_argument_group(' * Hidden Options')\n\n # Add arguments\n optional.add_argument('-h', '--help', action='help',\n help='show this help message and exit')\n\n optional.add_argument('-v', '--version', action='version',\n version='%(prog)s {}'.format(__version__))\n\n optional.add_argument('-q', '--quiet', action='store_true',\n help='Suppress verbose.')\n\n required.add_argument('-i', '--input', required=True,\n help='Input noisy data file name.')\n\n required.add_argument('-p', '--psf_file', required=True,\n help='PSF file name.')\n\n hidden.add_argument('--psf_type', choices=('fixed', 'obj_var'),\n default='obj_var', help=ap.SUPPRESS)\n\n optional.add_argument('-o', '--output', help='Output file name.')\n\n optional.add_argument('--output_format', choices={'npy', 'fits'},\n default='npy', help='Output file format.')\n\n init.add_argument('-k', '--current_res',\n help='Current deconvolution results file name.')\n\n hidden.add_argument('--primal', help=ap.SUPPRESS)\n\n init.add_argument('--noise_est', type=float,\n help='Initial noise estimate.')\n\n optimisation.add_argument('-m', '--mode', default='lowr',\n choices=('all', 'sparse', 'lowr', 'grad'),\n help='Option to specify the regularisation '\n 'mode.')\n\n optimisation.add_argument('--opt_type', default='condat',\n choices=('condat', 'fwbw', 'gfwbw'),\n help='Option to specify the optimisation method '\n 'to be implemented.')\n\n optimisation.add_argument('--n_iter', type=int, default=150,\n help='Number of iterations.')\n\n optimisation.add_argument('--cost_window', type=int, default=1,\n help='Window to measure cost function.')\n\n optimisation.add_argument('--convergence', type=float,\n default=3e-4, help='Convergence tolerance.')\n\n optimisation.add_argument('--no_pos', action='store_true',\n help='Option to turn off postivity constraint.')\n\n optimisation.add_argument('--no_plots', action='store_true',\n help='Suppress plots.')\n\n optimisation.add_argument('--grad_type', default='psf_known',\n choices=('psf_known', 'psf_unknown', 'shape',\n 'none'),\n help='Option to specify the type of gradient.')\n\n optimisation.add_argument('--convolve_method', default='astropy',\n choices=('astropy', 'scipy'),\n help='Option to specify the convolution method.')\n\n lowrank.add_argument('--lowr_thresh_factor', type=float, default=1,\n help='Low rank 
threshold factor.')\n\n lowrank.add_argument('--lowr_type', choices=('standard', 'ngole'),\n default='standard', help='Low rank type.')\n\n lowrank.add_argument('--lowr_thresh_type', choices=('hard', 'soft'),\n default='hard', help='Low rank threshold type.')\n\n sparsity.add_argument('--wavelet_type', default='1',\n help='mr_transform wavelet type.')\n\n sparsity.add_argument('--wave_thresh_factor', type=float, nargs='+',\n default=[3.0, 3.0, 4.0],\n help='Wavelet threshold factor.')\n\n sparsity.add_argument('--n_reweights', type=int, default=1,\n help='Number of reweightings.')\n\n psfest.add_argument('--lambda_psf', type=float, default=1.0,\n help='Regularisation control parameter for PSF '\n 'estimation')\n\n psfest.add_argument('--beta_psf', type=float, default=1.0,\n help='Gradient step for PSF estimation')\n\n shape.add_argument('--lambda_shape', type=float, default=1.0,\n help='Regularisation control parameter for shape '\n 'constraint')\n\n condat.add_argument('--relax', type=float, default=0.8,\n help='Relaxation parameter (rho_n).')\n\n condat.add_argument('--condat_sigma', type=float, nargs='?', const=None,\n default=0.5, help='Condat proximal dual parameter.')\n\n condat.add_argument('--condat_tau', type=float, nargs='?', const=None,\n default=0.5, help='Condat proximal primal parameter')\n\n testing.add_argument('-c', '--clean_data', help='Clean data file name.')\n\n testing.add_argument('-r', '--random_seed', type=int, help='Random seed.')\n\n testing.add_argument('--true_psf', help='True PSFs file name.')\n\n testing.add_argument('--kernel', type=float,\n help='Sigma value for Gaussian kernel.')\n\n testing.add_argument('--metric', choices=('mean', 'median'),\n default='median', help='Metric to average errors.')\n\n # Return the argument namespace\n return parser.parse_args(args)", "def get_inputs_scf(self):\n input_scf = AttributeDict(self.exposed_inputs(FleurScfWorkChain, namespace='scf'))\n\n with inpxml_changes(input_scf) as fm:\n\n if [x for x in self.ctx.wf_dict['ref_qss'] if x != 0]:\n fm.set_inpchanges({\n 'qss': self.ctx.wf_dict['ref_qss'],\n 'l_noco': True,\n 'ctail': False,\n 'l_ss': True,\n 'l_soc': False\n })\n else:\n fm.set_inpchanges({\n 'qss': ' 0.0 0.0 0.0 ',\n 'l_noco': False,\n 'ctail': True,\n 'l_ss': False,\n 'l_soc': False\n })\n\n # change beta parameter\n for key, val in self.ctx.wf_dict['beta'].items():\n fm.set_atomgroup_label(key, {'nocoParams': {'beta': val}})\n\n if 'structure' in input_scf: # add info about spin spiral propagation\n if 'calc_parameters' in input_scf:\n calc_parameters = input_scf.calc_parameters.get_dict()\n else:\n calc_parameters = {}\n sum_vec = np.array([np.pi / 4.0, np.e / 3.0, np.euler_gamma])\n calc_parameters['qss'] = {'x': sum_vec[0], 'y': sum_vec[1], 'z': sum_vec[2]}\n calc_parameters['soc'] = {'theta': 0.7, 'phi': 0.7}\n input_scf.calc_parameters = Dict(calc_parameters)\n return input_scf", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT 
couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def GetSettingsForOperation(self, operation):\r\n if (operation == IMPORT):\r\n options = []\r\n for entry in FXsi.__IMPORT_OPTIONS:\r\n options.append(FSettingEntry(*entry))\r\n return options\r\n elif (operation == EXPORT):\r\n options = []\r\n for entry in FXsi.__EXPORT_OPTIONS:\r\n options.append(FSettingEntry(*entry))\r\n return options\r\n elif (operation == RENDER): \r\n options = []\r\n for entry in FXsi.__RENDER_OPTIONS:\r\n options.append(FSettingEntry(*entry))\r\n return options\r\n else:\r\n return []", "def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)", "def _add_fp_configs(CONFIG):\n CONFIG.declare(\n 'fp_cutoffdecr',\n ConfigValue(\n default=1e-1,\n domain=PositiveFloat,\n description='Additional relative decrement of cutoff value for the original objective function.',\n ),\n )\n CONFIG.declare(\n 'fp_iteration_limit',\n ConfigValue(\n default=20,\n domain=PositiveInt,\n description='Feasibility pump iteration limit',\n doc='Number of maximum iterations in the feasibility pump methods.',\n ),\n )\n # TODO: integrate this option\n CONFIG.declare(\n 'fp_projcuts',\n ConfigValue(\n default=True,\n description='Whether to add cut derived from regularization of MIP solution onto NLP feasible set.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_transfercuts',\n ConfigValue(\n default=True,\n description='Whether to transfer cuts from the Feasibility Pump MIP to main MIP in selected strategy (all except from the round in which the FP MIP became infeasible).',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_projzerotol',\n ConfigValue(\n default=1e-4,\n domain=PositiveFloat,\n description='Tolerance on when to 
consider optimal value of regularization problem as zero, which may trigger the solution of a Sub-NLP.',\n ),\n )\n CONFIG.declare(\n 'fp_mipgap',\n ConfigValue(\n default=1e-2,\n domain=PositiveFloat,\n description='Optimality tolerance (relative gap) to use for solving MIP regularization problem.',\n ),\n )\n CONFIG.declare(\n 'fp_discrete_only',\n ConfigValue(\n default=True,\n description='Only calculate the distance among discrete variables in regularization problems.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_main_norm',\n ConfigValue(\n default='L1',\n domain=In(['L1', 'L2', 'L_infinity']),\n description='Different forms of objective function MIP regularization problem.',\n ),\n )\n CONFIG.declare(\n 'fp_norm_constraint',\n ConfigValue(\n default=True,\n description='Whether to add the norm constraint to FP-NLP',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_norm_constraint_coef',\n ConfigValue(\n default=1,\n domain=PositiveFloat,\n description='The coefficient in the norm constraint, correspond to the Beta in the paper.',\n ),\n )", "def get_params(self, comArgs):\n params = {}\n flags = []\n \n for c in comArgs:\n if len(c) == 1:\n flags.append(c.lower())\n else:\n k = c[0]\n v = c[1:]\n params[k] = float(v)\n return params, flags", "def getF(*params):\n mode_flag = 0\n for param in params:\n if isinstance(param, nd.NDArray):\n if mode_flag < 0:\n raise TypeError(\"Expect parameters to have consistent running mode,\" +\n \" got {}\".format([type(p) for p in params]))\n mode_flag = 1\n elif isinstance(param, sym.Symbol):\n if mode_flag > 0:\n raise TypeError(\"Expect parameters to have consistent running mode,\" +\n \" got {}\".format([type(p) for p in params]))\n mode_flag = -1\n # In case of scalar params, we choose to use the imperative mode.\n if mode_flag < 0:\n return sym\n return nd", "def get_input_settings(self):\n\n input_settings = {\"name\": name,\n \"start\": self._start_settings, \"parallel\": self._parallel_settings ,\n \"electronic\": self._electronic_settings, \"magnetic\": self._magnetic_settings,\n \"hybrid\": self._hybrid_settings, \"hubbard\": self._hubbard_settings, \"misc_setting\": self._misc_settings}\n return input_settings", "def getFiList():\n fi = getFi()\n fiList = []\n for arg,val in fi.items():\n fiList.append(val)\n return fiList", "def SetupFcn(self):\n return _hypre.HypreTriSolve_SetupFcn(self)", "def _get_lsp_config_frr_one_to_one(self):\n return self.__lsp_config_frr_one_to_one", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def main():\r\n\t\"\"\"\r\n\tmain() parses arguments to pass to SimPlt\r\n\t\"\"\"\r\n\ttry: b = float(argv[1])\r\n\texcept IndexError: b = 1.0\r\n\t\r\n\ttry: X = float(argv[2])\r\n\texcept IndexError: X = 1.0\r\n\t\t\r\n\tparser = optparse.OptionParser()\r\n\t\r\n\tparser.add_option('-t','--timefac',\r\n dest=\"timefac\",\r\n 
default=1.0,\r\n type=\"float\",\r\n )\r\n\tparser.add_option('--BW',\r\n dest=\"BW\",\r\n default=False,\r\n action=\"store_true\"\r\n )\r\n\t\t\t\t \r\n\tparser.add_option('-s','--smooth',\r\n dest=\"smooth\",\r\n default=1.0,\r\n type=\"float\",\r\n )\r\n\t\t\t\t \r\n\topt = parser.parse_args()[0]\r\n \t\t\r\n\tLE_SimPlt(b, X, opt.timefac, opt.BW, opt.smooth)\r\n\r\n\treturn", "def load_cfg_fom_args(description=\"Config file options.\", is_train=True):\n parser = argparse.ArgumentParser(description=description)\n help_s = \"Config file location\"\n parser.add_argument(\"--cfg\", dest=\"cfg_file\", help=help_s, required=True, type=str)\n help_s = \"See pycls/core/config.py for all options\"\n parser.add_argument(\"opts\", help=help_s, default=None, nargs=argparse.REMAINDER)\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args()\n load_cfg(args.cfg_file)\n _C.merge_from_list(args.opts)\n if is_train:\n time_local = time.localtime()\n name_expend = \"%02d%02d_%02d%02d%02d_\"%(time_local[1], time_local[2],time_local[3], time_local[4], time_local[5])\n _C.NAME = name_expend +_C.NAME", "def sparameters(**kwargs):\n d = sweep(**kwargs)\n d.pop(\"GC_sweeps.lsf\", \"\")\n d[\"main.lsf\"] = \"\\n\".join([\"GC_init;\", \"GC_S_extraction;\"])\n d[\"GC_S_extraction.lsf\"] = open(\n CONFIG[\"grating_coupler\"] / \"GC_S_extraction.lsf\"\n ).read()\n d[\"GC_setup_fibre.lsf\"] = open(\n CONFIG[\"grating_coupler\"] / \"GC_setup_fibre.lsf\"\n ).read()\n d[\n \"main.py\"\n ] = \"\"\"\n\nimport pathlib\nimport json\nimport lumapi\n\n\ndirpath = pathlib.Path(__file__).parent.absolute()\n\ns = lumapi.FDTD()\ns.cd(str(dirpath))\ns.eval(\"main;\")\n\nd = {k: list(abs(s.getv(k).flatten())) for k in [\"S11\", \"S12\", \"S21\", \"S22\", \"f\"]}\n\nwith open(dirpath / \"GC_sparameters.json\", \"w\") as f:\n f.write(json.dumps(d))\n\n \"\"\"\n return d", "def get_flags():\n flags.DEFINE_string(\n 'model_name',\n help='MobileNet version name: mobilenet_v1, mobilenet_v2, '\n 'mobilenet_v3_small and mobilenet_v3_large',\n default='mobilenet_v1'\n )\n flags.DEFINE_string(\n 'dataset_name',\n help='Dataset name from TDFS to train on: imagenette, imagenet2012',\n default='imagenette'\n )\n flags.DEFINE_string(\n 'model_dir',\n help='Working directory.',\n default='./tmp'\n )\n flags.DEFINE_string(\n 'data_dir',\n help='Directory for training data.',\n default=None\n )\n flags.DEFINE_bool(\n 'resume_checkpoint',\n help='Whether resume training from previous checkpoint.',\n default=False\n )\n flags.DEFINE_string(\n 'optimizer_name',\n help='Name of optimizer.',\n default='rmsprop'\n )\n flags.DEFINE_string(\n 'learning_scheduler_name',\n help='Name of learning rate scheduler.',\n default='exponential'\n )\n # for hyperparameter tuning\n flags.DEFINE_float(\n 'op_momentum',\n help='Optimizer momentum.',\n default=0.9\n )\n flags.DEFINE_float(\n 'op_decay_rate',\n help='Optimizer discounting factor for gradient.',\n default=0.9\n )\n flags.DEFINE_float(\n 'lr',\n help='Base learning rate.',\n default=0.008\n )\n flags.DEFINE_float(\n 'lr_decay_rate',\n help='Magnitude of learning rate decay.',\n default=0.97\n )\n flags.DEFINE_float(\n 'lr_decay_epochs',\n help='Frequency of learning rate decay.',\n default=2.4\n )\n flags.DEFINE_float(\n 'label_smoothing',\n help='The amount of label smoothing.',\n default=0.0,\n )\n flags.DEFINE_float(\n 'ma_decay_rate',\n help='Exponential moving average decay rate.',\n default=None\n )\n flags.DEFINE_float(\n 'dropout_rate',\n help='Dropout rate.',\n 
default=0.2\n )\n flags.DEFINE_float(\n 'std_weight_decay',\n help='Standard weight decay.',\n default=0.00004\n )\n flags.DEFINE_float(\n 'truncated_normal_stddev',\n help='The standard deviation of the truncated normal weight initializer.',\n default=0.09\n )\n flags.DEFINE_float(\n 'batch_norm_decay',\n help='Batch norm decay.',\n default=0.9997\n )\n flags.DEFINE_integer(\n 'batch_size',\n help='Training batch size.',\n default=4 # for testing purpose\n )\n flags.DEFINE_integer(\n 'epochs',\n help='Number of epochs.',\n default=5\n )", "def get_config(args):\n load_args={}\n with open(args.config, 'r') as f:\n for line in f:\n key, value = line.strip().split('=')\n try:\n value = int(value)\n except ValueError:\n try:\n value = float(value)\n except ValueError:\n value = value\n load_args[key] = value\n args.__dict__.update(load_args)" ]
[ "0.5347215", "0.5198004", "0.50993156", "0.5070632", "0.50702673", "0.50183475", "0.49987012", "0.49866545", "0.48432627", "0.48331475", "0.4829592", "0.4822263", "0.48136076", "0.48022184", "0.4784212", "0.47505832", "0.47194228", "0.4671016", "0.4670807", "0.46582836", "0.46375337", "0.46274796", "0.46180758", "0.46125904", "0.45857412", "0.45771462", "0.45741192", "0.45635003", "0.45575443", "0.455137" ]
0.55565464
0
Generates the forwarding information base dump file for the given LI [Arguments]
def fusion_api_generate_li_forwarding_information_base_dump_file(self, uri, api=None, headers=None):
    param = '/forwarding-information-base'
    return self.li.post(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genDump(exePath,inputDict,outputDict):\n paramFile = path.join(path.dirname(inputDict),'damo.par')\n with open(paramFile,'w') as f:\n f.write('DUMP'+'\\n')\n f.write(inputDict+'\\n')\n f.write(outputDict)\n runDamocles(exePath, paramFile)\n remove(paramFile)", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-l\", dest=\"liglist\", help=\"list of ligands\")\n\tparser.add_option(\"-r\", dest=\"runfile\", help=\"run file\")\n\tparser.add_option(\"-o\", dest=\"logfile\", help=\"log file\")\n\tparser.add_option(\"-b\", dest=\"bkupfile\", help=\"backed up file\")\n\tparser.add_option(\"-g\", dest=\"go\", help=\"do the runs\",action=\"store_true\")\n\tparser.set_description(main.__doc__)\n\t(options,args) = parser.parse_args()\n\n\tif not options.liglist or not options.runfile:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif not options.logfile:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t# --- create directories for each ligand --- #\t\n\tligands = files_from_list(options.liglist)\t\n\n\texe = \"basename $PWD\"\n\tcurrdir = commands.getoutput(exe)\n\n\tcwd = os.getcwd()\n\tls = os.listdir(cwd)\n\tprotein = currdir + \"_9.pdb\"\n\tif not protein in ls:\n\t\tprint \"cannot find protein file:\",protein\n\t\tsys.exit()\n\n\ttry:\n\t\tRUN = open(options.runfile)\n\texcept:\n\t\tprint \"unable to open run file\"\n\t\tsys.exit()\t\n\n\trunline = RUN.readline()\n\tre_het = re.compile(\"HETERO\")\n\tre_prt = re.compile(\"PROTEIN\")\n\n\tif not re_het.search(runline):\n\t\tprint \"run must contain HETERO\"\n\t\tsys.exit()\n\n\tif not re_prt.search(runline):\n\t\tprint \"run must contain PROTEIN\"\n\t\tsys.exit()\n\t\t\n\n\tfor lig in ligands:\n\t\trline = runline\n\t\tligbase = get_basefile(lig)\n\n\t\texe = \"mkdir \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp \" + lig + \" \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp \" + currdir + \"* \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp paths.txt \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp \" + options.bkupfile + \" \" + ligbase\n\t\tos.system(exe)\n\n\t\trline = rline.replace(\"HETERO\", lig)\n\t\trline = rline.replace(\"PROTEIN\", protein)\n\t\t\n\t\tnewrun = ligbase + \"/\" + options.runfile\n\t\ttry:\n\t\t\tOUTRUN = open(newrun, 'w')\n\t\texcept:\n\t\t\tprint \"cannot make new run\"\n\t\t\tsys.exit()\n\n\n\t\tOUTRUN.write(rline) \n\t\tOUTRUN.close()\n\t\tos.chmod(newrun, stat.S_IRWXU)\n\n\t\tif options.go:\n\t\t\tos.chdir(ligbase)\n\t\t\tprint \" in\",os.getcwd()\n\t\t\texe = \"nice ./\" + options.runfile + \" >& \" + options.logfile\n\t\t\tos.system(exe)\n\t\t\tos.chdir(\"..\")", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Generating NUFEB simulation files\")\n\n # create nutrients\n light = Nutrient(1e-1, None, None, \"g\", \"nn\")\n co2 = Nutrient(float(args.co2), 1.9e-09, 44.01, \"l\", \"nn\")\n o2 = Nutrient(0.28125, 2.30e-9, 32, \"l\", \"nn\")\n sucrose = Nutrient(float(args.sucrose), 5.2e-10, 342.3, \"l\", \"nn\")\n gco2 = Nutrient(0, None, 44.01, \"g\", \"nn\")\n TEMPLATES_DIR = (Path(__file__).parent) / \"templates\"\n\n captureRate = round(1000 / args.timestep)\n # define dump parameters\n dump_list = {\n \"vtk_dump\": f\"dump atom_vtk all vtk {captureRate} dump*.vtu id type diameter vx vy vz fx fy fz \\n dump grid_vtk all grid/vtk {captureRate} dump_%_*.vti con\",\n \"image_dump\": f\"dump du_image all image {captureRate} image.*.jpg type diameter zoom 2 bacillus type size 1280 720 view 45 60 \\n dump_modify du_image acolor 1 green acolor 2 
red\",\n \"movie_dump\": f\"dump du_mov all movie {captureRate} movie.avi type diameter zoom 1.5 bacillus type size 1280 720 view 0 0 \\n dump_modify du_mov acolor 1 green acolor 2 red\",\n \"hdf_dump\": f\"dump du_h5 all nufeb/hdf5 {captureRate} dump.h5 id type x y z vx vy vz fx fy fz radius conc reac\",\n }\n\n dumps = defaultdict(list)\n for i in range(4):\n tmp = [\"vtk_dump\", \"image_dump\", \"movie_dump\", \"hdf_dump\"]\n dumps[tmp[i]]\n\n for dump, dump_var in zip(\n [args.vtk, args.img, args.movie, args.hdf],\n [\"vtk_dump\", \"image_dump\", \"movie_dump\", \"hdf_dump\"],\n ):\n if dump is True or dump == \"True\":\n dumps[dump_var] = dump_list[dump_var]\n else:\n dumps[dump_var] = \"\"\n\n ## Species-specific parameters\n\n # check for runs folder\n if not os.path.isdir(\"runs\"):\n os.mkdir(\"runs\")\n x = float(args.dims.split(\",\")[0])\n y = float(args.dims.split(\",\")[1])\n z = float(args.dims.split(\",\")[2])\n for n in range(1, int(args.num) + 1):\n culture = Culture(args)\n atoms_list = []\n bacilli_list = []\n # Create list of atoms and bacilli for atom definition file\n for cell in culture.cells:\n atoms_list.append(cell.Atom())\n bacilli_list.append(cell.Bacillus())\n # make atom definition file\n for r in range(1, int(args.reps) + 1):\n L = [\n \" NUFEB Simulation\\r\\n\\n\",\n f\" {args.cells_init} atoms \\n\",\n f\" {len(culture.cell_types)} atom types \\n\",\n f\" {args.cells_init} bacilli \\n\\n\",\n f\" 0.0e-4 {x :.2e} xlo xhi \\n\",\n f\" 0.0e-4 {y :.2e} ylo yhi \\n\",\n f\" 0.0e-4 {z :.2e} zlo zhi \\n\\n\",\n \" Atoms \\n\\n\",\n ]\n atoms = L + atoms_list\n atoms.append(\"\\n\")\n atoms.append(\" Bacilli \\n\\n\")\n atoms = atoms + bacilli_list\n # write atom definition file\n f = open(\n f\"runs/atom_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{r}.in\",\n \"w+\",\n )\n f.writelines(atoms)\n RUN_DIR = (\n Path(\"runs\")\n / f\"Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}\"\n )\n if not os.path.isdir(RUN_DIR):\n os.mkdir(RUN_DIR)\n # os.mkdir(f'runs/Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}')\n # write initial conditions json file\n dumpfile = open(RUN_DIR / \"metadata.json\", \"w\")\n # dumpfile = open(f\"/runs/Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}/metadata.json\",'w')\n json.dump(CellInfo, dumpfile, indent=6)\n dumpfile.close()\n ###\n\n # write Inputscript\n # open the file\n filein = open(TEMPLATES_DIR / \"bacillus.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"Bacillus.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": args.cells_init,\n \"SucRatio\": culture.SucRatio,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n \"Replicates\": args.reps,\n \"Timesteps\": args.ntimesteps,\n \"ts\": args.timestep,\n \"CYANOGroup\": culture.cyGroup,\n \"ECWGroup\": culture.ecwGroup,\n \"Zheight\": float(args.dims.split(\",\")[2]),\n \"CYANODiv\": culture.cyDiv,\n \"ECWDiv\": culture.ecwDiv,\n \"light\": light.concentration,\n \"co2\": co2.concentration,\n \"o2\": o2.concentration,\n \"sucrose\": sucrose.concentration,\n \"gco2\": gco2.concentration,\n \"CYANOMonod\": culture.cyMonod,\n \"ECWMonod\": culture.ecwMonod,\n \"CYANOcount\": culture.cyanoCount,\n \"ECWcount\": culture.ecwCount,\n \"v_ncyano\": culture.vcyano,\n \"v_necw\": culture.vecw,\n \"vtk_dump\": dumps[\"vtk_dump\"],\n \"image_dump\": dumps[\"image_dump\"],\n \"movie_dump\": 
dumps[\"movie_dump\"],\n \"hdf_dump\": dumps[\"hdf_dump\"],\n }\n )\n f = open(\n f\"./runs/Inputscript_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}.lmp\",\n \"w+\",\n )\n f.writelines(result)\n\n # write local run script\n # open the file\n filein = open(TEMPLATES_DIR / \"local.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"local.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": n,\n \"SucRatio\": culture.SucRatio,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n \"Reps\": args.reps,\n }\n )\n f = open(\n f\"./runs/local_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}.sh\", \"w+\"\n )\n f.writelines(result)\n # write slurm script\n # open the file\n filein = open(TEMPLATES_DIR / \"slurm_dev.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"Slurm.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": args.cells_init,\n \"job\": f\"NUFEB_cyano{n}\",\n \"USER\": args.user,\n \"Replicates\": args.reps,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n }\n )\n _logger.info(\"Script ends here\")", "def main(args): \n if args.type == 'FILEGDB':\n create_filegdb(args.name, args.path)\n elif args.type == 'ST_GEOMETRY' or args.type == 'SPATIALITE':\n create_sqlitedb(args.name, args.type, args.path)", "def dump(self, args):\n if self.stru:\n self.stru.dump(args)\n if self.index:\n self.index.dump(args)\n if self.bank:\n self.bank.dump(args)\n if self.sys:\n self.sys.dump(args)", "def exportBulletFile(*argv):", "def printInfo(self):\n print(\"Generating %s with the following info:\" % self.args.dest)\n print(\"From: %s\" % self.srcdir)\n print(\"To: %s\" % self.desdir)\n print(\"Template: %s\" % self.args.tmpl)\n print(\"Author: %s\" % self.args.author)\n print(\"Version: %s\" % self.args.ver)\n print(\"Date: %s\" % self.args.date)\n print(\"\\n\")", "def recdump(self, args):\n if args.index:\n dbfile = self.index\n elif args.sys:\n dbfile = self.sys\n elif args.stru:\n dbfile = self.stru\n else:\n dbfile = self.bank\n\n if not dbfile:\n print(\".dat not found\")\n return\n nerr = 0\n nr_recnone = 0\n nr_recempty = 0\n tabidxref = [0] * 256\n bytexref = [0] * 256\n for i in range(1, args.maxrecs + 1):\n try:\n data = dbfile.readrec(i)\n if args.find1d:\n if data and (data.find(b\"\\x1d\") > 0 or data.find(b\"\\x1b\") > 0):\n print(\"record with '1d': %d -> %s\" % (i, b2a_hex(data)))\n break\n\n elif not args.stats:\n if data is None:\n print(\"%5d: <deleted>\" % i)\n else:\n print(\"%5d: %s\" % (i, toout(args, data)))\n else:\n if data is None:\n nr_recnone += 1\n elif not len(data):\n nr_recempty += 1\n else:\n tabidxref[data[0]] += 1\n for b in data[1:]:\n bytexref[b] += 1\n nerr = 0\n except IndexError:\n break\n except Exception as e:\n print(\"%5d: <%s>\" % (i, e))\n if args.debug:\n raise\n nerr += 1\n if nerr > 5:\n break\n\n if args.stats:\n print(\"-- table-id stats --, %d * none, %d * empty\" % (nr_recnone, nr_recempty))\n for k, v in enumerate(tabidxref):\n if v:\n print(\"%5d * %02x\" % (v, k))\n print(\"-- byte stats --\")\n for k, v in enumerate(bytexref):\n if v:\n print(\"%5d * %02x\" % (v, k))", "def dump(args):\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise 
ValueError()", "def strudump(self, args):\n if not self.stru:\n print(\"missing CroStru file\")\n return\n self.dump_db_table_defs(args)", "def main():\n default_gdb_path = \"./TravelerInfo.gdb\"\n api_code_var_name = \"WSDOT_TRAFFIC_API_CODE\"\n api_code = os.environ.get(api_code_var_name)\n\n parser = argparse.ArgumentParser(\n description=\"Creates a file geodatabase using data from the WSDOT Traffic API.\")\n\n parser.add_argument(\"--gdb-path\", type=str, default=default_gdb_path,\n help='Path to where the GDB will be created. Defaults to \"%s\".' % default_gdb_path,\n nargs=\"?\")\n parser.add_argument(\n \"--templates-gdb\", help=\"Path to GDB with template feature classes. (Creating feature classes with templates is faster than using the Add Field tool.)\")\n p_help = \"WSDOT Traffic API code. Defaults to value of %s environment variable if available. If this environment variable does not exist, then this parameter is required.\" % api_code_var_name\n parser.add_argument(\"--code\", \"-c\", type=str,\n required=api_code is None, default=api_code,\n help=p_help)\n parser.add_argument(\"--schema-only\", action=\"store_true\", help=\"Using this flag will generate the tables but skips the data download and population steps.\")\n parser.add_argument(\"--log-level\", choices=(\n \"CRITICAL\",\n \"ERROR\",\n \"WARNING\",\n \"INFO\",\n \"DEBUG\",\n \"NOTSET\"\n ), default=logging.NOTSET)\n\n # default_names = [\n # \"CVRestrictions\",\n # \"HighwayAlerts\",\n # \"HighwayCameras\",\n # \"MountainPassConditions\",\n # \"TrafficFlow\",\n # \"WeatherInformation\",\n # \"TravelTimes\"\n # ]\n\n p_help = 'One or more of the following values: %s' % set(tuple(URLS.keys()) + (\"Scanweb\",))\n\n parser.add_argument(\"names\", type=str,\n nargs=argparse.REMAINDER, help=p_help)\n\n args = parser.parse_args()\n log_level = args.log_level\n if log_level:\n log_level = getattr(logging, args.log_level.upper())\n logging.basicConfig(level=log_level)\n\n names = None\n if args.names:\n names = args.names\n\n templates_gdb = args.templates_gdb\n create_gdb(args.gdb_path, args.code, templates_gdb, names, args.schema_only)", "def dump_db_table_defs(self, args):\n dbinfo = self.stru.readrec(1)\n if dbinfo[:1] != b\"\\x03\":\n print(\"WARN: expected dbinfo to start with 0x03\")\n dbdef = self.decode_db_definition(dbinfo[1:])\n self.dump_db_definition(args, dbdef)\n\n for k, v in dbdef.items():\n if k.startswith(\"Base\") and k[4:].isnumeric():\n print(\"== %s ==\" % k)\n tbdef = TableDefinition(v, dbdef.get(\"BaseImage\" + k[4:], b''))\n tbdef.dump(args)\n elif k == \"NS1\":\n self.dump_ns1(v)", "def main():\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help', 'loglevel=', 'logfile=', 'version'])\n except getopt.error, msg:\n print msg\n print 'for help use --help'\n sys.exit(2)\n\n md = MDRepository()\n loglevel = logging.WARN\n logfile = None\n for o, a in opts:\n if o in ('-h', '--help'):\n print __doc__\n sys.exit(0)\n elif o in '--loglevel':\n loglevel = getattr(logging, a.upper(), None)\n if not isinstance(loglevel, int):\n raise ValueError('Invalid log level: %s' % loglevel)\n elif o in '--logfile':\n logfile = a\n elif o in '--version':\n print \"pyff version %s\" % __version__\n sys.exit(0)\n else:\n raise ValueError(\"Unknown option '%s'\" % o)\n\n log_args = {'level': loglevel}\n if logfile is not None:\n log_args['filename'] = logfile\n logging.basicConfig(**log_args)\n\n try:\n for p in args:\n plumbing(p).process(md, state={'batch': True, 'stats': {}})\n sys.exit(0)\n except Exception, 
ex:\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n print \"-\" * 64\n traceback.print_exc()\n print \"-\" * 64\n logging.error(ex)\n sys.exit(-1)", "def db(filename = 'P51-11'):\n import pdb\n sys.argv[1:] = ['-v', filename]\n pdb.run('extract.main()')", "def main():\n parser = argparse.ArgumentParser(description = 'Arguments for running the different modules of iLiner')\n\n parser.add_argument('module', choices=['ilash', 'ibd_depth', 'stats'], help='module choice')\n parser.add_argument('--sample', '-s', type=str, required=False, help='Sample file with file path')\n parser.add_argument('--haps', '-hp', type=str, required=False, help='Phased haplotype file with file path')\n parser.add_argument('--genetic_map', '-gm', type=str, required=False, help='Genetic map file with file path')\n parser.add_argument('--mapfile', '-mf', type=str, required=False, help='Mapfile made by the ilash module')\n parser.add_argument('--outfile_prefix', '-op', type=str, help='Prefix for all of the files produced from this module with file path')\n parser.add_argument('--ilash', '-i', type=str, required=False, help='File path to ilash')\n parser.add_argument('--ilash_output', '-io', type=str, required=False, help='Ilash output, one chromosome for ibd_depth module and all chromosomes for stats module')\n parser.add_argument('--population_file', '-pf', type=str, required=False, help='File with individual ids and population [ID]tab[Population]')\n parser.add_argument('--vcf', '-v', type=str, required=False, help='Phased VCF with file path')\n parser.add_argument('--no_indel', '-ni', action='store_true', required=False, help='Pass if you would like to remove indels from your vcf or haplotype file')\n\n args = parser.parse_args()\n log = Logger()\n if args.module == 'ilash':\n args_dict = vars(args)\n log.logger.info('You have selected ilash')\n if args_dict['sample'] != None:\n log.logger.info(\"Parsing your sample and haplotype files\")\n samp_file = LoadFiles.load_sample(args.sample)\n log.logger.info('Sample file has been loaded')\n haps_file = LoadFiles.load_haps(args.haps)\n log.logger.info('Haplotype file has been loaded')\n ilash_obj = LoadFiles(samp_file, haps_file)\n FileParser.check_sample_file(ilash_obj)\n log.logger.info('Your sample file has been validated')\n FileParser.check_same_sample(ilash_obj)\n log.logger.info('Your sample file and haplotype file populations are the same')\n if args_dict['no_indel'] == True:\n FileParser.remove_indels(ilash_obj)\n else:\n pass\n FileParser.check_alleles(ilash_obj.haps_file.iloc[:,[3]].values, ilash_obj.haps_file.iloc[:,[4]].values)\n nucfunc = np.vectorize(FileParser.nucleotide_test)\n log_array_ref = nucfunc(ilash_obj.haps_file[[3]])\n log_array_alt = nucfunc(ilash_obj.haps_file[[4]])\n FileParser.validate_nucleotides(log_array_ref, 'Reference Allele')\n FileParser.validate_nucleotides(log_array_alt, 'Alternative Allele')\n log.logger.info('Your reference and alternative alleles have been validated')\n FileParser.validate_binary(ilash_obj)\n log.logger.info('Your haplotypes have been validated')\n log.logger.info(\"Making your map and pedigree files\")\n pedfile_start = FileConvert.start_pedfile(ilash_obj)\n pedlen = int(len(pedfile_start))\n hapmap = open(args.genetic_map)\n mapfile_str = args.outfile_prefix + \".map\"\n pedfile_str = args.outfile_prefix + \".ped\"\n mapfile = open(mapfile_str, 'w')\n FileConvert.make_gpos_mapfile(ilash_obj, hapmap, mapfile)\n log.logger.info('Your map file has been made')\n hapslist = 
FileConvert.convert_haps(ilash_obj)\n final_array = FileConvert.make_pedfile(hapslist, pedlen, pedfile_start)\n final_list = final_array.tolist()\n pedfile = open(pedfile_str, 'w')\n for line in range(len(final_list)):\n pedfile.write(' '.join(final_list[line]) + '\\n')\n pedfile.close()\n log.logger.info('Your pedigree file has been made')\n log.logger.info('Launching iLASH')\n RunIlash.make_param_file(mapfile_str, pedfile_str, args.ilash)\n if args_dict['vcf'] != None:\n log.logger.info('Your VCF is being parsed')\n vcf_file, comments = VcfReader.read_vcf(args.vcf)\n vcf_obj = VcfReader(vcf_file, comments)\n vcf_header = vcf_file.columns.values.tolist()\n VcfParser.validate_header(vcf_header)\n VcfParser.validate_chrom(vcf_obj)\n if args_dict['no_indels'] == True:\n VcfParser.remove_indels(vcf_ojb)\n else:\n pass\n VcfParser.diff_alt_ref(vcf_obj.vcf['REF'], vcf_obj.vcf['ALT'])\n log.logger.info('Your VCF has been parsed')\n nucfunc = np.vectorize(VcfParser.nucleotide_test)\n log_array_ref = nucfunc(vcf_obj.vcf['REF'])\n log_array_alt = nucfunc(vcf_obj.vcf['ALT'])\n VcfParser.validate_nucleotides(vcf_obj, log_array_ref, 'Reference Allele')\n VcfParser.validate_nucleotides(vcf_obj, log_array_alt, 'Alternative Allele')\n binary_array = np.isin(vcf_obj.vcf.iloc[:,9:], ['0|0', '0|1', '1|0', '1|1'])\n VcfParser.validate_binary(vcf_obj, binary_array)\n people = VcfMakeFiles.get_people(vcf_header)\n pedfile_start = VcfMakeFiles.start_pedfile(people)\n hapslist = VcfMakeFiles.convert_haps(vcf_obj)\n pedlen = int(len(haplist[0]/2))\n final_array = VcfMakeFiles.make_pedfile(hapslist, pedlen, pedfile_start)\n final_list = final_array.tolist()\n mapfile_str = args.outfile_prefix + \".map\"\n pedfile_str = args.outfile_prefix + \".ped\"\n mapfile = open(mapfile_str, 'w')\n pedfile = open(pedfile_str, 'w')\n hapmap = open(args.genetic_map)\n VcfMakeFiles.make_gpos_mapfile(vcf_obj, hapmap, mapfile)\n log.logger.info('Your map file has been made')\n for line in range(len(final_list)):\n pedfile.write(' '.join(final_list[line]) + '\\n')\n pedfile.close()\n log.logger.info('Your pedigree file has been made')\n log.logger.info('Launching iLASH')\n RunIlash.make_param_file(mapfile_str, pedfile_str, args.ilash)\n elif args.module == 'ibd_depth':\n log.logger.info('You have selected ibd_depth')\n mapfile = IBDDepth.load_files(args.mapfile)\n log.logger.info('Your map file has been loaded')\n ilash_output, snps = IBDDepth.load_ilash(args.ilash_output)\n log.logger.info('Your ilash output file has been loaded')\n mapfile = IBDDepth.find_ibd(mapfile, snps)\n four_up, four_down = IBDDepth.get_stdev(mapfile)\n IBDDepth.make_depth_plot(mapfile, args.outfile_prefix, four_up, four_down)\n log.logger.info('Your IBD depth figures have been made')\n log.logger.info('Removing outliers and creating an outlier free file')\n IBDDepth.remove_outlier(mapfile, ilash_output, args.outfile_prefix, four_up, four_down)\n elif args.module == 'stats':\n log.logger.info('You have selected stats')\n ilash_results = IBDStats.load_ilash(args.ilash_output)\n log.logger.info('Your iLASH output has been loaded')\n ilash_pops = IBDStats.load_popfile(args.population_file)\n log.logger.info('Your population file has been loaded')\n stats_obj = IBDStats(ilash_results, ilash_pops)\n IBDStats.remove_self_ibd(stats_obj)\n IBDStats.stratify_by_pop(stats_obj)\n IBDStats.make_dot_plot(stats_obj, args.outfile_prefix)\n log.logger.info('Your pair plot has been made')\n IBDStats.make_violin_plot(stats_obj, args.outfile_prefix)\n log.logger.info('Your 
violin plot has been made')\n IBDStats.get_ibd_stats(stats_obj, log)\n H, pval = (IBDStats.get_kw_h(stats_obj))\n log.logger.info('Kruskal-Wallis Test')\n log.logger.info('H-statistic: ' + str(H))\n log.logger.info('P-Values: ' + str(pval))\n if pval < 0.05:\n log.logger.info('Reject the null hypothesis - A significant differences exists between groups.')\n log.logger.info('Post-hoc Wilcoxon Rank Sum Test')\n IBDStats.get_ranksum(stats_obj, log)\n else:\n log.logger.info('Fail to reject the hypothesis - A discernable difference does not exist between the groups')\n pop_heatmap = IBDStats.fraction_ibd_sharing(stats_obj)\n fig = pop_heatmap.get_figure()\n fig.set_size_inches(13.0, 13.0)\n fig.savefig(args.outfile_prefix + '_heatmap.png')\n log.logger.info('Your heatmap has been made')", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. 
then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def processfile(args, fh):\n if args.quick:\n scanner = quickScanZip(args, fh)\n else:\n scanner = findPKHeaders(args, fh)\n\n def checkarg(arg, ent):\n if not arg:\n return False\n return '*' in arg or ent.name in arg\n def checkname(a, b):\n if a and '*' in a: return True\n if b and '*' in b: return True\n l = 0\n if a: l += len(a)\n if b: l += len(b)\n return l > 1\n\n if args.verbose and not (args.cat or args.raw or args.save):\n print(\" 0304 need flgs mth stamp --crc-- compsize fullsize nlen xlen namofs xofs datofs endofs\")\n print(\" 0102 crea need flgs mth stamp --crc-- compsize fullsize nlen xlen clen dsk0 attr osattr datptr namofs xofs cmtofs endofs\")\n for ent in scanner:\n if args.cat or args.raw or args.save:\n if args.quick and isinstance(ent, CentralDirEntry) or \\\n not args.quick and isinstance(ent, LocalFileHeader):\n ent.loaditems(fh)\n do_cat = checkarg(args.cat, ent)\n do_raw = checkarg(args.raw, ent)\n do_save= checkarg(args.save, ent)\n\n do_name= checkname(args.cat, args.raw)\n\n if do_name:\n print(\"\\n===> \" + ent.name + \" <===\\n\")\n\n sys.stdout.flush()\n blks = zipraw(fh, ent)\n\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n if do_cat or do_save:\n blks = skipbytes(blks, 12, args)\n\n if do_cat:\n sys.stdout.buffer.writelines(zipcat(blks, ent))\n if do_raw:\n sys.stdout.buffer.writelines(blks)\n if do_save:\n savefile(args.outputdir, ent.name, zipcat(blks, ent))\n else:\n ent.loaditems(fh)\n if args.verbose or not args.quick:\n print(\"%08x: %s\" % (ent.pkOffset, ent))\n else:\n print(ent.summary())\n if hasattr(ent, \"comment\") and ent.comment and not args.dumpraw:\n print(ent.comment)\n if args.dumpraw and hasattr(ent, \"extraLength\") and ent.extraLength:\n print(\"%08x: XTRA: %s\" % (ent.extraOffset, binascii.b2a_hex(getbytes(fh, ent.extraOffset, ent.extraLength))))\n if args.dumpraw and hasattr(ent, \"comment\") and ent.comment:\n print(\"%08x: CMT: %s\" % (ent.commentOffset, binascii.b2a_hex(getbytes(fh, ent.commentOffset, ent.commentLength))))\n if args.dumpraw and isinstance(ent, LocalFileHeader):\n blks = zipraw(fh, ent)\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n\n blockdump(ent.dataOffset, blks)", "def hexdump(args=None):\n args = parser.parse_args(args)\n with LogSetup(args):\n contents = args.file.read()\n args.file.close()\n dump(contents, width=args.width)", "def main(args):\n write_files = args.no_write is False\n ffiles = _open(args)\n ffiles = update_bpms(ffiles)\n if write_files:\n _write_files(ffiles, args.prefix, args.clobber)\n\n return", "def main():\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument('-V', '--version', action='version', version=VERSION)\n\n\n file_group = parser.add_argument_group('Input Files')\n file_group.add_argument('-f', dest='traj', required=True, type=str,\n help='trajectory file (XTC/TRR/GRO/PDB ...)')\n file_group.add_argument('-s', dest='tpr', required=True, type=str,\n help='tpr file (TPR)')\n file_group.add_argument('-o', dest='outpath', type=str,\n help='name of the mapped trajectory (XTC/GRO)')\n file_group.add_argument('-m', dest='map_file', type=str,\n help='.mapping file or path to directory of .map files')\n\n mapping_group = parser.add_argument_group('Mapping Options')\n 
mapping_group.add_argument('-mode', dest='mode', required=False, type=str,\n help='COG or COM mapping', default='COG')\n mapping_group.add_argument('-pbc', action='store_true', required=False, dest='pbc_complete',\n help='complete pbc with MDAnalysis; this is slow!')\n mapping_group.add_argument('-mols', dest='mol_names', required=True, type=str, nargs='+',\n help='names of molecules to consider when mapping as in the [moleculetypes] directive')\n mapping_group.add_argument('-add_H', dest='h_association', nargs='+', type=lambda s: s.split(':'),\n default=[],\n help='atom-types corresponding to CH3, CH2, CH1 for aliphatic groups and CH2d for double bonds.')\n args = parser.parse_args()\n\n print(\"INFO - Loading universe\")\n # load trajectory\n init_universe = UniverseHandler(args.mol_names,\n args.tpr,\n args.traj,\n in_memory=True)\n if args.pbc_complete:\n print(\"INFO - PBC completing trajectory\")\n init_universe.pbc_complete()\n\n if args.h_association:\n print(\"INFO - Adding Hydrogen to united-atoms\")\n treated_atoms = init_universe.shift_united_atom_carbons(dict(args.h_association))\n else:\n treated_atoms = np.array([])\n\n print(\"INFO - Loading mapping files\")\n #determine if we have a single .mapping file or a directory of .map files\n map_path = pathlib.Path(args.map_file)\n if map_path.is_file() == True:\n with open(args.map_file, \"r\") as _file:\n lines = _file.readlines()\n elif map_path.is_dir() == True:\n l = []\n for i in map_path.glob('*.map'):\n with open(i, \"r\") as _file:\n l.append(_file.readlines())\n if len(l) > 0:\n lines = [item for sublist in l for item in sublist]\n else:\n msg = (\"Couldn't find any .map files in the directory given.\"\n \"Please check the -m argument!\")\n raise IOError(msg)\n else:\n msg = (\"\\nCannot determine if you have given me a single .mapping file\\n\"\n \"or a directory of .map files. 
Please check!\\n\")\n raise IOError(msg)\n\n mappings = read_mapping(lines)[0]\n\n print(\"INFO - Mapping universe - indices\")\n # first mapp the atom indices\n mapped_atoms, bead_idxs = forward_map_indices(init_universe,\n mappings)\n n_frames = len(init_universe.trajectory)\n\n print(\"INFO - Mapping universe - positions\")\n mapped_atoms = numba.typed.List(mapped_atoms)\n bead_idxs = numba.typed.List(bead_idxs)\n # extract the position array from universe\n # if it's not a trajectory we have to emulate\n # a single frame\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n if file_extension in [\"xtc\", \"trr\"]:\n positions = init_universe.trajectory.coordinate_array\n else:\n positions = init_universe.atoms.positions\n positions = positions.reshape(1, -1, 3)\n\n mapped_trajectory = forward_map_positions(mapped_atoms,\n bead_idxs,\n positions,\n n_frames,\n args.mode,\n treated_atoms)\n\n print(\"INFO - Mapping universe - building pos-array\")\n cg_universe = create_new_universe(init_universe, mapped_trajectory, mappings)\n\n # write coordinate\n print(\"INFO - Writing CG trajectory\")\n if args.traj:\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n else:\n file_extension = \"xtc\"\n\n if file_extension in [\"xtc\", \"trr\"]:\n cg_beads = cg_universe.atoms\n with mda.Writer(args.outpath,\n multiframe=True,\n n_atoms=len(cg_universe.atoms)) as mapped:\n for time_step in cg_universe.trajectory:\n mapped.write(cg_beads)\n else:\n cg_universe.atoms.positions = cg_universe.trajectory.coordinate_array[0]\n cg_beads = cg_universe.atoms\n cg_universe.atoms.dimensions = init_universe.atoms.dimensions\n with mda.Writer(args.outpath, n_atoms=len(cg_universe.atoms)) as mapped:\n mapped.write(cg_beads)", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-o\", dest=\"outfile\", help=\"outfile\")\n\tparser.add_option(\"-O\", dest=\"outlist\", help=\"outlist\")\n\tparser.add_option(\"--no_gly\", dest=\"no_gly\", help=\"no glycine at ss\", action=\"store_true\")\n\tparser.set_description(main.__doc__)\n\t(options,args) = parser.parse_args()\n\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\tpdbfiles = files_from_list(options.pdblist)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\toutfiles = []\n\tif options.outlist:\n\t\toutfiles = files_from_list(options.outlist)\n\telif options.outfile:\n\t\toutfiles.append(options.outfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tnfiles = len(pdbfiles)\n\tif len(outfiles) != nfiles:\n\t\tprint \"number of files differ\"\n\t\tparser.print_help()\n\t\tsys.exit()\n\t\t\n\t\n\tenz = Enzyme()\n\tresfile = Resfile()\n\tfor ifile in range(nfiles):\n\t\tenz.clear()\n\t\tresfile.clear()\n\n\t\tenz.readPDB(pdbfiles[ifile])\n\t\tresfile.setMolecule(enz)\n\n\t\tif options.no_gly:\n\t\t\tnatss = getSecondaryStructure(pdbfiles[ifile])\n\t\t\tif len(natss) < enz.protein.numResidues():\n\t\t\t\tprint \"number of residues and secondary structure do no match\"\n\t\t\t\tsys.exit()\n\n\n\t\tprot = enz.protein\t\n\t\tlig = enz.ligand\n\t\tnres = len(prot.residue)\n\n\t\tdesignable = [False]*nres\n\t\trepackable = [False]*nres\n\t\tfor i in range(nres):\n\t\t\tres = prot.residue[i]\n\t\t\tif enz.isCatalytic(res):\n\t\t\t\tresfile.setCatalytic(i)\n\t\t\telse:\n\t\t\t\tif res.name == \"GLY\":\n\t\t\t\t\tCB 
= res.getAtom(\" CA \")\n\t\t\t\telse:\n\t\t\t\t\tCB = res.getAtom(\" CB \")\n\n\t\t\t\t#if options.only_cat:\n\t\t\t\t# --- get distance to ligand --- #\n\t\t\t\tdistCB = closestApproachToResidue(CB,lig)\n\t\t\t\tif distCB < 6.0:\n\t\t\t\t\tdesignable[i] = True\n\t\t\t\telif distCB < 8.0:\n\t\t\t\t\tif isResPointingToRes(res, lig, cutoff=60):\n\t\t\t\t\t\tdesignable[i] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\trepackable[i] = True\n\t\t\t\telif distCB < 10.0:\n\t\t\t\t\trepackable[i] = True\n\n\t\t\t\t\n\t\t\t\t# --- get distance to residue \n\t\t\t\tfocusedRes = None\n\t\t\t\tmindist = 5000\n\t\t\t\tfor cres in enz.catalytic:\n\t\t\t\t\tdist = closestApproachToResidue(CB, cres)\n\t\t\t\t\tif dist < mindist:\n\t\t\t\t\t\tfocusedRes = cres\n\t\t\t\t\t\tmindist = dist\n\n\t\t\t\tif focusedRes != None:\n\t\t\t\t\tif mindist < 5.5:\n\t\t\t\t\t\tif isResPointingToRes(res, focusedRes, cutoff=60):\n\t\t\t\t\t\t\tdesignable[i] = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trepackable[i] = True\n\t\t\t\t\telif mindist < 7.0:\n\t\t\t\t\t\trepackable[i] = True\n\n\n\t\tfor i in range(nres):\n\t\t\tres = enz.protein.residue[i]\n\t\t\tif designable[i]:\n\t\t\t\tif options.no_gly and res.name != \"GLY\":\n\t\t\t\t\tif natss[i] == \"H\" or natss[i] == \"E\":\n\t\t\t\t\t\tresfile.designNoGly(i)\n\t\t\t\t\telse:\n\t\t\t\t\t\tresfile.designOnly(i)\n\t\t\t\telse:\n\t\t\t\t\tresfile.designOnly(i)\n\t\t\telif repackable[i]:\n\t\t\t\tresfile.repackOnly(i)\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\tresfile.write(outfiles[ifile])", "def rnase_p_model_info(filename, db_url, output):\n r2dt.write_rfam(filename, db_url, output)", "def main(): \n for info_hash_record in info_hashs:\n get_and_save_bt_info(info_hash_record)", "def main():\n parsed_args = parse_args()\n dfg = DummyFileGenerator(parsed_args[0], **parsed_args[1])\n dfg.write_output_file(**parsed_args[2])", "def help_dump(self):\n print(DUMP)", "def factory(top_dir):\n dct = {}\n models = {}\n HI = {}\n metals = {}\n HIabs = {}\n metalabs = {}\n for dirpath, dirname, filename in os.walk(top_dir):\n if str(os.path.split(dirpath)[-1]).strip() in [\"ascii (only HI)\",\n \"ascii (only Metal)\",\n \"xmls\"]:\n for f in filename:\n fname = os.path.join(dirpath, f)\n name = f[0:5]\n\n if f.endswith('.txt'):\n if os.path.split(dirpath)[-1] == \"ascii (only HI)\":\n if \"spec\" in os.path.split(fname)[-1]:\n HI[name] = fname\n elif \"abs\" in os.path.split(fname)[-1]:\n HIabs[name] = LineDump(fname)\n elif os.path.split(dirpath)[-1] == \"ascii (only Metal)\":\n if \"spec\" in os.path.split(fname)[-1]:\n metals[name] = fname\n elif \"abs\" in os.path.split(fname)[-1]:\n metalabs[name] = LineDump(fname)\n elif f.endswith('.xml'):\n if \"HI\" in fname and not \"only\" in fname:\n models[name] = Model(xmlfile=fname)\n models[name].read()\n else:\n pass\n\n for key in models.keys():\n try:\n dct[key] = {\"HI_sp\": Spectrum.sniffer(HI[key]),\n \"metal_sp\": Spectrum.sniffer(metals[key]),\n \"HI\": HIabs[key],\n \"metal\": metalabs[key],\n \"model\": models[key]}\n except KeyError:\n print(\"skipping %s\" % key)\n\n for attr in \"HI_sp metal_sp\".split():\n if type(dct[key][attr]) is TextSpectrum:\n dct[key][attr] = DumpData.format_correction(dct[key][attr])\n\n return [DumpData(**dct[name]) for name in dct.keys()]", "def dumpf(self, gzip=False):\n if 0 != len(self.sources):\n os.mkdir(self.name)\n filename = os.path.join(self.name, 'bootstrap.sh')\n f = codecs.open(filename, 'w', encoding='utf-8')\n elif gzip:\n filename = '{0}.sh.gz'.format(self.name)\n f = gziplib.open(filename, 'w')\n 
else:\n filename = '{0}.sh'.format(self.name)\n f = codecs.open(filename, 'w', encoding='utf-8')\n f.write(self.comment)\n f.write('cd \"$(dirname \"$0\")\"\\n')\n for filename2, content in sorted(self.sources.iteritems()):\n f2 = open(os.path.join(self.name, filename2), 'w')\n f2.write(content)\n f2.close()\n for out in self.out:\n f.write(out)\n f.close()\n if gzip and 0 != len(self.sources):\n filename = 'sh-{0}.tar.gz'.format(self.name)\n tarball = tarfile.open(filename, 'w:gz')\n tarball.add(self.name)\n tarball.close()\n return filename\n return filename", "def do_write(self, args):\n\t\tasplit = args.split(\" \")\n\t\tfname = asplit[0]\n\t\twhat = asplit[1]\n\n\t\tif what == \"summary\" or what == \"oldsummary\":\n\t\t\twith open(fname, 'w') as f:\n\t\t\t\tform = DresherInterface.summary_format if what == \"summary\" else DresherInterface.oldsummary_format\n\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\tf.write(x)\n\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t#for lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t#\tdw.writerow(dict(zip(form, [self.get_language_info(lang, x) for x in form])))\n\t\t\t\tfor lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\t\tf.write(str(self.get_language_info(lang, x)))\n\t\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"\\t\")\n\t\tif what == \"hierarchies\":\n\t\t\t# format: #vowels, langname, hierarchy, len(hier), #of marks, lfeats, inv, freq, \n\t\t\t# how many times each feat marked, the actual marks, vowel:feature set, unused features\n\t\t\t# take fname to be name of directory to write outfiles to\n\t\t\tif not os.path.exists(fname):\n\t\t\t\tos.mkdir(fname)\n\t\t\tfor lang in self.languages:\n\t\t\t\tnum_vowels = self.get_language_info(lang, \"linv\")\n\t\t\t\tname = lang.name\n\t\t\t\tnum_feats = self.get_language_info(lang, \"lfeats\")\n\t\t\t\tinv = self.get_language_info(lang, \"inv\")\n\t\t\t\tfreq = self.get_language_info(lang, \"freq\")\n\t\t\t\tinv_feats = lang.phone_feat_dict\n\t\t\t\twith open(os.path.join(fname,name.replace(\" \",\"\")+\".txt\"), 'w') as f:\n\t\t\t\t\tf.write(\"num_vowels\\tname\\thierarchy\\tlen_hier\\tnum_marks\\tnumfeats\\tinv\\tfreq\\tfeat_marks\\tinv_marks\\tinv_feats\\tunused_feats\\n\")\n\t\t\t\t\tfor h in lang.hierarchies:\n\t\t\t\t\t\tf.write(str(num_vowels))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(name)\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(h))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(len(h)))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tspec = SDA(lang._phones, lang._features, h)\n\t\t\t\t\t\tmarkedness = sum([x for phone in spec.keys() for x in spec[phone] if x == 1])\n\t\t\t\t\t\tf.write(str(markedness))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(num_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(freq))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tfeat_counts = {f:sum([spec[phone][i] for phone in spec.keys() if spec[phone][i] == 1]) for i, f in enumerate(h)}\n\t\t\t\t\t\tf.write(str(feat_counts))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(spec))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(list(set(lang._features)-set(h))))\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t# 
make sure all the threads that need to be finished have finished\n\t\t# using .join() on the appropriate groups of threads", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.set_description(main.__doc__)\n\n\n\t(options, args) = parser.parse_args()\n\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\tpdbfiles = files_from_list(options.pdblist)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\t\t\n\tprotein = Enzyme()\n\tfor file in pdbfiles:\n\t\tprotein.readPDB(file)\n\t\tlig = protein.ligand\n\t\tif lig == None:\n\t\t\tprint \"no ligand found for file:\",file\n\t\t\tsys.exit()\n\n\t\ttot = lig.Erep + lig.Eatr + lig.EhbSC\n\t\tprint file,lig.Erep,lig.Eatr,lig.EhbSC,tot\n\t\tprotein.clear()" ]
[ "0.6106935", "0.5797337", "0.5723794", "0.5647623", "0.5582486", "0.5572628", "0.55233026", "0.55159366", "0.5514", "0.5508061", "0.54456484", "0.5421948", "0.5406592", "0.5377717", "0.53716934", "0.5368791", "0.53619283", "0.5353382", "0.5350798", "0.5341077", "0.53161365", "0.5312812", "0.5310316", "0.53063947", "0.5274205", "0.52603525", "0.52513874", "0.52356297", "0.52072966", "0.5206092" ]
0.59859556
1
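A note on the two trailing fields: across the rows shown here, document_rank matches the number of negatives whose score is strictly higher than document_score (e.g. this row: one negative at 0.6106935 beats 0.59859556, giving rank 1). The sketch below reconstructs that relationship; treat the interpretation as an inference from the visible rows, not a documented guarantee.

    # Inferred relationship (consistent with every row in this section):
    # rank = number of negatives scoring strictly above the positive document.
    def document_rank(document_score: float, negative_scores: list) -> int:
        return sum(1 for s in negative_scores if s > document_score)

    assert document_rank(0.59859556, [0.6106935, 0.5797337, 0.5206092]) == 1
    assert document_rank(0.6390368, [0.60333997, 0.5776037]) == 0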
Gets the forwarding information base dump file for the given LI [Arguments]
def fusion_api_get_li_forwarding_information_base_dump_file(self, uri, localfile, api=None, headers=None): return self.li.get_file(uri=uri, localfile=localfile, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDumpFilePath(self, args):\n \n try:\n path = args[1]\n except:\n raise ArgsException, 'Second argument (dump file) was not found'\n \n return path", "def fusion_api_generate_li_forwarding_information_base_dump_file(self, uri, api=None, headers=None):\n param = '/forwarding-information-base'\n return self.li.post(uri=uri, api=api, headers=headers, param=param)", "def get_runinfo_basename():\n return \"dumpruninfo\"", "def make_dump_file_name(args: Namespace, wp_config: Dict, now: datetime) -> Location:\n\n if not args.snapshot_base_name:\n base_name = wp_config[\"db_name\"]\n else:\n base_name = args.snapshot_base_name\n\n name = args.file_name_template.format(base=base_name, time=now.isoformat() + \"Z\")\n\n return args.backup_dir.child(name)", "def db(filename = 'P51-11'):\n import pdb\n sys.argv[1:] = ['-v', filename]\n pdb.run('extract.main()')", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def open_snap_file(self, ifile):\n fname = \"%s/snapdir_%03d/%s_%03d.%d\" % (self.basedir, self.isnap, \n self.basename, self.isnap, ifile)\n return gadget_snapshot.open(fname)", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def formatdb(fastadata={},fname=\"\"):\n if not fastadata and fname:\n OSsystem(\"%s -i %s\" % (FORMATDB_PATH,fname))\n elif fastadata and fname:\n pass\n else:\n raise \"inproper input\"\n return fname", "def io_pickle_file():\n return join(_PACKAGE_PATH, 'database.p')", "def get_progress_file_info(filename):\n # Open the file and read it\n with open(filename,'r') as f:\n data = f.readlines()\n # Eat whitespace\n data = [s.lstrip().rstrip() for s in data]\n # Split the strings in each line \n data[0] = data[0].split(' ')\n data[1] = data[1].split(\"/\")\n # Now, based on position, we can start getting parameters\n current_sweep,final_sweep = [int(i) for i in data[1]]\n target_area = int(data[0][0])\n area_damping = float(data[0][1])\n save_every_n_sweeps = int(data[0][-1])\n if len(data[0]) == 3:\n target_std = default_target_std\n std_damping = default_std_damping\n else:\n target_std,std_damping = [float(s) for s in data[0][-2:-1]]\n # Gather data function is, of course, gather_data_to_1_file\n gather_data_function = output.gather_data_to_1_file\n # Same for algorithm\n algorithm = default_algorithm\n # v5 and v6 damping don't matter\n v5damping = default_v5damping\n v6damping = default_v6damping\n # The filename we want to output is the same file but with\n # *.boundary2p1 at the end.\n filename = filename.rstrip(output.tracking_suffix)+output.output_suffix\n # Return the parameters as a tuple\n params = parameters(filename,\n target_area,area_damping,\n target_std,std_damping,\n current_sweep,final_sweep,\n save_every_n_sweeps,\n v5damping,v6damping,\n algorithm,\n gather_data_function)\n return params", "def factory(top_dir):\n dct = {}\n models = {}\n HI = {}\n metals = {}\n HIabs = {}\n metalabs = {}\n for dirpath, dirname, filename in os.walk(top_dir):\n if str(os.path.split(dirpath)[-1]).strip() in [\"ascii (only HI)\",\n \"ascii (only Metal)\",\n \"xmls\"]:\n for f in filename:\n fname = os.path.join(dirpath, f)\n name = f[0:5]\n\n if f.endswith('.txt'):\n if os.path.split(dirpath)[-1] == \"ascii (only HI)\":\n if \"spec\" in os.path.split(fname)[-1]:\n HI[name] = fname\n elif \"abs\" in os.path.split(fname)[-1]:\n HIabs[name] = LineDump(fname)\n elif os.path.split(dirpath)[-1] == \"ascii (only Metal)\":\n if \"spec\" in os.path.split(fname)[-1]:\n metals[name] = fname\n elif \"abs\" 
in os.path.split(fname)[-1]:\n metalabs[name] = LineDump(fname)\n elif f.endswith('.xml'):\n if \"HI\" in fname and not \"only\" in fname:\n models[name] = Model(xmlfile=fname)\n models[name].read()\n else:\n pass\n\n for key in models.keys():\n try:\n dct[key] = {\"HI_sp\": Spectrum.sniffer(HI[key]),\n \"metal_sp\": Spectrum.sniffer(metals[key]),\n \"HI\": HIabs[key],\n \"metal\": metalabs[key],\n \"model\": models[key]}\n except KeyError:\n print(\"skipping %s\" % key)\n\n for attr in \"HI_sp metal_sp\".split():\n if type(dct[key][attr]) is TextSpectrum:\n dct[key][attr] = DumpData.format_correction(dct[key][attr])\n\n return [DumpData(**dct[name]) for name in dct.keys()]", "def get_fund_logfile():\n return \"fund\" + get_day() + \".log\"", "def debug_filename(pe):\n if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):\n for i in pe.DIRECTORY_ENTRY_DEBUG:\n if hasattr(i.entry, 'PdbFileName'):\n return i.entry.PdbFileName.decode('utf-8', 'ignore')\n return None", "def dump_file_path(self) -> str:\n return pulumi.get(self, \"dump_file_path\")", "def full_info(files: List[str], args, dir_: str ='.') -> List[str]:\n temp_info = []\n for item in files:\n f_info = {}\n f_st = os.stat(os.path.join(CURRENT_DIR, dir_, item))\n f_info['mpde'] = f'{stat.filemode(f_st.st_mode):10}'\n f_info['nlink'] = f'{f_st.st_nlink:>3}'\n f_info['uid'] = f'{f_st.st_uid:>3}'\n size = f_st.st_size\n if args.block_size:\n size = ceil(size / args.block_size)\n f_info['size'] = f'{size:>8}'\n date = dt.datetime.fromtimestamp(f_st.st_mtime)\n if (dt.datetime.now() - date).days / 30 > 6:\n date_format = '%b %d %Y'\n else:\n date_format = '%b %d %I:%M'\n f_info['time'] = f'{date.strftime(date_format)} '\n f_info['name'] = f'{item:<}'\n temp_info.append(\n ' '.join([f_info['mpde'], f_info['nlink'], f_info['uid'],\n f_info['size'], f_info['time'], f_info['name']])\n )\n temp_info.append('\\n')\n return temp_info", "def dump_file_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dump_file_path\")", "def get_idumpload(self):\n return self.read_register(4102, 1, 3)", "def fusion_api_get_li_forwarding_information_base(self, uri, param='', api=None, headers=None):\n param = '/forwarding-information-base%s' % param\n return self.li.get(uri=uri, api=api, headers=headers, param=param)", "def get_dump(lang, filename):\n from .utils import get_dump as download\n click.echo('Starting to download Wikipedia dump for lang {}.'.format(lang))\n download(lang, filename=filename)\n click.echo('Download finished')", "def recdump(self, args):\n if args.index:\n dbfile = self.index\n elif args.sys:\n dbfile = self.sys\n elif args.stru:\n dbfile = self.stru\n else:\n dbfile = self.bank\n\n if not dbfile:\n print(\".dat not found\")\n return\n nerr = 0\n nr_recnone = 0\n nr_recempty = 0\n tabidxref = [0] * 256\n bytexref = [0] * 256\n for i in range(1, args.maxrecs + 1):\n try:\n data = dbfile.readrec(i)\n if args.find1d:\n if data and (data.find(b\"\\x1d\") > 0 or data.find(b\"\\x1b\") > 0):\n print(\"record with '1d': %d -> %s\" % (i, b2a_hex(data)))\n break\n\n elif not args.stats:\n if data is None:\n print(\"%5d: <deleted>\" % i)\n else:\n print(\"%5d: %s\" % (i, toout(args, data)))\n else:\n if data is None:\n nr_recnone += 1\n elif not len(data):\n nr_recempty += 1\n else:\n tabidxref[data[0]] += 1\n for b in data[1:]:\n bytexref[b] += 1\n nerr = 0\n except IndexError:\n break\n except Exception as e:\n print(\"%5d: <%s>\" % (i, e))\n if args.debug:\n raise\n nerr += 1\n if nerr > 5:\n break\n\n if args.stats:\n print(\"-- 
table-id stats --, %d * none, %d * empty\" % (nr_recnone, nr_recempty))\n for k, v in enumerate(tabidxref):\n if v:\n print(\"%5d * %02x\" % (v, k))\n print(\"-- byte stats --\")\n for k, v in enumerate(bytexref):\n if v:\n print(\"%5d * %02x\" % (v, k))", "def get_dbtrace_output(output_file):\n\n pass", "def main():\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help', 'loglevel=', 'logfile=', 'version'])\n except getopt.error, msg:\n print msg\n print 'for help use --help'\n sys.exit(2)\n\n md = MDRepository()\n loglevel = logging.WARN\n logfile = None\n for o, a in opts:\n if o in ('-h', '--help'):\n print __doc__\n sys.exit(0)\n elif o in '--loglevel':\n loglevel = getattr(logging, a.upper(), None)\n if not isinstance(loglevel, int):\n raise ValueError('Invalid log level: %s' % loglevel)\n elif o in '--logfile':\n logfile = a\n elif o in '--version':\n print \"pyff version %s\" % __version__\n sys.exit(0)\n else:\n raise ValueError(\"Unknown option '%s'\" % o)\n\n log_args = {'level': loglevel}\n if logfile is not None:\n log_args['filename'] = logfile\n logging.basicConfig(**log_args)\n\n try:\n for p in args:\n plumbing(p).process(md, state={'batch': True, 'stats': {}})\n sys.exit(0)\n except Exception, ex:\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n print \"-\" * 64\n traceback.print_exc()\n print \"-\" * 64\n logging.error(ex)\n sys.exit(-1)", "def TitleInfo(currentFile):\n Title=[]\n with open(currentFile) as fileIn:\n print(':\\033[1;31mI\\033[1;m')\n print('PDB File:\\033[1;31m %s\\033[1;m'%currentFile)\n line = fileIn.readline()\n while line:\n if line.startswith('TITLE'):\n Title.append(line)\n\n line = fileIn.readline()\n if len(Title) == 1:\n Str = \"\".join(Title)\n x = Str.replace('TITLE', '')\n Str1 = x.lstrip()\n print('Title: %s'%Str1)\n if len(Title) > 1:\n #Str = \"\".join(l)\n t =(Title[0])\n z = (Title[1])\n t1 = t.replace('TITLE', '')\n z1 = z.replace('TITLE', '')\n z2 = z1.replace('2', '')\n t2 = t1.strip()\n z3 = z2.strip()\n print('Title:%s'%t2+z3)\n #return Title", "def base():\n print(CFG.base.path)", "def get_output_file():\n if len(sys.argv) < 4:\n return -1\n return sys.argv[3]", "def gfxinfo_get_last_dump(filepath):\n record = ''\n with open(filepath, 'r') as fh:\n fh_iter = _file_reverse_iter(fh)\n try:\n while True:\n buf = next(fh_iter)\n ix = buf.find('** Graphics')\n if ix >= 0:\n return buf[ix:] + record\n\n ix = buf.find(' **\\n')\n if ix >= 0:\n buf = next(fh_iter) + buf\n ix = buf.find('** Graphics')\n if ix < 0:\n msg = '\"{}\" appears to be corrupted'\n raise RuntimeError(msg.format(filepath))\n return buf[ix:] + record\n record = buf + record\n except StopIteration:\n pass", "def file_loc(self):\n\t\treturn self.__dbfile", "def filename(self):\n return self._dbfile", "def main():\n parser = argparse.ArgumentParser(\n description='Convert Thunderbird address ldif to your LDAP ldif,'\n ' or the reverse.')\n parser.add_argument('-b',\n metavar='BASE_PATH',\n dest='base_path',\n default='',\n help='ldap base path')\n parser.add_argument('-f',\n metavar='FILE',\n dest='fname',\n type=argparse.FileType(),\n required=True,\n help='ldif file')\n\n args = parser.parse_args()\n convert(args.fname, args.base_path)" ]
[ "0.60333997", "0.5776037", "0.5622542", "0.5518955", "0.5258214", "0.5153317", "0.5099158", "0.49888653", "0.49863005", "0.49706173", "0.49517483", "0.4934412", "0.49315077", "0.49253023", "0.49036095", "0.48844084", "0.48682192", "0.4867699", "0.48596936", "0.48587334", "0.48530915", "0.48367888", "0.48360893", "0.48319337", "0.4808948", "0.48013365", "0.47968382", "0.47956473", "0.47904187", "0.47851342" ]
0.6390368
0
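The positive document in this row wraps a file download: it forwards uri and localfile to self.li.get_file. The library's own get_file is not shown in this row, so the snippet below is only a minimal stand-in for what such a helper typically does, streaming an HTTP GET response to disk; the requests usage is standard, everything else is an assumption.

    # Minimal stand-in for a get_file-style helper (assumed behaviour, not
    # the library's actual implementation): stream the body to a local file.
    import requests

    def get_file(uri, localfile, headers=None):
        with requests.get(uri, headers=headers, stream=True, timeout=60) as resp:
            resp.raise_for_status()
            with open(localfile, "wb") as fh:
                for chunk in resp.iter_content(chunk_size=8192):
                    fh.write(chunk)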
Updates an LI using the PATCH HTTP verb. [Arguments]
def fusion_api_patch_li(self, body=None, uri=None, api=None, headers=None): return self.li.patch(body, uri, api, headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def patch(self, *args, **kwargs):\n self.request(\"patch\", *args, **kwargs)", "def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})", "def patch(self, url, body=None, headers=None):\n return self._request('PATCH', url, body, headers)", "def patch(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'patch', api_path, *args, **kwargs)", "def patch(self, request , pk=None):\n return Response({'message':'PATCH'})", "def patch(*args, **kwargs):\n return update(*args, patch=True, **kwargs)", "def patch(self , request , pk = None ):\r\n return Response({'method':'patch'})", "def patch(self, request, pk=None):\n return Response({'method': 'PATCH'})", "def patch(self, request, pk=None):\n return Response({'method': 'PATCH'})", "def patch(self, request, pk=None):\n return Response({'method': 'PATCH'})", "def patch(self, obj):\r\n self.require_item()\r\n request = http.Request('PATCH', self.get_url(), self.wrap_object(obj))\r\n\r\n return request, parsers.parse_json", "def patch(self, *args, **kwargs):\n return self.handle_patch_request()", "def patch(self, request, pk=None):\n return Response({'message': 'patch'})", "def patch(self, url_or_path, *args, **kwargs):\n return self.request.patch(url_or_path, *args, **kwargs).json()", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def patch(self, path, body):\n url = urljoin(self.api_endpoint, path)\n response = requests.patch(url, json=body, headers=self.headers)\n return self._check_response(response)", "def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def patch(self, request, pk=None):\n\n return Response({'method': 'patch'})", "def handle_patch(self, api, command):\n return self._make_request_from_command('PATCH', command)", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def patch(self,request,pk = None):\n return Response({'method': 'PATCH'})", "def partial_update(self, request, pk=None):\n\n return Response({'http_method':'PATCH'})", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def fusion_api_update_ls(self, body=None, uri=None, api=None, headers=None):\n return self.ls.put(body, uri, api, headers)", "def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})", "def _patch(self, path=None, version=None, params=None,\n data=None, json=None, header=None):\n return self.client.patch(module='mam', path=path, version=version,\n params=params, data=data,\n json=json, header=header)", "def sli_update(obj, product_name, name, sli_file):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n product = product[0]\n\n slis = client.sli_list(product, name)\n if not slis:\n fatal_error('SLI {} does not exist'.format(name))\n\n with Action('Updating SLI {} for product: {}'.format(name, product_name), nl=True) as act:\n sli = json.load(sli_file)\n\n validate_sli(obj, sli, act)\n\n if not act.errors:\n sli['uri'] = slis[0]['uri']\n s = client.sli_update(sli)\n\n print(json.dumps(s, indent=4))", "def partial_update(self,request,pk= None):\n return Response({'http_method':'PATCH'})", "def update(self, request, pk=None):\n\n 
return Response({'http_method': 'PUT'})" ]
[ "0.7248336", "0.66315514", "0.65434635", "0.6535109", "0.65075743", "0.65045047", "0.6449889", "0.64201784", "0.64201784", "0.64201784", "0.64120096", "0.63827085", "0.6375136", "0.6340054", "0.6335", "0.6311822", "0.63076824", "0.63023853", "0.62987214", "0.62633204", "0.62602615", "0.6258791", "0.62450033", "0.62249273", "0.62170285", "0.6211312", "0.6171201", "0.61665225", "0.61452883", "0.6114973" ]
0.71836835
1
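fusion_api_patch_li above simply forwards body, uri, api and headers to self.li.patch. PATCH bodies for this kind of REST API are commonly RFC 6902 operation lists (op/path/value); the body and endpoint in the sketch below are illustrative placeholders, not values taken from this library.

    # Hedged sketch of issuing a PATCH with an RFC 6902-style body.
    # The op/path/value pair and the URL are placeholders.
    import json
    import urllib.request

    def http_patch(url, body):
        req = urllib.request.Request(
            url,
            data=json.dumps(body).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="PATCH",
        )
        with urllib.request.urlopen(req) as resp:
            return resp.read()

    patch_body = [{"op": "replace", "path": "/name", "value": "renamed-li"}]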
Gets the Port Monitor for the given LI [Arguments]
def fusion_api_get_li_port_monitor_configuration(self, uri, api=None, headers=None): param = '/port-monitor' return self.li.get(uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMonitor(self) -> ghidra.util.task.TaskMonitor:\n ...", "def get(self, *args):\n return _libsbml.ListOfPorts_get(self, *args)", "def get_monitor(name: Optional[str] = None,\n partition: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMonitorResult:\n __args__ = dict()\n __args__['name'] = name\n __args__['partition'] = partition\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('f5bigip:ltm/getMonitor:getMonitor', __args__, opts=opts, typ=GetMonitorResult).value\n\n return AwaitableGetMonitorResult(\n adaptive=pulumi.get(__ret__, 'adaptive'),\n adaptive_limit=pulumi.get(__ret__, 'adaptive_limit'),\n database=pulumi.get(__ret__, 'database'),\n defaults_from=pulumi.get(__ret__, 'defaults_from'),\n destination=pulumi.get(__ret__, 'destination'),\n filename=pulumi.get(__ret__, 'filename'),\n id=pulumi.get(__ret__, 'id'),\n interval=pulumi.get(__ret__, 'interval'),\n ip_dscp=pulumi.get(__ret__, 'ip_dscp'),\n manual_resume=pulumi.get(__ret__, 'manual_resume'),\n mode=pulumi.get(__ret__, 'mode'),\n name=pulumi.get(__ret__, 'name'),\n partition=pulumi.get(__ret__, 'partition'),\n receive_disable=pulumi.get(__ret__, 'receive_disable'),\n reverse=pulumi.get(__ret__, 'reverse'),\n time_until_up=pulumi.get(__ret__, 'time_until_up'),\n timeout=pulumi.get(__ret__, 'timeout'),\n transparent=pulumi.get(__ret__, 'transparent'),\n username=pulumi.get(__ret__, 'username'))", "def head_port_monitoring(self):\n return self.head_args.port_monitoring if self.head_args else None", "def monitor(self) -> HwMonitor:\n return self._montior", "def get_monitor_output(name: Optional[pulumi.Input[str]] = None,\n partition: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMonitorResult]:\n ...", "def getPort(self, *args):\n return _libsbml.CompModelPlugin_getPort(self, *args)", "def parse_monitor(self):\n return DEFAULT_MONITOR", "def monitor_from_window(h_wnd, dw_flags):\n _monitor_from_window = WINDLL.user32.MonitorFromWindow\n _monitor_from_window.argtypes = [HWND, DWORD]\n _monitor_from_window.restype = HMONITOR\n\n return _monitor_from_window(h_wnd, dw_flags)", "def set_monitor(w_card):\n\n # standard name for the monitor interfaces\n mon_id = \"mon{}\".format(w_card.phy)\n\n if mon_id not in pyw.winterfaces():\n # this monitor interface is not set\n # then create a new one\n m_card = pyw.devadd(w_card, mon_id, 'monitor')\n\n # remove obsolete interface\n pyw.devdel(w_card)\n\n return m_card\n\n return None", "def router_port_list(mgr_or_client, router_id, *args, **kwargs):\n return router_interface_list(mgr_or_client, router_id, **kwargs)", "def get_monitor_obj(link):\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n monitor = monitors.find_one({'metadata.rss_link': link}, {'_id': 0})\n return monitor", "def _get_port(self):\n return self.__port", "def port(self, **kw):\n return self.portType(**kw)", "def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit", "def port_show(switch, port):\n print client.port.show(switch, port)", "def get_monitor_info_w(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoW, h_monitor)", "def get_cmd_port(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetCmdPort', self.handle)", "def wm_dial(self):\n return self.get_par(\"dial_readback\")", "def monitor(self, *args, **kwargs):\n kwargs['logger'] = self\n return Monitor(*args, **kwargs)", "def cmd_port(args):", "def 
monitors_read_element(self) -> str:\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3), ctypes.c_int32(0)))\n return result.value.decode('ascii')", "def ms_get_management_vlan(self):\n self.open_route('/configure/switch_settings', \"Switch\")\n textarea_value = page_utils.get_input_var_value(\n self.get_page(),\n var_id='node_group_management_vlan')\n return textarea_value", "def get_current_viewport(self,*args):\n vp_string = self.run_cmd(['xprop', '-root', \n '-notype', '_NET_DESKTOP_VIEWPORT'])\n vp_list=vp_string.decode().strip().split('=')[1].split(',')\n return tuple( int(i) for i in vp_list )", "def probe_ports( self, ):\r\n ports = self.com_driver.list_available()\r\n self.gui.print_info_string( \"\" )\r\n self.gui.print_info_string( \"Reported Ports from driver:\" )\r\n self.gui.print_info_string( \"\" )\r\n if len( ports ) == 0:\r\n self.gui.print_info_string( \"None \\n\" )\r\n else:\r\n for i_port in ports:\r\n self.gui.print_info_string( i_port[0] )\r\n #self.gui.print_info_string( \"\\n\" )\r\n\r\n self.close_driver()\r\n\r\n self.gui.print_info_string( \"\\nProbe Ports from parameters:\\n\" )\r\n ports = self.com_driver.probe_available( self.parameters.port_list )\r\n ix_line = 0 # what is this ??\r\n for i_port in ports:\r\n ix_line += 1\r\n self.gui.print_info_string( str( i_port ) )\r\n if ix_line == 10:\r\n ix_line = 0\r\n self.gui.print_info_string( \"\\n\" )\r\n #logger.log( fll, a_str )\r\n\r\n return", "def getMonitors(self):\n return [self.monitor]", "def Port(self) -> int:", "def get_vnc_port(self):\n\t\troot = self.get_xml()\n\t\t# get the VNC port\n\t\tgraphics = root.find('./devices/graphics')\n\t\tport = graphics.get('port')\n\t\treturn port", "def get_switch_by_port(self, port_dn):\n port_mo = self.moDir.lookupByDn(port_dn)\n switch_sys_mo = self.moDir.lookupByDn(port_mo.parentDn)\n switch_mo = self.moDir.lookupByDn(switch_sys_mo.parentDn)\n return switch_mo", "def com_port():\n port = ListPortInfo(DEFAULT_PORT)\n port.serial_number = \"1234\"\n port.manufacturer = \"Virtual serial port\"\n port.device = DEFAULT_PORT\n port.description = \"Some serial port\"\n\n return port" ]
[ "0.5453936", "0.53950405", "0.52761954", "0.5250016", "0.5229539", "0.519867", "0.516409", "0.5136712", "0.5085675", "0.5085646", "0.5071541", "0.5059226", "0.5054339", "0.504533", "0.5014978", "0.49869946", "0.49751168", "0.4959505", "0.4931168", "0.49127468", "0.49117902", "0.49095348", "0.48927644", "0.48789963", "0.48788798", "0.4866219", "0.48624527", "0.4835551", "0.48274776", "0.48260635" ]
0.60370433
0
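Like the other LI wrappers in this section, the port-monitor getter builds its endpoint by appending a fixed suffix ('/port-monitor') to the logical-interconnect URI before delegating to self.li.get. Assuming get() performs plain string concatenation (a guess based on the "'/forwarding-information-base%s' % param" pattern visible in a sibling wrapper above), the composition looks like this:

    # Assumed URL composition for the LI sub-resource wrappers; the host and
    # LI id are placeholders.
    def build_url(host, uri, param=""):
        return f"{host.rstrip('/')}{uri}{param}"

    build_url("https://oneview.example", "/rest/logical-interconnects/abc", "/port-monitor")
    # -> 'https://oneview.example/rest/logical-interconnects/abc/port-monitor'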
Updates the ethernetSettings for the given LI [Arguments]
def fusion_api_update_li_ethernet_settings(self, body=None, uri=None, api=None, headers=None): param = '/ethernetSettings' return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_wifi_mode(args):\n pass\n \"\"\"+\n\n try:\n if args['mode'] == 'hotspot':\n logger.info('will enter hotspot mode')\n #TODO - Need to capture the line that contains interface [some lan id] and uncomment it.\n change_file_line(path.join('/etc', 'dhcpcd.conf'), \n interface_l1_res, 'interface {}\\n'.format()\n \n\n\n return True if args['silent'] else 'Ok'\n if args['mode'] == 'wi-fi':\n logger.info('will enter wi-fi mode')\n\n\n\n\n return True if args['silent'] else 'Ok'\n else:\n logger.error('Unknown wi-fi mode: {}'.format(args['mode']))\n return False if args['silent'] else 'ERROR'\n \n except:\n logger.error('Exception in set_wifi_mode: {}, {}'.format(exc_info()[0], exc_info()[1]))\n return False if args['silent'] else 'ERROR'\n \"\"\"", "def setAdhocParameters(self, host, mode, **params):\n self.mode = mode\n latency = 10\n self.host = host\n #delay = 5 * distance\n try:\n options = dict( params )\n self.interface = options[ 'interface' ]\n except: \n self.interface = 'wlan0'\n \n bandwidth = wifiParameters.set_bw(mode)\n #self.host.cmd(host, \"tc qdisc replace dev %s-%s root netem rate %.2fmbit latency %.2fms delay %.2fms\" % (host, self.interface, rate, latency, delay)) \n self.host.cmd(\"tc qdisc add dev %s-%s root tbf rate %smbit latency %sms burst 1540\" % (str(host), self.interface, bandwidth, latency))", "def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)", "def fusion_api_get_li_ethernet_settings(self, uri, api=None, headers=None):\n param = '/ethernetSettings'\n return self.li.get(uri=uri, api=api, headers=headers, param=param)", "def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def edit_config(self, parameter_type, parameter_value):\n\n assert isinstance(parameter_value, list), \"Parameter Value needs to be a list\"\n\n def change_interface_name():\n\n parameter_dictionary = {'a': 'config',\n parameter_type: [netconf_server_namespace, {parameter_value[0]:parameter_value[1]}]}\n xml, tags = dictToXML(parameter_dictionary, [root_namespace, netconf_server_namespace])\n config_data = wrap_tags(xml, tags)\n\n\n try:\n\n with manager.connect(host=netconf_server_ip,\n port=int(netconf_server_port),\n username=netconf_server_username,\n password=netconf_server_password) as m:\n\n assert(\":validate\" in m.server_capabilities)\n m.edit_config(target='running', config=config_data)\n return m.get_config(source='running').data_xml\n\n except:\n return \"Can not establish connection with the server, something went wrong\"\n\n\n def set_experimenter():\n parameter_dictionary = {'a': 'config',\n parameter_type: [netconf_server_namespace, {parameter_type[0]: parameter_value[1]}]}\n xml, tags = dictToXML(parameter_dictionary, [root_namespace, netconf_server_namespace])\n config_data = wrap_tags(xml, tags)\n\n try:\n with manager.connect(host=netconf_server_ip,\n port=int(netconf_server_port),\n username= netconf_server_username,\n password=netconf_server_password) as m:\n\n assert(\":validate\" in m.server_capabilities)\n m.edit_config(target='running', 
config=config_data)\n return m.get_config(source='running').data_xml\n except:\n return \"Can not establish connection with the server, something went wrong\"\n\n functions = {'change': change_interface_name,\n 'experimenter': set_experimenter}\n\n if parameter_type in ['interface', 'interfaces']:\n return functions['change']()\n\n if parameter_type in ['experimenter', 'experiment', 'properties']:\n return functions['experimenter']()", "def update_config_item(self, elements: Dict[str, Any]) -> None:\n ...", "def update_settings(self, settings_list):\n for i, x in enumerate(settings_list):\n self.update_settings_at_index(settings=x, index=i)", "def update_based_on_topology(self, *args, **kwargs):\n for bfr in Configuration.get(\"switches\"):\n switch = bfr[\"name\"]\n\n self.update_bier_decap_rule(switch=switch)", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n remote.update_config(_get_current_project_name(), settings)", "def ra_llc_configuration_set(host_id, llc_configuration_fields, llc_configuration_param, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n result = ''\n err1 = [0, 0, 0, 0, 0]\n form_name = ['ARQ Mode', 'ArqWin(Retransmit Window Size)', 'Frame Loss Threshold',\n 'Leaky Bucket Timer', 'Frame Loss Time Out']\n param = []\n dictarr = []\n resultarray = {}\n param.append('llcArqEnable.1')\n param.append('arqWin.1')\n param.append('frameLossThreshold.1')\n param.append('leakyBucketTimerVal.1')\n param.append('frameLossTimeout.1')\n ra_llc_config = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n ra_llc_config = sqlalche_obj.session.query(SetOdu16RALlcConfTable).filter(\n SetOdu16RALlcConfTable.config_profile_id == device_param_list[0][4]).first()\n for i in range(len(llc_configuration_fields)):\n oidname = oid_name[llc_configuration_fields[i]]\n oidtype = oid_type[llc_configuration_fields[i]]\n oidvalue = llc_configuration_param[i]\n result += snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], oidname, oidtype, oidvalue)\n err = error_odu16(result, param, err1)\n val = ''\n try:\n el = EventLog()\n if 1 in err1:\n el.log_event(\"Values Updated in UBR LLC Form\", \"%s\" % (user_name))\n for j in range(0, len(llc_configuration_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = llc_configuration_param[j]\n dict[\"textbox\"] = llc_configuration_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err1[0] == 1:\n ra_llc_config.llc_arq_enable = llc_configuration_param[0]\n if err1[1] == 1:\n 
ra_llc_config.arq_win = llc_configuration_param[1]\n if err1[2] == 1:\n ra_llc_config.frame_loss_threshold = llc_configuration_param[2]\n if err1[3] == 1:\n ra_llc_config.leaky_bucket_timer_val = llc_configuration_param[3]\n if err1[4] == 1:\n ra_llc_config.frame_loss_timeout = llc_configuration_param[4]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if err != '':\n raise Set_exception\n except Set_exception as e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16RALlcConfTable'\n resultarray['formAction'] = 'Llc_Cancel_Configuration.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)", "def setup(wlan_type, wlan_name, alias, password, log_level):\n if alias is None:\n alias = click.prompt('Alias')\n if wlan_name is None:\n wlan_name = click.prompt(\"Wlan_name\")\n if wlan_type is None:\n wlan_type = click.prompt(\"Wlan-type\", type=click.Choice(['0', '1', '2', '3']))\n if wlan_type != '0' and password is None:\n password = getpass()\n setup_logging(log_level)\n wlan_type = int(wlan_type)\n tcp_setup(wlan_type, wlan_name, alias, password)", "def setEthaddr(self):\n\t\tself.ethaddr = self.settings.getKeyValue('ethaddr')\n\t\tself.socket.send('setenv ethaddr ' + self.ethaddr+'\\r', 1)\n\t\treturn None", "def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)", "def updateNetworkUplinkSettings(self, networkId: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['Uplink settings'],\n 'operation': 'updateNetworkUplinkSettings',\n }\n resource = f'/networks/{networkId}/uplinkSettings'\n\n body_params = ['bandwidthLimits']\n payload = {k: v for (k, v) in kwargs.items() if k in body_params}\n\n return self._session.put(metadata, resource, payload)", "def fusion_api_update_li_fcoe_settings(self, body=None, uri=None, api=None, headers=None):\n param = '/fcoeSettings'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def SetTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('setTapSettings', payload=payload, response_object=None)", "def write_xbee_settings():\n device.apply_changes()\n device.write_changes()", "def setInfraParameters(self, sta, mode, distance):\n station.mode(str(sta), mode)\n \n seconds = 3\n self.src = str(sta)\n try:\n \"\"\"Based on RandomPropagationDelayModel (ns3)\"\"\"\n seconds = abs(mobility.speed[self.src])\n except:\n pass\n self.host = sta\n latency = wifiParameters.latency(distance)\n loss = wifiParameters.loss(distance)\n delay = wifiParameters.delay(distance, seconds)\n bw = wifiParameters.bw(distance, mode) \n self.host.pexec(\"tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit loss %.1f%% latency %.2fms delay %.2fms\" % (sta, bw, loss, latency, delay)) \n #os.system('util/m %s tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit latency %.2fms delay %.2fms' % (self.host, self.host, bandwidth, latency, delay))\n #self.host.cmd(\"tc qdisc replace dev %s-wlan0 root tbf rate %.2fmbit latency %.2fms burst 15k\" % (self.host, rate, latency)) \n associate = self.doAssociation(mode, distance)\n if associate == False:\n mobility.handover(self.host)", "def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = 
json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()", "def setup():\n jails = jails_list()\n\n jail_start_stop('stop', 'all') # stop ALL jail befor other action\n\n# Read jail.conf file \n jcs = open(jailconf, 'r')\n jcs_list = []\n for i in jcs:\n jcs_list.append(i)\n jcs.close()\n\n print \" \" \n set_menu = ['JADM', 'Settings']\n bz = [[\"Bridge interface:\", bridge_int], [\"Main zfs:\", jzfs]]\n print tabulate(bz, set_menu)\n print \" \"\n \n ch_choise = ['bridge', 'zfs', '!'] \n while True:\n choise = raw_input(\"change (bridge|zfs|!):> \")\n \n if choise == 'bridge':\n print \" \"\n \n br_interface = []\n bridges_sys = []\n gw_ipaddr = []\n gw_number = 0\n for i in netifaces.interfaces():\n if \"bridge\" in i:\n bridges_sys.append(i)\n \n br_count = 0\n for x in bridges_sys:\n try:\n bripadd = netifaces.ifaddresses(x)[netifaces.AF_INET]\n except:\n brake\n for i in bripadd:\n br_interface.append([' ', ' ', i['addr'], i['netmask']])\n gw_ipaddr.append(i['addr'])\n br_count = br_count + 1\n br_interface[br_count - 1][1] = str(x)\n br_interface[br_count - 1][0] = str(gw_number)\n gw_number = gw_number + 1\n\n br_menu = [\"Number\", \"Bridge name\", \"Gateway IP Address\", \"Gatewy Network Mask\"]\n print tabulate(br_interface, br_menu)\n print \" \"\n \n while True:\n brid = raw_input(\"bridge number(old: %s):> \" % (bridge_int))\n if brid == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n \n try:\n int(brid)\n except ValueError:\n msg = \" ERROR: slecet valid Bridge number (%s - %s)!\" % (0, len(bridges_sys) - 1)\n log(msg)\n continue\n \n if int(brid) >= len(bridges_sys):\n msg = \" ERROR: slecet valid Bridge number (%s - %s)!\" % (0, len(bridges_sys) - 1)\n log(msg)\n continue\n \n brid = bridges_sys[int(brid)]\n# check if we use the same brige\n if bridge_int == brid:\n log(\" INFO: bridge interface was not changed\")\n return False\n \n # update $bridge in jail.conf\n for i in jcs_list:\n if \"$bridge\" in i:\n update_jcs = jcs_list.index(i)\n jcs_list[update_jcs] = '$bridge = \"%s\";\\n' % (brid)\n msg = \" WARNING: please modify all jails for new '%s' networks!\" % (brid)\n log(msg)\n break\n\n break\n break\n \n elif choise == 'zfs':\n print \" Check for ZFS zpools ...\"\n os.system(\"zpool list\")\n print \" \"\n os.system(\"zfs list\")\n log(\" WARNING: JADM will rename all existing jails zfs :WARNING\")\n print \" \"\n\n while True:\n chjzfs = raw_input(\"zpool/tank:> \")\n if chjzfs == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n if chjzfs == jzfs:\n msg = \" ERROR: '%s' is current zfs please choose different!\" % (chjzfs)\n log(msg)\n continue\n \n zfs = subprocess.check_output(\"zfs list -H -o name\", shell=True)\n zfs = zfs.split('\\n')\n if chjzfs in zfs:\n msg = \" INFO: We will use existing zpool/tank: %s\" % (chjzfs)\n log(msg)\n print \" WARNING: '%s' will be destroyed!\" % (chjzfs)\n yn = raw_input('use it anyway (yes):> ')\n if yn == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n# destroy existing ZFS\n if yn != \"yes\":\n log(\" INFO: Interrupted by user\")\n return False\n else:\n if os.WEXITSTATUS(os.system(\"zfs destroy -r %s\" % (chjzfs))) !=0:\n msg = \" ERROR:'%s' cannot be destroyed!\" % (chjzfs)\n log(msg)\n else:\n msg = \" WARNING:'%s' was destroyed!\" % (chjzfs)\n log(msg)\n ''''\n chjpath = subprocess.check_output('zfs list -H -o mountpoint %s' 
% chjzfs, shell = True)\n chjpath = chjpath.strip('\\n')\n# check if exsiting zfs tank have mount point\n if chjpath == 'none':\n print \" \"\n print \" WARNING: '%s' have '%s' for mount point\" % (chjzfs, chjpath)\n print \" WARNING: Please create mount point for '%s' or select different zroot/tank\" % chjzfs\n continue\n break\n '''\n if os.WEXITSTATUS(os.system(\"zfs create %s\" % (chjzfs))) != 0:\n print \" \"\n print \" ERROR: Please enter correct zfs!\"\n continue\n else:\n while True:\n chjpath = raw_input(\"%s mount point:> \" % (chjzfs))\n if chjpath == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n if chjpath == jpath:\n msg = \" ERROR: '%s' is current mount point please choose different!\" % (chjpath)\n log(msg)\n continue \n \n# check if $japth content '/' if not add it\n if chjpath[0] != '/':\n chjpath = \"/%s\" % chjpath\n if chjpath[-1] != '/':\n chjpath = \"%s/\" % chjpath\n \n# check if mount point exitst\n zfsmount = os.path.isdir(chjpath)\n if zfsmount == True:\n print \" \"\n print \" ERROR: %s mount point exist!\" % chjpath\n yn = raw_input('use it anyway (yes):> ')\n if 'yes' in yn:\n os.system('zfs set mountpoint=%s %s' % (chjpath, chjzfs))\n break\n else:\n continue\n else:\n os.system('zfs set mountpoint=%s %s' % (chjpath, chjzfs))\n break\n break\n\n# create BASE-RW\n if 'BASE' in jails[1]:\n if os.WEXITSTATUS(os.system(\"zfs create %s\" % (chjzfs+\"/BASE-RW\"))) != 0:\n msg = \" ERROR: '%s' cannot be created!\" % (chjzfs+\"/BASE-RW\")\n log(msg)\n return False\n else:\n if os.WEXITSTATUS(os.system('zfs set mountpoint=%s %s' % (chjpath + \"BASE-RW\", chjzfs+\"/BASE-RW\"))) != 0:\n msg = \" ERROR: '%s' cannot be created!\" % (chjpath + \"BASE-RW\")\n log(msg)\n return False\n else:\n msg = \" INFO: '%s' was created!\" % (chjzfs+\"/BASE-RW\")\n log(msg)\n \n# try to rename all jails\n for i in jails[1]:\n \n orgJZFS = jzfs+\"/\"+i\n orgJPATH = jpath + i\n \n newJZFS = chjzfs+\"/\"+i\n newJPATH = chjpath + i\n# zfs fix BASE-\n if 'BASE-' in i:\n\n orgJZFS = jzfs+\"/BASE-RW/\"+i\n orgJPATH = jpath + \"BASE-RW/\" +i\n \n newJZFS = chjzfs+\"/BASE-RW/\"+i\n newBJPATH = newJPATH\n newJPATH = chjpath + \"BASE-RW/\" + i\n \n# rename jaisl zfs\n if os.WEXITSTATUS(os.system(\"zfs rename %s %s\" % (orgJZFS, newJZFS))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % (orgJZFS, newJZFS)\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % (orgJZFS, newJZFS, jzfs)\n log(msg)\n else:\n# zfs fix BASE-SKE:ETON\n if i =='BASE':\n if os.WEXITSTATUS(os.system(\"zfs rename %s %s\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON'))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON')\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON', jzfs)\n log(msg)\n else:\n msg = \" INFO: '%s' was rename to '%s'\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON')\n log(msg)\n \n print \" INFO: '%s' was rename to '%s'\" % (orgJZFS, newJZFS)\n# rename jails mountpoint\n if os.WEXITSTATUS(os.system('zfs set mountpoint=%s %s' % (newJPATH, newJZFS))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % (orgJPATH, newJPATH)\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % (orgJPATH, newJPATH, jpath)\n log(msg)\n else:\n# mount point fix BASE-SKELETON\n if i =='BASE':\n if 
os.WEXITSTATUS(os.system('zfs set mountpoint=%s %s' % (chjpath + i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON'))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % (jpath + i +'-SKELETON', chjpath + i +'-SKELETON')\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % (jpath + i +'-SKELETON', chjpath + i +'-SKELETON', jzfs)\n log(msg)\n else:\n msg = \" INFO: '%s' was rename to '%s'\" % (jpath + i +'-SKELETON', chjpath + i +'-SKELETON')\n log(msg)\n# create mount folder for BASE- jail\n if 'BASE-' in i:\n os.system('mkdir -p %s/%s' % (newBJPATH, i))\n msg = (\" INFO: '%s/%s' was created\" % (newBJPATH, i))\n log(msg)\n \n# update BASE- jail mount.fstab and /etc/fstab\n fj = find_jail(i)\n jb = fj[0]\n je = fj[1]\n jcl = fj[2]\n\n dt = str(datetime.now()) \n jcs_list[jb+2] = '# modified on %s by ... JADM ...\\n' % (dt)\n\n# check if jail mark as BASE skeleton model and fix mount 'exec.prestart +=' local options\n os.system('echo \\\"%sBASE %s nullfs ro 0 0\\\" > %s/etc/fstab' % (chjpath, newBJPATH, newJPATH))\n os.system('echo \\\"%s %s%s/SROOT nullfs rw 0 0\\\" >> %s/etc/fstab' % (newJPATH, chjpath, i, newJPATH))\n# check if is vnet\n if 'vnet;' in jcs_list[jb+8]:\n jcs_list[jb+23] = 'mount.fstab=\"%s/etc/fstab\";\\n' % (newJPATH)\n else:\n jcs_list[jb+12] = 'mount.fstab=\"%s/etc/fstab\";\\n' % (newJPATH)\n \n msg = \" INFO: '%s' was rename to '%s'\" % (orgJPATH, newJPATH)\n log(msg)\n\n jzfsyes = \"\"\n jzfsyes = raw_input(\"destroy old zfs '%s' (yes only):> \" % (jzfs))\n if jzfsyes == \"yes\":\n if os.WEXITSTATUS(os.system(\"zfs destroy -r %s\" % (jzfs))) !=0:\n msg = \" ERROR:'%s' cannot be destroyed!\" % (jzfs)\n log(msg)\n else:\n os.system('chflags -R 0 %s' % jpath)\n os.system('rm -rf %s' % jpath)\n msg = \" WARNING:'%s' was destroyed!\" % (jzfs)\n log(msg)\n elif jzfsyes != \"yes\":\n msg = \" INFO: '%s' was keeped!\" % (jzfs)\n log(msg)\n\n# update $jedir in jail.conf\n for i in jcs_list:\n if \"$jzfs\" in i:\n update_jcs = jcs_list.index(i)\n jcs_list[update_jcs] = '$jzfs = \"%s\";\\n' % (chjzfs)\n break\n \n for i in jcs_list:\n if \"$jedir\" in i:\n update_jcs = jcs_list.index(i)\n jcs_list[update_jcs] = '$jedir = \"%s\";\\n' % (chjpath)\n break\n\n break\n \n elif choise == '!':\n log(\" INFO: Interrupted by user\")\n return False\n else:\n log(\" INFO: To change setting type 'bridge', 'zfs' or '!' 
for exit\")\n \n# check if jail.conf exist\n check_jailconf = os.path.isfile(jailconf)\n if check_jailconf == True:\n dt = datetime.now().strftime(\"%d_%m_%y_%I%M%S\")\n os.system(\"cp %s %s\" % (jailconf, jailconf+\".\"+dt))\n msg = \" INFO: make a backup: %s\" % (jailconf+\".\"+dt)\n log(msg)\n\n# write jail.conf file\n jcs = open(jailconf, 'w+')\n for i in jcs_list:\n jcs.write(i)\n jcs.close()\n\n def_vars() \n print \" \"\n set_menu = ['JADM', 'Settings']\n bz = [[\"Bridge interface:\", bridge_int], [\"Main zfs:\", jzfs]]\n print tabulate(bz, set_menu)\n \n# print and add to log file \n log(\" WARNING: Jadm SETUP was modified\")", "def set_config(self, settings='settings.json'): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['photo1'] = self.photo1.get()\n self.settings['photo2'] = self.photo2.get()\n self.settings['smc1'] = self.smc1.get()\n self.settings['smc2'] = self.smc2.get()\n self.settings['smc3'] = self.smc3.get()\n self.settings['smc4'] = self.smc4.get()\n self.settings['watering'] = self.watering.get()\n self.settings['cycle'] = self.cycle.get()\n settings_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), settings)\n if os.path.exists(settings_path):\n with open(settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def ra_config_set(host_id, ra_config_fields, ra_config_param, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n ra_config = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n ra_config = sqlalche_obj.session.query(SetOdu16RAConfTable).filter(\n SetOdu16RAConfTable.config_profile_id == device_param_list[0][4]).first()\n snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], '.1.3.6.1.4.1.26149.2.2.13.1.1.2.1', 'i', '0')\n for i in range(len(ra_config_fields)):\n oidname = oid_name[ra_config_fields[i]]\n oidtype = oid_type[ra_config_param[i]]\n oidvalue = ra_config_param[i]\n result = snmp_set(device_param_list[0][0], device_param_list[0][1],\n device_param_list[0][2], device_param_list[0][3], oidname, oidtype, oidvalue)\n ra_config.acl_mode = ra_config_param[0]\n ra_config.ssid = ra_config_param[1]\n sqlalche_obj.session.add(ra_config)\n sqlalche_obj.session.commit()\n snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], '.1.3.6.1.4.1.26149.2.2.13.1.1.2.1', 'i', '1')\n sqlalche_obj.sql_alchemy_db_connection_close()\n return result", "def update_host_config(self, hostid, config, **kwargs):\n pass", "def network_settings():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgLanNetworking -o cfgDNSDomainName <Domain Name>\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DNSDomainName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgLanNetworking -o cfgDNSServer1 \"+colo_dns[DEFAULT_COLO ][0])\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DNSServer1 failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" 
config -g cfgLanNetworking -o cfgDNSServer2 \"+colo_dns[DEFAULT_COLO ][1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DNSServer2 failed \")", "def update_zcs_settings(session, network, lowport, highport,\n return_type=None, **kwargs):\n verify_low_high_port(lowport, highport)\n\n body_values = {'network': network, 'lowport': lowport,\n 'highport': highport}\n\n path = '/api/settings/container_service.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)", "def fusion_api_reapply_li_configuration(self, uri, api=None, headers=None):\n param = '/configuration'\n return self.li.update(body=None, uri=uri, api=api, headers=headers, param=param)", "def update_port_ip_address(self):\n leases = None\n req = dict(ip='0.0.0.0')\n instances = self.get_vms_for_this_req(**req)\n if instances is None:\n return\n\n for vm in instances:\n if not leases:\n # For the first time finding the leases file.\n leases = self._get_ip_leases()\n if not leases:\n # File does not exist.\n return\n\n for line in leases:\n if line.startswith('lease') and line.endswith('{\\n'):\n ip_addr = line.split()[1]\n if 'hardware ethernet' in line:\n if vm.mac == line.replace(';', '').split()[2]:\n LOG.info(_LI('Find IP address %(ip)s for %(mac)s'),\n {'ip': ip_addr, 'mac': vm.mac})\n try:\n rule_info = dict(ip=ip_addr, mac=vm.mac,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to update\"\n \"rules.\"))\n else:\n params = dict(columns=dict(ip=ip_addr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ip_addr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to '\n 'agent.'))" ]
[ "0.5477637", "0.5466509", "0.537458", "0.5326889", "0.53104806", "0.5290903", "0.5283721", "0.528293", "0.52247196", "0.5203151", "0.517094", "0.5163822", "0.51587296", "0.51508045", "0.5110261", "0.50629586", "0.50230855", "0.50227296", "0.4994447", "0.49914682", "0.49887642", "0.4983858", "0.49609664", "0.4944963", "0.4939808", "0.49080643", "0.489869", "0.48968405", "0.48743722", "0.4871659" ]
0.71327585
0
Updates the fcoeSettings for the given LI [Arguments]
def fusion_api_update_li_fcoe_settings(self, body=None, uri=None, api=None, headers=None):
    param = '/fcoeSettings'
    return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, ff, **kwargs):\n from .symbolic import call_genfunction\n\n args = self._get_args(ff)\n args.update(kwargs)\n call_genfunction(self.function, args) # pylint: disable=no-member", "def update_coeff(self, **kwargs: float) -> None:\n for rule_name, coeff in kwargs.items():\n if rule_name not in self.rules:\n raise ValueError(f\"Behavioral rule {rule_name} does not exist\")\n else:\n self.rules[getattr(self, rule_name)] = coeff", "def updateCoeff(self, **args):\n for par in args:\n self.rateCoeffMeta[par] = args[par]\n meta = self.rateCoeffMeta\n if self.rateCoeffMeta['type'] ==\"constant\":\n self.k = cp.k_const(meta['k'])\n elif self.rateCoeffMeta['type'] ==\"Arrhenius\":\n self.k = cp.k_arr(meta['A'], meta['E'], meta['T'], meta['R'])\n elif self.rateCoeffMeta['type'] ==\"modifiedArrhenius\":\n self.k = cp.k_mod_arr(meta['A'], meta['b'], meta['E'], meta['T'], meta['R'])\n else:\n # Other type of reaction rate coeff\n self.k = None # k = cp.newMethodToComputeK(...)\n return", "def update_settings(self, **kwargs):\n\n # get arguments\n max_iter = kwargs.pop('max_iter', None)\n eps_abs = kwargs.pop('eps_abs', None)\n eps_rel = kwargs.pop('eps_rel', None)\n eps_prim_inf = kwargs.pop('eps_prim_inf', None)\n eps_dual_inf = kwargs.pop('eps_dual_inf', None)\n rho = kwargs.pop('rho', None)\n alpha = kwargs.pop('alpha', None)\n delta = kwargs.pop('delta', None)\n polish = kwargs.pop('polish', None)\n polish_refine_iter = kwargs.pop('polish_refine_iter', None)\n verbose = kwargs.pop('verbose', None)\n scaled_termination = kwargs.pop('scaled_termination', None)\n check_termination = kwargs.pop('check_termination', None)\n warm_start = kwargs.pop('warm_start', None)\n time_limit = kwargs.pop('time_limit', None)\n\n # update them\n if max_iter is not None:\n self._model.update_max_iter(max_iter)\n\n if eps_abs is not None:\n self._model.update_eps_abs(eps_abs)\n\n if eps_rel is not None:\n self._model.update_eps_rel(eps_rel)\n\n if eps_prim_inf is not None:\n self._model.update_eps_prim_inf(eps_prim_inf)\n\n if eps_dual_inf is not None:\n self._model.update_eps_dual_inf(eps_dual_inf)\n\n if rho is not None:\n self._model.update_rho(rho)\n\n if alpha is not None:\n self._model.update_alpha(alpha)\n\n if delta is not None:\n self._model.update_delta(delta)\n\n if polish is not None:\n self._model.update_polish(polish)\n\n if polish_refine_iter is not None:\n self._model.update_polish_refine_iter(polish_refine_iter)\n\n if verbose is not None:\n self._model.update_verbose(verbose)\n\n if scaled_termination is not None:\n self._model.update_scaled_termination(scaled_termination)\n\n if check_termination is not None:\n self._model.update_check_termination(check_termination)\n\n if warm_start is not None:\n self._model.update_warm_start(warm_start)\n\n if time_limit is not None:\n self._model.update_time_limit(time_limit)\n\n if max_iter is None and \\\n eps_abs is None and \\\n eps_rel is None and \\\n eps_prim_inf is None and \\\n eps_dual_inf is None and \\\n rho is None and \\\n alpha is None and \\\n delta is None and \\\n polish is None and \\\n polish_refine_iter is None and \\\n verbose is None and \\\n scaled_termination is None and \\\n check_termination is None and \\\n warm_start is None:\n raise ValueError(\"No updatable settings has been specified!\")", "def update_settings(gui):\n\n settings[\"filename\"] = gui.lineEdit_fname.text()\n ind = gui.comboBox_language.currentIndex()\n settings[\"language\"] = gui.comboBox_language.itemText(ind)\n settings[\"optimization_level\"] = 
gui.comboBox_optimization_level\\\n .currentIndex()\n\n if settings[\"optimization_level\"] <= 0:\n gui.checkBox_fk_x.setChecked(True)\n gui.checkBox_fk_y.setChecked(True)\n gui.checkBox_fk_z.setChecked(True)\n gui.checkBox_fk_orientation.setChecked(True)\n gui.checkBox_jac_x.setChecked(True)\n gui.checkBox_jac_y.setChecked(True)\n gui.checkBox_jac_z.setChecked(True)\n gui.checkBox_jac_wx.setChecked(True)\n gui.checkBox_jac_wy.setChecked(True)\n gui.checkBox_jac_wz.setChecked(True)\n gui.checkBox_com_x.setChecked(True)\n gui.checkBox_com_y.setChecked(True)\n gui.checkBox_com_z.setChecked(True)\n gui.checkBox_com_jac_x.setChecked(True)\n gui.checkBox_com_jac_y.setChecked(True)\n gui.checkBox_com_jac_z.setChecked(True)\n\n gui.checkBox_fk_x.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_fk_y.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_fk_z.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_fk_orientation\\\n .setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_jac_x.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_jac_y.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_jac_z.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_jac_wx.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_jac_wy.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_jac_wz.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_com_x.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_com_y.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_com_z.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_com_jac_x.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_com_jac_y.setEnabled(settings[\"optimization_level\"] > 0)\n gui.checkBox_com_jac_z.setEnabled(settings[\"optimization_level\"] > 0)\n\n mat = settings[\"language\"].lower() == \"matlab\"\n gui.checkBox_loops_constraints.setEnabled(mat)\n gui.checkBox_loops_constraints.setChecked(mat)", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def update(self, val, feats):\n raise NotImplementedError", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', 
self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def update(*args):", "def edit_settings(self):\n while True:\n os.system('cls' if os.name == 'nt' else 'clear')\n valid_numbers, number_setting_corr = self.print_settings()\n print('Which setting you want to change? Enter \"number, new value\" to modify, or \"done\" to exit.')\n print('Observe the possible values for each setting! They are case sensitive. '\n 'Inputting wrong values might break the program. \\n')\n choice = input('Input:')\n if choice == 'done':\n break\n if ',' not in choice:\n print('Invalid input. Place the number, followed by a comma, followed by its value. Eg: 1,TRUE')\n continue\n if len(choice.split(',')) != 2:\n print('Invalid input, must have only one comma')\n continue\n\n var, val = choice.split(',')\n if var not in valid_numbers:\n print('Invalid number.')\n continue\n real_var = number_setting_corr[var] # Changes from a number to the actual parameter\n if val.lower() == 'true':\n setattr(self, real_var, True)\n continue\n elif val.lower() == 'false':\n setattr(self, real_var, False)\n continue\n else:\n setattr(self, real_var, val)\n\n # todo: check for all possible values to avoid inputting wrong settings and messing everything up.\n # if val not in valid_options_nl_sorting:\n # print('Invalid nonlinear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in valid_options_lin_sorting:\n # print('Invalid linear sorting option. Case sensitive! Be very precise.')\n # continue\n # if val not in models:\n # print('Invalid nonlinear fitting model. Case sensitive! Be very precise.')\n # continue\n\n print('===Final settings===')\n _, _ = self.print_settings()\n self.save_settings()\n return", "def update(self, parameters):\n self.set_frequencies(parameters) # f_i\n self.set_coupling_weights(parameters) # w_ij\n self.set_phase_bias(parameters) # theta_i\n self.set_amplitudes_rate(parameters) # a_i\n self.set_nominal_amplitudes(parameters) # R_i", "def update_settings(self, settings_list):\n for i, x in enumerate(settings_list):\n self.update_settings_at_index(settings=x, index=i)", "def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config", "def change_fleurinp(self):\n self.report('INFO: run change_fleurinp')\n\n if self.ctx.scf_needed:\n try:\n fleurin = self.ctx.reference.outputs.fleurinp\n except NotExistent:\n error = 'Fleurinp generated in the reference calculation is not found.'\n self.control_end_wc(error)\n return self.exit_codes.ERROR_REFERENCE_CALCULATION_FAILED\n else:\n if 'fleurinp' in self.inputs:\n fleurin = self.inputs.fleurinp\n else:\n # In this case only remote is given\n # fleurinp data has to be generated from the remote inp.xml file\n fleurin = 
get_fleurinp_from_remote_data(self.inputs.remote)\n\n # copy inpchanges from wf parameters\n with inpxml_changes(self.ctx.wf_dict) as fm:\n fm.set_complex_tag('DMI', {\n 'qVectors': {\n 'q': self.ctx.wf_dict['q_vectors']\n },\n 'theta': self.ctx.wf_dict['sqas_theta'],\n 'phi': self.ctx.wf_dict['sqas_phi']\n },\n create=True)\n fm.set_inpchanges({\n 'itmax': 1,\n 'l_noco': True,\n 'ctail': False,\n 'spav': True,\n # 'l_soc': True,\n 'l_ss': True\n })\n\n if self.ctx.wf_dict['kmesh_force_theorem'] is not None:\n kmesh = KpointsData()\n kmesh.set_kpoints(monkhorst_pack(self.ctx.wf_dict['kmesh_force_theorem']))\n kmesh.store()\n fm.set_kpointsdata(kmesh.uuid, switch=True, kpoint_type='mesh')\n\n # change beta parameter\n for key, val in self.ctx.wf_dict['beta'].items():\n fm.set_atomgroup_label(key, {'nocoParams': {'beta': val}})\n\n # switch off SOC on an atom specie\n for atom_label in self.ctx.wf_dict['soc_off']:\n fm.set_species_label(\n atom_label,\n {'special': {\n 'socscale': 0.0\n }},\n )\n\n fleurmode = FleurinpModifier(fleurin)\n try:\n fleurmode.add_task_list(self.ctx.wf_dict['inpxml_changes'])\n except (ValueError, TypeError) as exc:\n error = ('ERROR: Changing the inp.xml file failed. Tried to apply inpxml_changes'\n f', which failed with {exc}. I abort, good luck next time!')\n self.control_end_wc(error)\n return self.exit_codes.ERROR_CHANGING_FLEURINPUT_FAILED\n\n # validate?\n try:\n fleurmode.show(display=False, validate=True)\n except etree.DocumentInvalid:\n error = ('ERROR: input, user wanted inp.xml changes did not validate')\n self.report(error)\n return self.exit_codes.ERROR_INVALID_INPUT_FILE\n except ValueError as exc:\n error = ('ERROR: input, user wanted inp.xml changes could not be applied.'\n f'The following error was raised {exc}')\n self.control_end_wc(error)\n return self.exit_codes.ERROR_CHANGING_FLEURINPUT_FAILED\n\n # apply\n out = fleurmode.freeze()\n self.ctx.fleurinp = out", "def update_parameters(parameters, grads, learning_rate):\n pass", "def settings_f(self, settings):\n\n self._set_list_field(\"settings\", settings)", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def new_param(ini_file='new_param.ini'):\n ### READ THE INI FILE ###\n config.read(ini_file)\n print 'Read the file ',ini_file\n ##~~~~~~ file_in ~~~~~~##\n file_in=config.get('file_in','file_in')\n ##~~~~~~ file_out ~~~~~~##\n file_out=config.get('file_out','file_out')\n\n ##~~~~~~ factor_values ~~~~~##\n fac_L=config.getfloat('factor_values','fac_L')\n fac_Ks=config.getfloat('factor_values','fac_KS')\n fac_n_o=config.getfloat('factor_values','fac_n_o')\n fac_n_c=config.getfloat('factor_values','fac_n_c')\n\n ##~~~~~~ new_initial_values ~~~~~##\n new_pVs_t0=config.getfloat('new_initial_values','new_pVs_t0')\n new_Vo_t0=config.getfloat('new_initial_values','new_Vo_t0')\n new_Qc_t0=config.getfloat('new_initial_values','new_Qc_t0')\n\n ##~~~~~~ flags ~~~~~~##\n nb_param=config.getfloat('flags','nb_param')\n\n #Reading of parameter file\n print 'Reading parameter file'\n ar_cell_label,ar_coorx,ar_coory,ar_lambda,ar_Xc,ar_dam,ar_tan_beta,ar_tan_beta_channel,ar_L,ar_Ks,\\\n ar_theta_r,ar_theta_s,ar_n_o,ar_n_c,\\\n ar_cell_down,ar_pVs_t0,ar_Vo_t0,ar_Qc_t0,ar_kc\\\n =pm.read_cell_parameters(file_in)\n\n #~~~~~~Change in parameters~~~~~~#\n 
#Multiplying factors for L, Ks, n_o and n_c\n if fac_L!=1.:\n print 'Change L'\n ar_L=ar_L*fac_L\n if fac_Ks!=1.:\n print 'Change Ks'\n ar_Ks=ar_Ks*fac_Ks\n if fac_n_o!=1.:\n print 'Change n_o'\n ar_n_o=ar_n_o*fac_n_o\n if fac_n_c!=1.:\n print 'Change n_c'\n ar_n_c=ar_n_c*fac_n_c\n #New values for pVs_t0, Vo_t0 and Qc_t0\n if new_pVs_t0!=ar_pVs_t0[0]:\n print 'Change pVs_t0'\n ar_pVs_t0=ar_pVs_t0*0.+new_pVs_t0\n if new_Vo_t0!=ar_Vo_t0[0]:\n print 'Change pVs_t0'\n ar_Vo_t0=ar_Vo_t0*0.+new_Vo_t0\n if new_Qc_t0!=ar_Qc_t0[0]:\n print 'Change pVc_t0'\n ar_Qc_t0=ar_Qc_t0*0.+new_Qc_t0\n\n #~~~~~~Write parameter file~~~~~~#\n tab_param=np.zeros((len(ar_cell_label),nb_param))\n tab_param[:,0]=ar_cell_label\n tab_param[:,1]=ar_coorx\n tab_param[:,2]=ar_coory\n tab_param[:,3]=ar_lambda\n tab_param[:,4]=ar_Xc\n tab_param[:,5]=ar_dam\n tab_param[:,6]=ar_tan_beta\n tab_param[:,7]=ar_tan_beta_channel\n tab_param[:,8]=ar_L\n tab_param[:,9]=ar_Ks\n tab_param[:,10]=ar_theta_r\n tab_param[:,11]=ar_theta_s\n tab_param[:,12]=ar_n_o\n tab_param[:,13]=ar_n_c\n tab_param[:,14]=ar_cell_down\n tab_param[:,15]=ar_pVs_t0\n tab_param[:,16]=ar_Vo_t0\n tab_param[:,17]=ar_Qc_t0\n tab_param[:,18]=ar_kc\n\n np.savetxt(file_out, tab_param)", "def update_settings( what_to_do, settings_inst ):\n from settings import smart_update\n from _settings import settings\n\n smart_update(settings_inst, settings)\n # ok, we want to have parallel\n if what_to_do == \"wikis_to_huge_math\":\n settings_inst[\"input\"] = settings_inst[\"wiki\"][\"xml\"]\n # there are too few so each process should take only 1\n settings_inst[\"parallel\"][\"chunksize\"] = 1", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def update(params: hk.Params, opt_state: OptState, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> Tuple[hk.Params, OptState]:\n grads = jax.grad(loss)(params, batch, labels, xent_weight, l1_coeff, l2_coeff)\n updates, opt_state = opt.update(grads, opt_state)\n new_params = optax.apply_updates(params, updates)", "def setSolverTau(*argv):", "def update_value(self, (features, action), parameters):\n features = np.array(features)\n new_value = features.dot(parameters)\n features = list(features)\n\n self.__setitem__((features, action), new_value)", "def update_likelihood_approximation(self, **kwargs):\r\n self.likelihood.restart()\r\n self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0, **kwargs)\r\n self._set_params(self._get_params())", "def update_config(config, args):\n if args.cfg:\n _update_config_from_file(config, args.cfg)\n config.defrost()\n if args.dataset:\n config.DATA.DATASET = args.dataset\n if args.batch_size:\n config.DATA.BATCH_SIZE = args.batch_size\n config.DATA.BATCH_SIZE_EVAL = args.batch_size\n if args.batch_size_eval:\n config.DATA.BATCH_SIZE_EVAL = args.batch_size_eval\n if args.image_size:\n config.DATA.IMAGE_SIZE = args.image_size\n if args.accum_iter:\n config.TRAIN.ACCUM_ITER = args.accum_iter\n if args.data_path:\n config.DATA.DATA_PATH = args.data_path\n if args.output:\n config.SAVE = args.output\n if args.eval:\n config.EVAL = True\n if args.pretrained:\n config.MODEL.PRETRAINED = args.pretrained\n if args.resume:\n config.MODEL.RESUME = args.resume\n if args.last_epoch:\n config.TRAIN.LAST_EPOCH = args.last_epoch\n if args.amp: # only for training\n config.AMP = not config.EVAL\n config.freeze()\n return config", "def update(self, args):\n 
pass", "def set_coefs(self, sites, values):\n self.set_coefs_sites(sites)\n self.epistasis.data.values = values\n self.build()\n return self", "def updateParameters(self):\n\n return", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters" ]
[ "0.60227436", "0.57024354", "0.5569949", "0.55639017", "0.5491466", "0.54674345", "0.54021394", "0.5397958", "0.5397819", "0.52829903", "0.527833", "0.52733487", "0.52227044", "0.5200642", "0.5179809", "0.51742566", "0.51651156", "0.5120121", "0.51198685", "0.511294", "0.5111121", "0.5094797", "0.5092131", "0.5072879", "0.5066122", "0.5054058", "0.5052855", "0.5046052", "0.5042054", "0.50189596" ]
0.5842247
1
Updates the internalNetworks for the given LI [Arguments]
def fusion_api_update_li_internal_networks(self, body=None, uri=None, api=None, headers=None):
    param = '/internalNetworks'
    return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def assign_networks(cls, instance, networks):\n instance.assigned_networks_list = networks\n db().flush()", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def update_net(self) -> None:\n self.units.update_net()", "def _update_module_target_networks(\n self, module_id: ModuleID, hps: AppoLearnerHyperparameters\n ) -> None:", "def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a", "def network_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n networks = self.client.networks.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n continue\n\n if len(networks) == 0:\n Console.info(\"No network exist\" + host['Ip'])\n continue\n\n for networkm in networks:\n network = networkm.__dict__['attrs']\n network['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(network)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n r = Rest.delete('Network', filter)\n r = Rest.post('Network', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def propagate_networks(networks, pool):\n for network in networks:\n network.forward_propagation()\n pool.append(network)", "def setup_networks(self, configs):\n self.__networks = self.setup_components(configs, 'scale_client.networks')", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network 
{0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def update_nets_with_vias(pcb_data: List[Dict[str, Any]], nets: List[Net]):\n vias = get_all_dicts_by_key(pcb_data, 'via')\n for via in vias:\n at: Coords = get_dict_by_key(via['via'], 'at')['at']\n at[1] = str(-1*float(at[1]))\n size: str = get_dict_by_key(via['via'], 'size')['size']\n layer_data: str = get_dict_by_key(via['via'], 'layers')['layers']\n layers: List[Layer] = convert_to_layers(layer_data)\n new_via: Via = Via(center=at, size=size, layers=layers)\n net_id: str = get_dict_by_key(via['via'], 'net')['net']\n for net in nets:\n if float(net.net_id) == float(net_id):\n net.vias.append(new_via)", "def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]", "def test_networking_project_network_update(self):\n pass", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)", "def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes", "def update_networks(self, agent, force_hard=False):\n\n if self.update_type == \"soft\" and not force_hard:\n self._soft_update(agent.actor, agent.actor_target)\n self._soft_update(agent.critic, agent.critic_target)\n elif self.t_step % self.C == 0 or force_hard:\n self._hard_update(agent.actor, agent.actor_target)\n self._hard_update(agent.critic, agent.critic_target)", "def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def add_networks(networks=None,\n skip_generating_certificates=False,\n verbose=False,\n config_file=None):\n setup_console_logger(verbose)\n config.load_config(config_file)\n logger.info('Trying to add new networks to Manager...')\n\n networks = json.loads(networks)\n _validate_networks(networks)\n metadata = load_cert_metadata()\n\n _update_metadata_file(metadata, 
networks)\n if not skip_generating_certificates:\n create_internal_certs()\n\n script_path = join(SCRIPT_DIR, 'update-manager-networks.py')\n hostname = metadata.get('hostname') or _get_hostname()\n args = [\n '--hostname', hostname,\n '--networks', json.dumps(networks),\n ]\n if bool(metadata.get('broker_addresses')):\n # if we store broker addresses in the metadata file, that means we\n # have a local broker and must update that too\n args.append('--broker')\n\n run_script_on_manager_venv(script_path, script_args=args)\n\n logger.notice('New networks were added successfully. Please restart the'\n ' following services: `nginx`, `cloudify-mgmtworker`,'\n '`cloudify-rabbitmq`')", "def update_interfaces(self, interfaces):\n for i in interfaces:\n self.update_interface(i)", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def fusion_api_update_li_ethernet_settings(self, body=None, uri=None, api=None, headers=None):\n param = '/ethernetSettings'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n raise NotImplementedError" ]
[ "0.6205298", "0.60175586", "0.59753895", "0.59357196", "0.586205", "0.58562976", "0.57563895", "0.558638", "0.55648595", "0.5520264", "0.5353721", "0.52993184", "0.5266888", "0.5258318", "0.52572656", "0.5254502", "0.5246816", "0.5242387", "0.5241014", "0.52166176", "0.5209664", "0.5192997", "0.51540846", "0.51230526", "0.5120159", "0.51003075", "0.50851536", "0.5083048", "0.5058999", "0.50508624" ]
0.74014723
0
Updates the Port Monitor for the given LI [Arguments]
def fusion_api_update_li_port_monitor_configuration(self, body=None, uri=None, api=None, headers=None):
    param = '/port-monitor'
    return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def cmd_port(args):", "def modify_mstp_ports(self, ports, instance=0, **kwargs):\n pass", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def update(self, args):\n pass", "def update(*args):", "def run(self, *arg, **kw):\n self.dirty = False\n for port in self.inputs:\n self.get_input_data(port)", "def setMonitorParam(self, monName, *params):\n monitorRef = self._ShREEKMonitors.get(monName, None)\n if monitorRef == None:\n msg = \"Tried to configure Non-existent monitor:\"\n msg += \"\\n%s\\n\" % monName\n msg += \"Existing Monitors:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n MissingMonitor = monName,\n ValidMonitors = self._ShREEKMonitors.keys())\n monitorRef.addPositionalArg(*params)\n return", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)", "def modify_ports(self, ports, **kwargs):\n pass", "def update(clients, context, name=None):\n port_id = context['port_id']\n logger.info(\"Taking action port.update {}.\".format(port_id))\n neutron = clients.get_neutron()\n body = {'port': {}}\n if name is not None:\n body['port']['name'] = name\n neutron.update_port(port_id, body=body)", "def Update(self,n,l):\n\t\tself.n = n\n\t\tself.l = l", "def save_io_ports(self, *args):\n if args[0] == 'Save':\n title = args[1].title\n text = args[1].text_field.text\n try:\n port = int(text, 16)\n if port < 0 or port > 4095:\n toast('Invalid port number. Valid port numbers [0-4095]')\n else:\n if is_valid_port(port):\n hex_port = convert_to_hex(port, 12)\n if TRAFFIC_LIGHT['menu_title'] in title:\n update_reserved_ports(TRAFFIC_LIGHT,\n TRAFFIC_LIGHT['port'],\n hex_port)\n self.traffic_lights.text = TRAFFIC_LIGHT['menu_title'] + '. Current Port: ' + str(\n TRAFFIC_LIGHT['port'])\n toast_message = f'Changed Traffic Light I/O port number to {port}'\n elif SEVEN_SEGMENT_DISPLAY['menu_title'] in title:\n update_reserved_ports(SEVEN_SEGMENT_DISPLAY,\n SEVEN_SEGMENT_DISPLAY['port'],\n hex_port)\n self.seven_segment.text = SEVEN_SEGMENT_DISPLAY['menu_title'] + '. Current Port: ' + str(\n SEVEN_SEGMENT_DISPLAY['port'])\n toast_message = f'Changed Seven Segment I/O port number to {port}'\n elif ASCII_TABLE['menu_title'] in title:\n if port > 4088:\n toast_message = 'Invalid port for ASCII Table. Valid ports [0-4088]'\n else:\n try:\n update_reserved_ports(ASCII_TABLE,\n ASCII_TABLE['port'],\n hex_port, True)\n self.ascii_table.text = ASCII_TABLE['menu_title'] + '. Current Port: ' + str(\n ASCII_TABLE['port'])\n toast_message = f'Changed ASCII Table I/O port number to {port}'\n except MemoryError as e:\n toast_message = str(e)\n else:\n update_reserved_ports(HEX_KEYBOARD,\n HEX_KEYBOARD['port'],\n hex_port)\n self.hex_keyboard.text = HEX_KEYBOARD['menu_title'] + '. 
Current Port: ' + str(\n HEX_KEYBOARD['port'])\n toast_message = f'Changed HEX Keyboard I/O port number to {port}'\n toast(toast_message)\n else:\n toast('Invalid input. That port is reserved!')\n except ValueError as e:\n toast(f'Not a valid port!')", "def fusion_api_edit_interconnect_ports(self, body, uri, api=None, param='', headers=None):\n param = '/update-ports%s' % param\n return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)", "def update_volt_range(self):\r\n self.volt_range_index = self.VoltmeterRangeValue.currentIndex()\r\n self.cmd = self.volt_range_switch.get(self.volt_range_index, None)\r\n if self.connected:\r\n self.I_source.write(self.cmd)", "def update_board(self, mpos):\n pass", "def battery_update(self, battery_level, device_address):\n\n num_widgets = self.ports_found.count()\n for idx in range(num_widgets):\n\n # Ignore port widgets (only interested in Myo device rows)\n list_widget = self.ports_found.item(idx)\n if hasattr(list_widget, \"port_idx\"):\n continue\n\n myo_widget = self.ports_found.itemWidget(list_widget)\n\n if myo_widget.myo_device[\"sender_address\"].endswith(device_address):\n myo_widget.battery_level.setValue(battery_level)", "def command_wrapupdate(self):\n wrapupdater.main(*self.args())", "def _number_list_index_changed(self, *a):\r\n self.api.set_list_index(self.number_list_index.get_value())\r\n \r\n # Make sure.\r\n n = self.api.get_list_index()\r\n self.number_list_index.set_value(n, block_events=True)\r\n \r\n # Update the frequency and power in the safest possible way\r\n# fs = self.api.get_list_frequencies()\r\n# ps = self.api.get_list_powers()\r\n# self.number_dbm.set_value(ps[n])\r\n# self.number_frequency.set_value(fs[n])\r\n \r\n # Update the frequency and power using the graph if we have it.\r\n \r\n # If enabled, things are out of sync, get the list.\r\n if self.button_send_list._widget.isEnabled(): self.query_list()\r\n \r\n # Get the power and frequency from the plot\r\n self.number_dbm .set_value(self.plot_list['P_dBm'][n])\r\n self.number_frequency.set_value(self.plot_list['f_Hz'][n])", "def test_update_health_monitor(self):\r\n resource = 'health_monitor'\r\n cmd = healthmonitor.UpdateHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--timeout', '5'],\r\n {'timeout': '5', })", "def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()", "def add_in_port(self, m: int, content: str, **opts) -> None:", "def test_cycle_monitor(hlwm, mon_num, focus_idx, delta, command):\n for i in range(1, mon_num):\n hlwm.call('add tag' + str(i))\n hlwm.call('add_monitor 800x600+' + str(i * 10))\n 
hlwm.call(['focus_monitor', str(focus_idx)])\n assert hlwm.get_attr('monitors.focus.index') == str(focus_idx)\n assert hlwm.get_attr('monitors.count') == str(mon_num)\n\n hlwm.call([command, delta])\n\n new_index = (focus_idx + int(delta) + mon_num) % mon_num\n assert hlwm.get_attr('monitors.focus.index') == str(new_index)", "def remote_update(self, increment):\r\n\r\n self.window += increment", "def refreshPorts(self, event):\n logging.debug(\"Refreshing ports.\")\n self.availablePorts = self.controller.getAvailablePorts()\n\n # Delete old dropdown options\n self.portSelector[\"menu\"].delete(0, \"end\")\n for value in self.availablePorts:\n\n def _callback(value=value):\n self.controller.updatePort(value)\n self.serialPortVar.set(value)\n\n self.portSelector[\"menu\"] \\\n .add_command(label=value,\n command=_callback)\n return", "def updateListCtrl(self, variables):\n\t\tfs = \"%.3f%%\"\n\t\tfs2 = \"%.4f\"\n\t\tds = \"%d\"\n\t\tss = \"%s\"\n\t\toffset = -2\n\t\tcoloffset = 1\n\t\tn = 2\n\t\tmapping = { \"Ch1Th\":(n, 0, ss),\n\t\t\t\t \"Ch2Th\":(n + 1, 0, ss),\n\t\t\t\t \"OverThresholdCh1\":(n + 2, 0, ss),\n\t\t\t\t \"ColocAmount\":(n + 3, 0, ds),\n\t\t\t\t \"PercentageVolumeCh1\":(n + 4, 0, ss),\n\t\t\t\t \"PercentageVolumeCh2\":(n + 5, 0, ss),\n\t\t\t\t \"ThresholdM1\":(n + 6, 0, fs2),\n\t\t\t\t \"ThresholdM2\":(n + 7, 0, fs2),\n\t\t\t\t \"PValue\":(n + 8, 0, fs2),\n\t\t\t\t \"ColocPercent\":(n + 9, 0, fs, 100),\n# volume = number of voxels (Imaris)\n# material = intensity\n\t\t\t\t \"PercentageTotalCh1\":(n + 10, 0, fs, 100),\n\t\t\t\t \"PercentageTotalCh2\":(n + 11, 0, fs, 100),\n\t\t\t\t \"PearsonWholeImage\":(n + 12, 0, fs2),\n\t\t\t\t \"PearsonImageAbove\":(n + 13, 0, fs2),\n\t\t\t\t \"PearsonImageBelow\":(n + 14, 0, fs2),\n#\t\t\t\t \"M1\":(9,0,fs2),\n#\t\t\t\t \"M2\":(10,0,fs2),\n\t\t\t\t \"SumCh1\":(n + 15, 0, ss),\n\t\t\t\t \"SumCh2\":(n + 16, 0, ss),\n\t\t\t\t \"NonZeroCh1\":(n + 17, 0, ss),\n\t\t\t\t \"DiffStainVoxelsCh1\":(n + 18, 0, ss),\n\t\t\t\t \"DiffStainVoxelsCh2\":(n + 19, 0, ss),\n\n\t\t\t\t \"DiffStainPercentageCh1\":(n + 20, 0, ss),\n\t\t\t\t \"DiffStainPercentageCh2\":(n + 21, 0, ss),\n\t\t\t\t \"RObserved\":(n + 22, 0, fs2),\n\t\t\t\t \"RRandMean\":(n + 23, 0, ss),\n\t\t\t\t \"NumIterations\":(n + 24, 0, ss)\n\t\t}\n\t \n\t \t#if scripting.TFLag:\n\t \t#\tdel mapping[\"DiffStainPercentageCh1\"]\n\t \t#\tdel mapping[\"DiffStainPercentageCh2\"]\n\t \t#\tdel mapping[\"DiffStainVoxelsCh1\"]\n\t \t#\tdel mapping[\"DiffStainVoxelsCh2\"]\n\t \t#\tdel mapping[\"RObserved\"]\n\t \t#\tdel mapping[\"RRandMean\"]\n\t \t#\tdel mapping[\"NumIterations\"]\n\t \t\t\n\t \t\t\n\t \tdefined = len(variables.keys())\n\t \t\t\n\t\tfor item in mapping.keys():\n\t\t\tval = 0.0\n\t\t\tval1 = \"\"\n\t\t\tval2 = \"\"\n\t\t\tif item == \"Ch1Th\":\n\t\t\t\tif defined:\n\t\t\t\t\tth1 = variables.get(\"LowerThresholdCh1\")\n\t\t\t\t\tth2 = variables.get(\"UpperThresholdCh1\")\n\t\t\t\t\tval = \"%d / %d\" % (th1, th2)\n\t\t\t\t\tval1 = th1\n\t\t\t\t\tval2 = th2\n\t\t\t\telse:\n\t\t\t\t\tval = \"0 / 128\"\n\t\t\t\t\tval1 = 0\n\t\t\t\t\tval2 = 128\n\t\t\telif item == \"Ch2Th\":\n\t\t\t\tif defined:\n\t\t\t\t\tth1 = variables.get(\"LowerThresholdCh2\")\n\t\t\t\t\tth2 = variables.get(\"UpperThresholdCh2\")\n\t\t\t\t\tval = \"%d / %d\" % (th1, th2)\n\t\t\t\t\tval1, val2 = th1, th2\n\t\t\t\telse:\n\t\t\t\t\tval = \"0 / 128\"\n\t\t\t\t\tval1, val2 = 0, 128\n\t\t\telif item == \"PercentageVolumeCh1\":\n\t\t\t\tif defined:\n\t\t\t\t\tpvolch = variables.get(item)\n\t\t\t\t\tpmatch = 
variables.get(\"PercentageMaterialCh1\")\n\t\t\t\t\tif not pvolch:\n\t\t\t\t\t\tpvolch = 0\n\t\t\t\t\tif not pmatch:\n\t\t\t\t\t\tpmatch = 0\n\t\t\t\t\tval = \"%.3f%% / %.3f%%\" % (pvolch * 100, pmatch * 100)\n\t\t\t\t\tval1 = \"%.3f%%\" % (pvolch * 100)\n\t\t\t\t\tval2 = \"%.3f%%\" % (pmatch * 100)\n\t\t\t\telse:\n\t\t\t\t\tval = \"0.000% / 0.000%\"\n\t\t\t\t\tval1 = \"0.000%\"\n\t\t\t\t\tval2 = \"0.000%\"\n\t\t\telif item == \"PercentageVolumeCh2\":\n\t\t\t\tif defined:\n\t\t\t\t\tpvolch = variables.get(item)\n\t\t\t\t\tpmatch = variables.get(\"PercentageMaterialCh2\")\n\t\t\t\t\tif not pvolch:\n\t\t\t\t\t\tpvolch = 0\n\t\t\t\t\tif not pmatch:\n\t\t\t\t\t\tpmatch = 0\n\t\t\t\t\tval = \"%.3f%% / %.3f%%\" % (pvolch * 100, pmatch * 100)\n\t\t\t\t\tif not pvolch:\n\t\t\t\t\t\tpvolch = 0\n\t\t\t\t\tif not pmatch:\n\t\t\t\t\t\tpmatch = 0\n\t\t\t\t\tval1 = \"%.3f%%\" % (pvolch * 100)\n\t\t\t\t\tval2 = \"%.3f%%\" % (pmatch * 100)\n\t\t\t\telse:\n\t\t\t\t\tval = \"0.000% / 0.000%\"\n\t\t\t\t\tval1 = \"0.000%\"\n\t\t\t\t\tval2 = \"0.000%\"\n\t\t\telif item == \"SumCh1\":\n\t\t\t\tif defined:\n\t\t\t\t\tsum = variables.get(item)\n\t\t\t\t\tsumth = variables.get(\"SumOverThresholdCh1\")\n\t\t\t\t\t\n\t\t\t\t\tif not sum:\n\t\t\t\t\t\tsum = 0\n\t\t\t\t\tif not sumth:\n\t\t\t\t\t\tsumth = 0\n\t\t\t\t\tval = \"%d / %d\" % (sum, sumth)\n\t\t\t\t\tval1 = sum\n\t\t\t\t\tval2 = sumth\n\t\t\t\telse:\n\t\t\t\t\tval = \"0 / 0\"\n\t\t\t\t\tval1 = 0\n\t\t\t\t\tval2 = 0\n\t\t\telif item == \"SumCh2\":\n\t\t\t\tif defined:\n\t\t\t\t\tsum = variables.get(item)\n\t\t\t\t\tsumth = variables.get(\"SumOverThresholdCh2\")\n\t\t\t\t\tif not sum:\n\t\t\t\t\t\tsum = 0\n\t\t\t\t\tif not sumth:\n\t\t\t\t\t\tsumth = 0\n\t\t\t\t\tval = \"%d / %d\" % (sum, sumth)\n\t\t\t\t\tval1 = sum\n\t\t\t\t\tval2 = sumth\n\t\t\t\telse:\n\t\t\t\t\tval = \"0 / 0\" \n\t\t\t\t\tval1 = 0\n\t\t\t\t\tval2 = 0\n\t\t\telif item == \"NonZeroCh1\":\n\t\t\t\tif defined:\n\t\t\t\t\tsum = variables.get(item)\n\t\t\t\t\tsumth = variables.get(\"NonZeroCh2\")\n\t\t\t\t\tif not sum:\n\t\t\t\t\t\tsum = 0\n\t\t\t\t\tif not sumth:\n\t\t\t\t\t\tsumth = 0\n\t\t\t\t\tval = \"%d / %d\" % (sum, sumth)\n\t\t\t\t\tval1 = sum\n\t\t\t\t\tval2 = sumth\n\t\t\t\telse:\n\t\t\t\t\tval = \"0 / 0\" \n\t\t\t\t\tval1 = 0\n\t\t\t\t\tval2 = 0\n\t\t\telif item == \"OverThresholdCh1\":\n\t\t\t\tif defined:\n\t\t\t\t\tsum = variables.get(item)\n\t\t\t\t\tsumth = variables.get(\"OverThresholdCh2\")\n\t\t\t\t\tif not sum:\n\t\t\t\t\t\tsum = 0\n\t\t\t\t\tif not sumth:\n\t\t\t\t\t\tsumth = 0\n\t\t\t\t\tval = \"%d / %d\" % (sum, sumth)\n\t\t\t\t\tval1 = sum\n\t\t\t\t\tval2 = sumth\n\t\t\t\telse:\n\t\t\t\t\tval = \"0 / 0\" \n\t\t\t\t\tval1 = 0\n\t\t\t\t\tval2 = 0 \n\n\t\t\telif item == \"DiffStainVoxelsCh1\":\n\t\t\t\tif defined:\n\t\t\t\t\tds = variables.get(item)\n\t\t\t\t\tdsint = variables.get(\"DiffStainIntCh1\")\n\t\t\t\t\tif not ds:\n\t\t\t\t\t\tds = 0\n\t\t\t\t\tif not dsint:\n\t\t\t\t\t\tdsint = 0\n\t\t\t\t\tval = \"%.3f / %.3f\" % (ds, dsint)\n\t\t\t\t\tval1 = ds\n\t\t\t\t\tval2 = dsint\n\t\t\t\telse:\n\t\t\t\t\tval = \"0.000 / 0.000\"\t\t\t \n\t\t\t\t\tval1 = 0.000\n\t\t\t\t\tval2 = 0.000\n\t\t\telif item == \"DiffStainVoxelsCh2\":\n\t\t\t\tif defined:\n\t\t\t\t\tds = variables.get(item)\n\t\t\t\t\tdsint = variables.get(\"DiffStainIntCh2\")\n\t\t\t\t\tif not ds:\n\t\t\t\t\t\tds = 0\n\t\t\t\t\tif not dsint:\n\t\t\t\t\t\tdsint = 0\n\t\t\t\t\tval = \"%.3f / %.3f\" % (ds, dsint)\n\t\t\t\t\tval1 = ds\n\t\t\t\t\tval2 = dsint\n\t\t\t\telse:\n\t\t\t\t\tval = \"0.000 / 
0.000\"\n\t\t\t\t\tval1 = 0.000\n\t\t\t\t\tval2 = 0.000\n\t\t\t\t\t\n\t\t\telif item == \"DiffStainPercentageCh1\":\n\t\t\t\tif defined:\n\t\t\t\t\tds = variables.get(\"DiffStainVoxelsCh1\")\n\t\t\t\t\tdsint = variables.get(\"DiffStainIntCh1\")\n\t\t\t\t\tif not ds:\n\t\t\t\t\t\tds = 0\n\t\t\t\t\tif not dsint:\n\t\t\t\t\t\tdsint = 0\n\t\t\t\t\tds = 1.0 / (ds + 1)\n\t\t\t\t\tdsint = 1.0 / (dsint + 1.0)\n\t\t\t\t\tval = \"%.3f / %.3f\" % (ds, dsint)\n\t\t\t\t\tval1 = ds\n\t\t\t\t\tval2 = dsint\n\t\t\t\telse:\n\t\t\t\t\tval = \"0.000 / 0.000\"\n\t\t\t\t\tval1 = 0.0\n\t\t\t\t\tval2 = 0.0\n\t\t\telif item == \"DiffStainPercentageCh2\":\n\t\t\t\tif defined:\n\t\t\t\t\tds = variables.get(\"DiffStainVoxelsCh2\")\n\t\t\t\t\tdsint = variables.get(\"DiffStainIntCh2\")\n\t\t\t\t\tif not ds:\n\t\t\t\t\t\tds = 0\n\t\t\t\t\tif not dsint:\n\t\t\t\t\t\tdsint = 0\n\t\t\t\t\tds = 1.0 / (ds + 1)\n\t\t\t\t\tdsint = 1.0 / (dsint + 1.0)\n\t\t\t\t\tval = \"%.3f / %.3f\" % (ds, dsint)\n\t\t\t\t\tval1 = ds\n\t\t\t\t\tval2 = dsint\n\t\t\t\telse:\n\t\t\t\t\tval = \"0.000 / 0.000\"\n\t\t\t\t\tval1 = 0.0\n\t\t\t\t\tval2 = 0.0\t\t\t\t\t \n\t\t\telif item == \"RRandMean\":\n\t\t\t\tif defined:\n\t\t\t\t\trandmean = variables.get(item)\n\t\t\t\t\tstdev = variables.get(\"RRandSD\")\n\t\t\t\t\tif not randmean:\n\t\t\t\t\t\trandmean = 0\n\t\t\t\t\tif not stdev:\n\t\t\t\t\t\tstdev = 0\n\t\t\t\telse:\n\t\t\t\t\trandmean = 0\n\t\t\t\t\tstdev = 0\n\t\t\t\tval = u\"%.3f \\u00B1 %.5f\" % (randmean, stdev)\n\t\t\telif item == \"NumIterations\":\n\t\t\t\tif defined:\n\t\t\t\t\tn1 = variables.get(\"NumIterations\")\n\t\t\t\t\tn2 = variables.get(\"ColocCount\")\n\t\t\t\t\tif not n1:\n\t\t\t\t\t\tn1 = 0\n\t\t\t\t\tif not n2:\n\t\t\t\t\t\tn2 = 0\n\t\t\t\t\tval1 = int(n1 - n2)\n\t\t\t\t\tval2 = n2\n\t\t\t\telse:\n\t\t\t\t\tval1 = 0\n\t\t\t\t\tval2 = 0\n\t\t\t\tval = \"%d / %d\" % (val1, val2)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tval = variables.get(item, None)\n\t\t\tif not val:\n\t\t\t\tval = 0\n\t\t\ttry:\n\t\t\t\tindex, col, format, scale = mapping[item]\n\t\t\texcept:\n\t\t\t\tindex, col, format = mapping[item]\n\t\t\t\tscale = 1\n\t\t\tindex += offset\n\t\t\tcol += coloffset\n\t\t\tval *= scale\n\t\t\t\n\t\t\tif not val1:\n\t\t\t\tval1 = val\n\t\t\t\tval2 = \"\"\n\t\t\tself.headervals[index][1] = val1\n\t\t\tself.headervals[index][2] = val2\n\t\t\tself.SetStringItem(index, col, format % val)", "def update_monitor(request, **kwargs):\n data = request.DATA\n monitor_id = data['monitor']['id']\n hm_type = data['monitor']['type']\n\n conn = get_sdk_connection(request)\n healthmonitor_kwargs = {\n 'delay': data['monitor'].get('delay'),\n 'timeout': data['monitor'].get('timeout'),\n 'max_retries': data['monitor'].get('max_retries'),\n 'max_retries_down': data['monitor'].get('max_retries_down'),\n 'admin_state_up': data['monitor'].get('admin_state_up'),\n 'name': data['monitor'].get('name')\n }\n if hm_type in ('HTTP', 'HTTPS'):\n healthmonitor_kwargs.update({\n 'http_method': data['monitor'].get('http_method'),\n 'url_path': data['monitor'].get('url_path'),\n 'expected_codes': data['monitor'].get('expected_codes')\n })\n\n healthmonitor = conn.load_balancer.update_health_monitor(\n monitor_id,\n **healthmonitor_kwargs\n )\n\n return _get_sdk_object_dict(healthmonitor)", "def update(self, methodName=None, elementName=None, args=None, kwargs=None):\n #if methodName != 'run':\n # return\n ##print methodName\n ##from dbgp.client import brk; brk(port=9011)\n #self.iteration = self.experiment.position.iter\n #exp = self.experiment\n ## check if the 
pause button was clicked\n #if self.pause:\n # exp.pause = True\n #elif self.runCount is not None:\n # self.runCount -= 1\n # if self.runCount == 0:\n # exp.pause = True\n #\n #runtimelistener.listenersEnabled = exp.pause" ]
[ "0.5833842", "0.5745774", "0.5347681", "0.52959704", "0.5214855", "0.5176801", "0.51571023", "0.5150093", "0.51301146", "0.51239413", "0.51145864", "0.5099718", "0.50938296", "0.5025443", "0.5011605", "0.49955344", "0.49346185", "0.49290094", "0.4830432", "0.48290905", "0.4820513", "0.4814127", "0.48100203", "0.48072127", "0.48068413", "0.47966596", "0.47916225", "0.4787656", "0.47833338", "0.47742814" ]
0.6569742
0
Updates the telemetry configuration for the given LI [Arguments]
def fusion_api_update_li_telemetry_configuration(self, body=None, uri=None, api=None, headers=None): return self.li.update(body=body, uri=uri, api=api, headers=headers, param="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, *args, **kwargs):\n self.logger.update(*args, **kwargs)", "def update(self, args):\n pass", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def UpdateConfig(self, instalog_config, update_info, env):\n if update_info.get('data_truncate', {}).get('enable', False):\n # If enable data_truncate, Instalog truncate once a day.\n instalog_config['buffer']['args']['truncate_interval'] = 86400\n\n threshold = update_info.get('input_http', {}).get(\n 'log_level_threshold', logging.NOTSET)\n instalog_config['input']['http_in']['args']['log_level_threshold'] = (\n threshold)\n\n if update_info.get('forward', {}).get('enable', False):\n args = update_info.get('forward', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_pull_socket_port\n instalog_config['output']['forward'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('forward')\n\n if update_info.get('customized_output', {}).get('enable', False):\n args = update_info.get('customized_output', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_customized_output_port\n instalog_config['output']['customized_output'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append(\n 'customized_output')\n\n if update_info.get('archive', {}).get('enable', False):\n instalog_config['output']['archive'] = {\n 'plugin': 'output_archive',\n 'args': update_info.get('archive', {}).get('args', {}).copy()\n }\n # Set the target_dir.\n target_dir = os.path.join(env.umpire_data_dir, 'instalog_archives')\n instalog_config['output']['archive']['args']['target_dir'] = target_dir\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('archive')", "def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)", "def update_log_config(self, monitor_name, log_config):\n pass", "def update_global_config(self, config, **kwargs):\n pass", "def fusion_api_reapply_li_configuration(self, uri, api=None, headers=None):\n param = '/configuration'\n return self.li.update(body=None, uri=uri, api=api, headers=headers, param=param)", "def update_from_args(self, args):\n args = vars(args)\n for key in args:\n if isinstance(getattr(self, key), tf.Variable):\n getattr(self, key).assign(args[key])\n else:\n setattr(self, key, args[key])\n \n # Set the config on the data class\n self.data = DataConfig(\n self.xml_annotation_path,\n self.csv_annotation_path,\n self.oxford_annotations_path,\n self.oxford_images_path,\n )", "def update(*args):", "def SetLoggingLevel(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"setLoggingLevel\", payload=payload, response_object=None)", "def update_config(config, args):\n if args.cfg:\n _update_config_from_file(config, args.cfg)\n config.defrost()\n if args.dataset:\n config.DATA.DATASET = args.dataset\n if 
args.batch_size:\n config.DATA.BATCH_SIZE = args.batch_size\n config.DATA.BATCH_SIZE_EVAL = args.batch_size\n if args.batch_size_eval:\n config.DATA.BATCH_SIZE_EVAL = args.batch_size_eval\n if args.image_size:\n config.DATA.IMAGE_SIZE = args.image_size\n if args.accum_iter:\n config.TRAIN.ACCUM_ITER = args.accum_iter\n if args.data_path:\n config.DATA.DATA_PATH = args.data_path\n if args.output:\n config.SAVE = args.output\n if args.eval:\n config.EVAL = True\n if args.pretrained:\n config.MODEL.PRETRAINED = args.pretrained\n if args.resume:\n config.MODEL.RESUME = args.resume\n if args.last_epoch:\n config.TRAIN.LAST_EPOCH = args.last_epoch\n if args.amp: # only for training\n config.AMP = not config.EVAL\n config.freeze()\n return config", "def configure(self, args):\n pass", "def SetTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('setTapSettings', payload=payload, response_object=None)", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n dic = self\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def update_state(self, **kwargs):\n\n for name in self.metrics:\n\n metric = self.metrics[name]\n\n argspec = inspect.getfullargspec(metric.update_state)\n\n kwargs_to_pass = {k: kwargs[k] for k in kwargs if k in argspec.args}\n\n metric.update_state(**kwargs_to_pass)", "def update_args(self, args):\n self.args = self.parser.parse_args(args)", "def conf_update(self):\n pass", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def update_config_item(self, elements: Dict[str, Any]) -> None:\n ...", "def config( **kwargs ):", "def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])", "def update_parameters(self, timestamp, inputs):\n pass", "def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],\n metric_time: datetime.datetime, *args, **kwargs):\n\n raise NotImplementedError", "def update(self, arg=None, **kwargs):\n if arg:\n if hasattr(arg, 'keys'):\n for k in 
arg: self[k] = arg[k]\n else:\n for k, v in arg: self[k] = v\n\n if kwargs:\n for k in kwargs: self[k] = kwargs[k]", "def SetTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"setTapSettings\", payload=payload, response_object=None)", "def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n remote.update_config(_get_current_project_name(), settings)", "def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config", "def config_logger(args):\n log_level = logging.INFO if args.log_level and args.log_level == 'INFO' else logging.DEBUG\n log.setLevel(log_level)\n log_handler = logging.StreamHandler()\n log_formatter = logging.Formatter('%(levelname)s: %(asctime)s - %(name)s:%(lineno)d - %(message)s')\n log_handler.setFormatter(log_formatter)\n log.addHandler(log_handler)" ]
[ "0.5785478", "0.55439204", "0.55149126", "0.55022407", "0.5438186", "0.5425908", "0.5271924", "0.5231504", "0.51998127", "0.5185919", "0.51744473", "0.5174025", "0.5156858", "0.51431435", "0.51091546", "0.50894624", "0.5031817", "0.5026999", "0.5024866", "0.5021545", "0.5017173", "0.5013075", "0.49972984", "0.49946508", "0.49859744", "0.49737215", "0.4965531", "0.49596435", "0.49284372", "0.4904995" ]
0.6669905
0
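Aside: the positive document above shows that the telemetry helper PUTs the caller's body straight to the supplied URI (param is the empty string), so the caller must pass the full telemetry-configuration URI itself. A minimal usage sketch, assuming `client` is a configured instance of the wrapper class and the URI and body fields are placeholders, not values from the dataset:

    # Sketch only: `client`, the URI, and the body field names are assumptions.
    telemetry_body = {
        "enableTelemetry": True,
        "sampleCount": 12,
        "sampleInterval": 300,
    }
    tc_uri = "/rest/logical-interconnects/<li-id>/telemetry-configurations/<tc-id>"
    response = client.fusion_api_update_li_telemetry_configuration(
        body=telemetry_body, uri=tc_uri)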
Updates the qos aggregated configuration for the given LI [Arguments]
def fusion_api_update_qos_aggregated_configuration(self, body=None, uri=None, api=None, headers=None): param = '/qos-aggregated-configuration' return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_qos(self, arg, qos):\n\n if isinstance(arg, (list, tuple)):\n for job_id in arg:\n self.change_qos(job_id, qos)\n\n elif isinstance(arg, int):\n cmd = 'update job {} QOS={}'.format(arg, qos)\n self.scontrol(cmd)\n\n elif str(arg).lower() == 'all':\n self._queue = None\n for job_id, attrs in self.queue.items():\n status = attrs[self.QCOL_STATUS].lower()\n if status == 'pd':\n self.change_qos(job_id, qos)\n\n else:\n e = ('Could not change qos of: {} with type {}'\n .format(arg, type(arg)))\n logger.error(e)\n raise ExecutionError(e)", "def set_qos(self, qos_id, set_specs_args):\n aname = \"cinder_v%s.set_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.set_keys(qos_id,\n set_specs_args)", "def update_configuration(age=72, retain_unsent=False)->dict:\n event_loop = asyncio.get_event_loop()\n event_loop.run_until_complete(configuration_manager.set_category_item_value_entry(_CONFIG_CATEGORY_NAME, \n 'age', age))\n event_loop.run_until_complete(configuration_manager.set_category_item_value_entry(_CONFIG_CATEGORY_NAME,\n 'retainUnsent', retain_unsent))\n return event_loop.run_until_complete(configuration_manager.get_category_all_items(_CONFIG_CATEGORY_NAME))", "def set_qos(self, qos, set_specs_args):\n self._impl.set_qos(qos.id, set_specs_args)\n return self._unify_qos(qos)", "def VolumeSetQos(min_iops,\n max_iops,\n burst_iops,\n#pylint: disable=unused-argument\n volume_names,\n volume_ids,\n volume_prefix,\n volume_regex,\n volume_count,\n source_account,\n source_account_id,\n test,\n mvip,\n username,\n password):\n#pylint: enable=unused-argument\n\n options = copy.deepcopy(locals())\n for key in [\"min_iops\", \"max_iops\", \"burst_iops\"]:\n options.pop(key, None)\n\n return VolumeModify(property_name=\"qos\",\n property_value={\n \"minIOPS\" : min_iops,\n \"maxIOPS\" : max_iops,\n \"burstIOPS\" : burst_iops\n },\n **options)", "def fusion_api_update_snmp_configuration(self, body=None, uri=None, api=None, headers=None):\n param = '/snmp-configuration'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)", "def test_400_enable_qos(self):\n if self._get_openstack_release() >= self.trusty_mitaka:\n unit = self.n_ovs_sentry\n set_default = {'enable-qos': 'False'}\n set_alternate = {'enable-qos': 'True'}\n self.d.configure('neutron-api', set_alternate)\n self._wait_and_check(sleep=60)\n qos_plugin = 'qos'\n config = u._get_config(\n self.neutron_api_sentry, '/etc/neutron/neutron.conf')\n service_plugins = config.get(\n 'DEFAULT',\n 'service_plugins').split(',')\n if qos_plugin not in service_plugins:\n message = \"{} not in service_plugins\".format(qos_plugin)\n amulet.raise_status(amulet.FAIL, msg=message)\n\n config = u._get_config(\n unit,\n '/etc/neutron/plugins/ml2/openvswitch_agent.ini')\n extensions = config.get('agent', 'extensions').split(',')\n if qos_plugin not in extensions:\n message = \"qos not in extensions\"\n amulet.raise_status(amulet.FAIL, msg=message)\n\n u.log.debug('Setting QoS back to {}'.format(\n set_default['enable-qos']))\n self.d.configure('neutron-api', set_default)\n self._wait_and_check()\n u.log.debug('OK')", "def add_qos(self, qos):\n \n qos_id = qos[\"ovsdb:qos-entries\"][0][\"qos-id\"]\n self.qos_dict[qos_id] = qos", "def get_data_qos(args):\n diff_data(args, \"qos\")", "def set_qos_stat_type(self, iface, ptype):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def ra_llc_configuration_set(host_id, llc_configuration_fields, llc_configuration_param, 
user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n result = ''\n err1 = [0, 0, 0, 0, 0]\n form_name = ['ARQ Mode', 'ArqWin(Retransmit Window Size)', 'Frame Loss Threshold',\n 'Leaky Bucket Timer', 'Frame Loss Time Out']\n param = []\n dictarr = []\n resultarray = {}\n param.append('llcArqEnable.1')\n param.append('arqWin.1')\n param.append('frameLossThreshold.1')\n param.append('leakyBucketTimerVal.1')\n param.append('frameLossTimeout.1')\n ra_llc_config = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n ra_llc_config = sqlalche_obj.session.query(SetOdu16RALlcConfTable).filter(\n SetOdu16RALlcConfTable.config_profile_id == device_param_list[0][4]).first()\n for i in range(len(llc_configuration_fields)):\n oidname = oid_name[llc_configuration_fields[i]]\n oidtype = oid_type[llc_configuration_fields[i]]\n oidvalue = llc_configuration_param[i]\n result += snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], oidname, oidtype, oidvalue)\n err = error_odu16(result, param, err1)\n val = ''\n try:\n el = EventLog()\n if 1 in err1:\n el.log_event(\"Values Updated in UBR LLC Form\", \"%s\" % (user_name))\n for j in range(0, len(llc_configuration_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = llc_configuration_param[j]\n dict[\"textbox\"] = llc_configuration_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err1[0] == 1:\n ra_llc_config.llc_arq_enable = llc_configuration_param[0]\n if err1[1] == 1:\n ra_llc_config.arq_win = llc_configuration_param[1]\n if err1[2] == 1:\n ra_llc_config.frame_loss_threshold = llc_configuration_param[2]\n if err1[3] == 1:\n ra_llc_config.leaky_bucket_timer_val = llc_configuration_param[3]\n if err1[4] == 1:\n ra_llc_config.frame_loss_timeout = llc_configuration_param[4]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if err != '':\n raise Set_exception\n except Set_exception as e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16RALlcConfTable'\n resultarray['formAction'] = 'Llc_Cancel_Configuration.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)", "def test_b_negative_add_qos(self):\n qoss = {\"qos_120\": 120, \"qos_-5\": -5}\n for qos_name, qos_value in qoss.iteritems():\n testflow.step(\n \"Create CPU QoS %s on datacenter %s with parameters: %s\",\n qos_name, conf.DC_NAME[0], qos_value\n )\n assert not ll_datacenters.add_qos_to_datacenter(\n datacenter=conf.DC_NAME[0],\n qos_name=qos_name,\n qos_type=conf.QOS_TYPE_CPU,\n cpu_limit=qos_value\n )", "def test_qos_specs(self):\n qos = {'maxIOPS': 1000, 'maxBWS': 2048}\n snapshot = fake_snapshot.fake_snapshot_obj(\n self.ctx, **{'volume': self.volume,\n 'provider_id': self.snapshot_id,\n 'volume_size': 8})\n extraspecs = {}\n self.driver._get_volumetype_qos = mock.MagicMock()\n self.driver._get_volumetype_qos.return_value = qos\n self.driver._get_volumetype_extraspecs = mock.MagicMock()\n self.driver._get_volumetype_extraspecs.return_value = extraspecs\n\n props = self.driver.initialize_connection_snapshot(\n snapshot,\n self.connector)\n\n self.assertEqual(1000, int(props['data']['iopsLimit']))\n self.assertEqual(2048, int(props['data']['bandwidthLimit']))", "def gbp_policy_cfg_upd_all(self, cfgobj, name_uuid, attr):\n cfgobj_dict = 
{\"action\": \"policy-action\",\n \"classifier\": \"policy-classifier\",\n \"rule\": \"policy-rule\",\n \"ruleset\": \"policy-rule-set\",\n \"group\": \"policy-target-group\",\n \"target\": \"policy-target\",\n \"l2p\": \"l2policy\",\n \"l3p\": \"l3policy\",\n \"nsp\": \"network-service-policy\",\n \"extseg\": \"external-segment\",\n \"extpol\": \"external-policy\",\n \"natpool\": \"nat-pool\"}\n if cfgobj != '':\n if cfgobj not in cfgobj_dict:\n raise KeyError\n if name_uuid == '' or not isinstance(attr, dict):\n _log.info('''Function Usage: gbp_policy_cfg_upd_all 'rule' \"abc\"\n {attr:attr_val}\\n\n --cmd_val == 0:delete; 1:create; 2:update\\n\n -- name_uuid == UUID or name_string\\n''')\n return 0\n\n # Build the command with mandatory params\n cmd = 'gbp %s-update ' % cfgobj_dict[cfgobj] + str(name_uuid)\n # Build the cmd string for optional/non-default args/values\n for arg, value in six.iteritems(attr):\n if '_' in arg:\n arg = string.replace(arg, '_', '-')\n cmd = cmd + \" --\" + (\"%s %s\" % (arg, value))\n _log.info(cmd)\n # Execute the update cmd\n cmd_out = commands.getoutput(cmd)\n #_log.info(cmd_out)\n # Catch for non-exception error strings, even though try clause\n # succeded\n if self.cmd_error_check(cmd_out) == 0:\n return 0\n return 1", "def update_device_pool(arn=None, name=None, description=None, rules=None):\n pass", "def set_qos_key(self, qos_id, **kwargs):\n put_body = json.dumps({\"qos_specs\": kwargs})\n resp, body = self.put('qos-specs/%s' % qos_id, put_body)\n body = json.loads(body)\n self.validate_response(schema.set_qos_key, resp, body)\n return rest_client.ResponseBody(resp, body)", "def qos_type(self, qos_type):\n\n self._qos_type = qos_type", "def pre_qos_queue_update(self, resource_id, resource_dict):\n pass", "def update_qos(tenant_id, qos_id, new_qos_name=None):\n session = db.get_session()\n try:\n qos = (session.query(network_models_v2.QoS).\n filter_by(tenant_id=tenant_id).\n filter_by(qos_id=qos_id).one())\n if new_qos_name:\n qos[\"qos_name\"] = new_qos_name\n session.merge(qos)\n session.flush()\n return qos\n except exc.NoResultFound:\n raise c_exc.QosNotFound(qos_id=qos_id,\n tenant_id=tenant_id)", "def post_qos_queue_update(self, resource_id, resource_dict):\n pass", "def add_qos_sai_args(parser):\n qos_group = parser.getgroup(\"QoS test suite options\")\n\n qos_group.addoption(\n \"--disable_test\",\n action=\"store\",\n type=str2bool,\n default=True,\n help=\"Control execution of buffer watermark experimental tests\",\n )\n\n qos_group.addoption(\n \"--qos_dst_ports\",\n action=\"store\",\n type=lambda opt_value: [int(v) for v in opt_value.translate(None, \"[]\").split(',')],\n default=None,\n help=\"QoS SAI comma separated list of destination ports. Test currently expects exactly 3 destination ports\",\n )\n\n qos_group.addoption(\n \"--qos_src_ports\",\n action=\"store\",\n type=lambda opt_value: [int(v) for v in opt_value.translate(None, \"[]\").split(',')],\n default=None,\n help=\"QoS SAI comma separated list of source ports. 
Test currently expects exactly 1 source port\",\n )\n\n qos_group.addoption(\n \"--qos_dual_tor\",\n action=\"store\",\n type=str2bool,\n default=False,\n help=\"Test QoS on dual ToR ports\"\n )", "def set_qos(self, on_ok):\n self._channel.basic_qos(\n prefetch_count=self._prefetch_count, callback=on_ok)", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos_elements__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos_elements__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def _set_qos(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_qos_openconfig_qos_interfaces__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"qos must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_qos_openconfig_qos_interfaces__qos, is_container='container', yang_name=\"qos\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__qos = t\n if hasattr(self, '_set'):\n self._set()", "def qos_workload_modify(self, workload_name, read_ahead=None):\n return self.request( \"qos-workload-modify\", {\n 'workload_name': [ workload_name, 'workload-name', [ basestring, 'None' ], 
False ],\n 'read_ahead': [ read_ahead, 'read-ahead', [ basestring, 'None' ], False ],\n }, {\n } )", "def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)", "async def qos(\n self,\n prefetch_size: int = 0,\n prefetch_count: int = 0,\n connection_global: bool = False,\n ):\n await self.channel.basic_qos(\n prefetch_size=prefetch_size,\n prefetch_count=prefetch_count,\n connection_global=connection_global,\n )", "def config_attributes(dut, **kwargs):\n cli_type = st.get_ui_type(dut, **kwargs)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n sflow_key = kwargs.get(\"sflow_key\", \"global\")\n command = \"\"\n commands = list()\n if \"sample_rate\" in kwargs and \"interface_name\" in kwargs:\n if cli_type == \"click\":\n command += \"config sflow interface sample-rate {} {}\".format(kwargs[\"interface_name\"], kwargs[\"sample_rate\"])\n commands.append(command)\n elif cli_type == \"klish\":\n interface_details = utils_obj.get_interface_number_from_name(kwargs[\"interface_name\"])\n if not interface_details:\n st.log(\"Interface details not found {}\".format(interface_details))\n return False\n commands.append(\"interface {} {}\".format(interface_details.get(\"type\"), interface_details.get(\"number\")))\n if \"no_form\" in kwargs:\n command = \"no sflow sampling-rate\"\n else:\n command = \"sflow sampling-rate {}\".format(kwargs[\"sample_rate\"])\n commands.append(command)\n commands.append(\"exit\")\n elif cli_type == \"rest\":\n data = {\"sonic-sflow:sample_rate\":int(kwargs[\"sample_rate\"])}\n url = \"{}/SFLOW_SESSION/SFLOW_SESSION_LIST={}/sample_rate\".format(REST_URI, kwargs[\"interface_name\"])\n output = st.rest_modify(dut, url, data)\n st.log(\"REST config_attributes SAMPLE RATE OUTPUT -- {}\".format(output))\n if output and output[\"status\"] != 204:\n return False\n return True\n else:\n st.log(\"UNSUPPORTED CLI TYPE -- {}\".format(cli_type))\n return False\n st.config(dut, commands, type=cli_type)\n if \"polling_interval\" in kwargs:\n if cli_type == \"click\":\n command += \"config sflow polling-interval {};\".format(kwargs[\"polling_interval\"])\n commands.append(command)\n elif cli_type == \"klish\":\n if \"no_form\" in kwargs:\n command = \"no sflow polling-interval\"\n else:\n command = \"sflow polling-interval {}\".format(kwargs[\"polling_interval\"])\n commands.append(command)\n elif cli_type == \"rest\":\n data = {\"sonic-sflow:polling_interval\":int(kwargs[\"polling_interval\"])}\n url = \"{}/SFLOW/SFLOW_LIST={}/polling_interval\".format(REST_URI, sflow_key)\n output = st.rest_modify(dut, url, data)\n st.log(\"REST config_attributes POLLING RATE OUTPUT -- {}\".format(output))\n if output and output[\"status\"] != 204:\n return False\n return True\n else:\n st.log(\"UNSUPPORTED CLI TYPE -- {}\".format(cli_type))\n return False\n st.config(dut, commands, type=cli_type)\n return True", "def conf_update(self):\n pass" ]
[ "0.67901707", "0.5880294", "0.569532", "0.55881894", "0.54956216", "0.53930706", "0.5311097", "0.5298383", "0.5256257", "0.518485", "0.5147952", "0.5064871", "0.5061133", "0.50596696", "0.5018651", "0.5007533", "0.49908927", "0.49765438", "0.4969579", "0.4967064", "0.49368778", "0.49145353", "0.4905444", "0.48995954", "0.4889607", "0.4867293", "0.480391", "0.47728413", "0.47579956", "0.47577944" ]
0.7223659
0
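Aside: unlike the telemetry helper, the QoS helper in the document above appends '/qos-aggregated-configuration' to whatever URI it is given, so the caller passes the base LI URI. A hedged sketch under the same assumptions (`client` and all body field values are illustrative):

    # Sketch only; the body fields are assumed for illustration.
    qos_body = {
        "type": "qos-aggregated-configuration",
        "activeQosConfig": {"type": "QosConfiguration",
                            "configType": "Passthrough"},
    }
    response = client.fusion_api_update_qos_aggregated_configuration(
        body=qos_body, uri="/rest/logical-interconnects/<li-id>")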
Updates the redistribute logins for the given LI [Arguments]
def fusion_api_update_redistribute_logins(self, body=None, uri=None, api=None, headers=None): param = '/redistributeLogins' return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_from_logins(self, logins):\n # Now add contributors using cache (new GitHub contributors) with known email or orcid that isn't present\n for login in logins:\n # Check against contribution threshold, and not bot\n if not self.include_contributor(login):\n continue\n\n cache = self.cache.get(login) or {}\n email = cache.get(\"email\")\n orcid = cache.get(\"orcid\")\n\n # We can only add completely new entries that don't already exist\n if (email != None or orcid != None) and (\n email not in self.email_lookup and orcid not in self.orcid_lookup\n ):\n bot.info(f\" Updating {login}\")\n parts = (cache.get(\"name\") or login).split(\" \")\n entry = {\"@type\": \"Person\", \"givenName\": parts[0]}\n\n # Add the last name if it's defined\n if len(parts) > 1:\n entry[\"familyName\"] = \" \".join(parts[1:])\n\n if email != None:\n entry[\"email\"] = email\n if orcid != None:\n entry[\"@id\"] = \"https://orcid.org/%s\" % orcid\n self.lookup.append(entry)", "def setLSLimits(*args):\n args[0].Limit.LSLimit.ls_limit = args[1]", "def modify_user_lvars(*args):\n return _ida_hexrays.modify_user_lvars(*args)", "def notifyLoginsChanged(self, oldLogins, principal):\n # A user with the new login already exists\n for login in principal.logins:\n if (login not in oldLogins) and (login in self.__id_by_login):\n raise ValueError('Principal Login already taken!, '+ login)\n\n for login in oldLogins:\n del self.__id_by_login[login]\n\n for login in principal.logins:\n self.__id_by_login[login] = principal.__name__", "def ModifyUser(self, modifycount, modify):\n for i in range(modifycount):\n login = string.replace(modify[i]['Login'], ' ', '')\n home = self.__homeprefix + login[0] + '/' + login\n action = 'userman -M ' + login + ' -p ' + modify[i]['Passwd'] + ' -u ' + \\\n str(modify[i]['UID']) + ' -g ' + str(modify[i]['GID']) + ' -H ' + home + ' -s ' + \\\n modify[i]['Shell'] \n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET Modify = 0 WHERE Login = '%s'\" % (login)]", "def setLCLimits(*args):\n args[0].Limit.LCLimit.lc_limit = args[1]", "def do_jls(self, arg):\n\n arg = str(arg).split(' ') \n arg.insert(0,'jls')\n arg = [i for i in arg if i != '']\n \n jail_table(arg)", "def roles(*args):\n env.salt_roles.extend(args)", "def update(login, passwd):\r\n if login is None or passwd is None:\r\n print (\"Give me login and password of Comunio to update the database.\")\r\n exit(1)\r\n\r\n update_all(login, passwd)", "def run_lroles(self, expanded, unexpanded) :\n\t\tif not unexpanded :\n\t\t\treturn self.errormessage('Needs an userid as the first argument')\n\t\tusername = unexpanded[0]\n\t\tif len(unexpanded) > 1 :\n\t\t\troles = unexpanded[1:]\n\t\t\t# Zope accepts even if user and roles don't exist at all\n\t\t\t# so we have to test it ourselves\n\t\t\tif username not in self.__context.acl_users.getUserNames() :\n\t\t\t\treturn self.errormessage(\"Unknown user %s\" % username)\n\t\t\tif not self.HasPerms(self.__context, \"Change permissions\") :\n\t\t\t\treturn -1\n\t\t\t# should we also test if roles exits ?\n\t\t\tself.__context.manage_setLocalRoles(userid=username, roles=roles)\n\t\t\tself.htmlmessage('User %s now has local roles: %s' % (username, string.join(roles, ', ')))\n\t\telse :\n\t\t\tself.__context.manage_delLocalRoles(userids=[username])\n\t\t\tself.htmlmessage('User %s now has no local role' % username)", "def updateOrgAdmins(request):\n\n return updateRole('gsoc_org_admin')", "def change_user(self, login):\n 
self.task_storage.change_user_config(login)", "def main(args = sys.argv):\n\n parser = parser_setup()\n poptions = parser.parse_args()\n\n if poptions.quiet:\n logging.basicConfig(level=logging.WARNING, format=log_format)\n elif poptions.debug:\n logging.basicConfig(level=logging.DEBUG, format=log_format)\n else:\n # Set up the default logging levels\n logging.basicConfig(level=logging.INFO, format=log_format)\n # Make this a little less noisy by default\n requests_log = logging.getLogger(\"requests.packages.urllib3.connectionpool\")\n requests_log.setLevel(logging.WARN)\n\n if not poptions.base_api_url and \"LIMS_API_URL\" in os.environ:\n api_url = os.environ[\"LIMS_API_URL\"]\n log.debug(\"Using LIMS API endpoint: %s from environment\" % api_url)\n elif poptions.base_api_url:\n api_url = poptions.base_api_url\n log.debug(\"Using LIMS API endpoint: %s from options\" % api_url)\n else:\n sys.stderr.write(\"Could not find LIMS API URL.\\n\")\n sys.exit(1)\n\n\n if not poptions.token and \"LIMS_API_TOKEN\" in os.environ:\n token = os.environ[\"LIMS_API_TOKEN\"]\n elif poptions.token:\n token = poptions.token\n else:\n sys.stderr.write(\"Could not find LIMS API TOKEN.\\n\")\n sys.exit(1)\n\n monitor = ClusterMonitor(api_url, token, cluster_type=poptions.cluster)\n\n monitor.run()", "def set_keys(vessel, identity):\n\n if vessel['vesselname'] != VESSELNAME_TO_SET_USER_KEYS_ON:\n msg = \"[\" + vessel['nodelocation'] + \"] Skipping: vesselname is not: \" + VESSELNAME_TO_SET_USER_KEYS_ON\n print(msg)\n raise Exception(msg)\n\n # convert the list of keys to a list of strings for comparison purposes...\n existingkeystringlist = []\n for thiskey in vessel['userkeys']:\n existingkeystringlist.append(rsa_publickey_to_string(thiskey))\n\n if existingkeystringlist != USERKEY_LIST:\n print(\"[\" + vessel['nodelocation'] + \"] Setting user keys.\")\n try:\n experimentlib.set_vessel_users(vessel['vesselhandle'], identity, USERKEY_LIST)\n except Exception, e:\n msg = \"[\" + vessel['nodelocation'] + \"] Failure: \" + str(e)\n print(msg)\n import traceback\n traceback.print_exc()\n raise Exception(msg)\n else:\n print(\"[\" + vessel['nodelocation'] + \"] Success.\")\n else:\n print(\"[\" + vessel['nodelocation'] + \"] Already had correct user keys.\")", "def update_security(self, hAccessRights):\n\t\treturn Job(SDK.PrlVm_UpdateSecurity(self.handle, conv_handle_arg(hAccessRights))[0])", "def AddInitiators(self, initiatorList):\n # Append the IQNs to the existing list\n full_iqn_list = self.initiators\n for iqn in initiatorList:\n if iqn.lower() in full_iqn_list:\n mylog.debug(iqn + \" is already in group \" + self.name)\n else:\n full_iqn_list.append(iqn)\n\n # Modify the VAG on the cluster\n params = {}\n params[\"volumeAccessGroupID\"] = self.ID\n params[\"initiators\"] = full_iqn_list\n libsf.CallApiMethod(self.mvip, self.username, self.password, \"ModifyVolumeAccessGroup\", params, ApiVersion=5.0)", "def apply_skel(self, skel=None):\n\n\t\t# FIXME: 1 reimplement this cleanly, without shell subcommands\n\n\t\t#import shutil\n\t\t# copytree automatically creates tmp_user_dict['homeDirectory']\n\t\t#shutil.copytree(skel_to_apply, tmp_user_dict['homeDirectory'])\n\n\t\tif skel is None or skel not in LMC.configuration.users.skels:\n\t\t\traise exceptions.BadArgumentError(_(u'Invalid skel \"{0}\". 
'\n\t\t\t\t'Valid skels are {1}.').format(skel,\n\t\t\t\t\t', '.join(LMC.configuration.users.skels)))\n\n\t\twith self.lock:\n\t\t\tself._checking.set()\n\n\t\t\t# no force option with shutil.copytree(),\n\t\t\t# thus we use cp to force overwrite.\n\t\t\ttry:\n\t\t\t\tprocess.syscmd('cp -rf {0}/* {0}/.??* {1}'.format(\n\t\t\t\t\t\t\tskel, self.__homeDirectory))\n\n\t\t\texcept exceptions.SystemCommandError, e:\n\t\t\t\tlogging.warning(e)\n\t\t\t\tpyutils.print_exception_if_verbose()\n\n\t\t\t# set permission (because we are root)\n\t\t\t# FIXME: this should have already been covered by the inotifier.\n\t\t\tfor fileordir in os.listdir(skel):\n\t\t\t\ttry:\n\t\t\t\t\t# FIXME: do this with minifind(), os.chmod()… and map() it.\n\t\t\t\t\tprocess.syscmd(\"chown -R %s: %s/%s\" % (\n\t\t\t\t\t\tself.__login, self.__homeDirectory, fileordir))\n\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogging.warning(str(e))\n\n\t\t\ttry:\n\t\t\t\tos.mkdir('%s/%s' % (self.__homeDirectory, LMC.configuration.users.config_dir))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\tif e.errno != 17:\n\t\t\t\t\t# don't bork if already exists, else bork.\n\t\t\t\t\traise\n\n\t\t\tself._checking.clear()\n\n\t\t\tLicornEvent('user_skel_applyed', user=self.proxy, skel=skel).emit(priorities.LOW)\n\n\t\t\tlogging.notice(_(u'Applyed skel {0} for user {1}').format(\n\t\t\t\t\t\t\t\t\t\tskel, stylize(ST_LOGIN, self.__login)))", "def changeRoleInfo(self, role, info):", "def change(login):\n try:\n manager = Actions()\n manager.change_user(login)\n except Exception as e:\n print(e)", "def updateScc(self,icpdInstallLogFile):\n methodName = \"updateScc\"\n TR.info(methodName,\"Start Updating SCC for Portworx Installation\")\n \"\"\"\n oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:px-account\n oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:portworx-pvc-controller-account\n oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:px-lh-account\n oc adm policy add-scc-to-user anyuid system:serviceaccount:kube-system:px-lh-account\n oc adm policy add-scc-to-user anyuid system:serviceaccount:default:default\n oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:px-csi-account\n \"\"\"\n list = [\"px-account\",\"portworx-pvc-controller-account\",\"px-lh-account\",\"px-csi-account\"]\n oc_adm_cmd = \"oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:\"\n for scc in list:\n cmd = oc_adm_cmd+scc\n TR.info(methodName,\"Run get_nodes command %s\"%cmd)\n try:\n retcode = check_output(['bash','-c', cmd]) \n TR.info(methodName,\"Completed %s command with return value %s\" %(cmd,retcode))\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n\n cmd = \"oc adm policy add-scc-to-user anyuid system:serviceaccount:default:default\"\n try:\n retcode = check_output(['bash','-c', cmd])\n TR.info(methodName,\"Completed %s command with return value %s\" %(cmd,retcode))\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output)) \n cmd = \"oc adm policy add-scc-to-user anyuid system:serviceaccount:kube-system:px-lh-account\"\n try:\n retcode = check_output(['bash','-c', cmd]) \n TR.info(methodName,\"Completed %s command with return value %s\" %(cmd,retcode))\n except CalledProcessError as e:\n TR.error(methodName,\"command '{}' return with error (code {}): 
{}\".format(e.cmd, e.returncode, e.output)) \n TR.info(methodName,\"Done Updating SCC for Portworx Installation\")", "def notifyLoginChanged(self, oldLogin, principal):\n # A user with the new login already exists\n if principal.login in self.__id_by_login:\n raise ValueError('Principal Login already taken!, '+principal.login)\n\n del self.__id_by_login[oldLogin]\n self.__id_by_login[principal.login] = principal.__name__", "def update_user():", "def lsits(self, lsits: List[LsitsParam]):\n\n self._lsits = lsits", "def update_distributors(parts, distributors):\n # This loops through all the parts and finds any that are sourced from\n # local distributors that are not normally searched and places them into\n # the distributor disctionary.\n for part in parts:\n # Find the various distributors for this part by\n # looking for leading fields terminated by SEPRTR.\n for key in part.fields:\n try:\n dist = key[:key.index(SEPRTR)]\n except ValueError:\n continue\n\n # If the distributor is not in the list of web-scrapable distributors,\n # then it's a local distributor. Copy the local distributor template\n # and add it to the table of distributors.\n # Note: If the user excludes a web-scrapable distributors (using --exclude)\n # and then adds it as a local distributor (using fields) it will be added.\n if dist not in distributors:\n debug_overview('Creating \\'{}\\' local distributor profile...'.format(dist))\n new_dist = distributor_class.get_distributor_template('local_template')\n new_dist.label.name = dist # Set dist name for spreadsheet header.\n distributor_class.add_distributor(dist, new_dist)\n distributors.append(dist)\n dist_local_template.api_distributors.append(dist)", "def updateEntries(iface, lineList, trans, errorTolerant, verbose):\n\n entryList = []\n\n for line in lineList:\n\n # Split\n line = (line.strip()).split()\n if(not line):\n continue\n\n # First is LFN\n lfn = line.pop(0)\n entry = DlsEntry(DlsFileBlock(lfn)) \n\n # Then the LFN's attributes (key=val)\n attrDict = {}\n while(line):\n token=line[0]\n pos = token.find('=')\n if( pos == -1):\n break\n else:\n line.pop(0)\n attrDict[token[:pos]] = token[(pos+1):] \n entry.fileBlock.attribs = attrDict\n\n # Then the SEs\n attrDict = {} \n se = ''\n for token in line:\n pos = token.find('=')\n if( pos == -1):\n if(se):\n loc = DlsLocation(se)\n loc.attribs = attrDict\n entry.locations.append(loc)\n se = token\n attrDict = {}\n else:\n attrDict[token[:pos]] = token[(pos+1):]\n \n # And the last one (that is left...)\n if(se):\n entry.locations.append(DlsLocation(se, attrDict))\n\n # Store all the entries\n entryList.append(entry)\n\n if(verbose >= 2):\n print \"--DlsApi.update(\",\n for i in entryList: print i, \";\",\n print \")\"\n\n if(trans): errorTolerant = False\n # Finally, do the update (and let the caller deal with the exception...)\n iface.update(entryList, trans = trans, session = not trans, errorTolerant = errorTolerant)", "def user_login(change):\n return change()", "def run_lsusers(self, expanded, unexpanded) :\n\t\tif not unexpanded :\n\t\t\tunexpanded = [\"*\"]\t# List all users\n\t\tif not self.HasPerms(self.__context.acl_users, 'Manage users') :\n\t\t\treturn -1\n\t\tresult = []\n\t\tfor username in self.__context.acl_users.getUserNames() :\n\t\t\tfor uname in unexpanded :\n\t\t\t\tif fnmatch.fnmatchcase(username, uname) :\n\t\t\t\t\tuser = self.__context.acl_users.getUser(username)\n\t\t\t\t\tresult.append({ \"UserName\": username, \"Roles\": string.join(user.getRoles(), ', '), \"InContext\": 
string.join(user.getRolesInContext(self.__context), ', '), \"Domains\": string.join(user.getDomains(), ', ') })\n\t\tself.tableDisplay(\"lsusers\", [\"UserName\", \"Roles\", \"InContext\", \"Domains\"], result)", "def __init__(self, options):\n\n super(ShellScript, self).__init__(options)\n self.lims = s4.clarity.LIMS(options.lims_root_uri, options.username, options.password, options.dry_run, options.insecure)", "def update_user(self, queue: SubnetQueue, *args):", "def update_sysadmin_users():\n \n require('environment', provided_by=env.environments)\n servers = ec2_instances(filters=env.filters, cls=OpenRuralWebInstance,\n inst_kwargs={'deploy_user': env.deploy_user,\n 'instance_type': env.instance_type})\n for server in servers:\n server.create_users()\n server.update_deployer_keys()" ]
[ "0.54186535", "0.54110336", "0.53969145", "0.49532676", "0.49296567", "0.47788838", "0.47631562", "0.47610638", "0.46270508", "0.46098492", "0.4609678", "0.46089858", "0.46063384", "0.45893776", "0.45665368", "0.455108", "0.4519849", "0.45181343", "0.45152643", "0.45023364", "0.45000267", "0.44948715", "0.44786143", "0.4453544", "0.44529304", "0.44499755", "0.44390318", "0.4438023", "0.44296625", "0.4420956" ]
0.623942
0
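Aside: the redistribute-logins helper likewise appends '/redistributeLogins' to the LI URI, and its body defaults to None, so a bare trigger call suffices. Sketch, with `client` again an assumed wrapper instance and the URI a placeholder:

    # Sketch only; `client` and the URI are assumptions.
    response = client.fusion_api_update_redistribute_logins(
        uri="/rest/logical-interconnects/<li-id>")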
Updates the snmp_configuration for the given LI [Arguments]
def fusion_api_update_snmp_configuration(self, body=None, uri=None, api=None, headers=None): param = '/snmp-configuration' return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n dic = self\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def update_config_item(self, elements: Dict[str, Any]) -> None:\n ...", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def ra_llc_configuration_set(host_id, llc_configuration_fields, llc_configuration_param, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n result = ''\n err1 = [0, 0, 0, 0, 0]\n form_name = ['ARQ Mode', 'ArqWin(Retransmit Window Size)', 'Frame Loss Threshold',\n 'Leaky Bucket Timer', 'Frame Loss Time Out']\n param = []\n dictarr = []\n resultarray = {}\n param.append('llcArqEnable.1')\n param.append('arqWin.1')\n param.append('frameLossThreshold.1')\n param.append('leakyBucketTimerVal.1')\n param.append('frameLossTimeout.1')\n ra_llc_config = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n ra_llc_config = sqlalche_obj.session.query(SetOdu16RALlcConfTable).filter(\n SetOdu16RALlcConfTable.config_profile_id == device_param_list[0][4]).first()\n for i in range(len(llc_configuration_fields)):\n oidname = oid_name[llc_configuration_fields[i]]\n oidtype = oid_type[llc_configuration_fields[i]]\n oidvalue = llc_configuration_param[i]\n result += snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], oidname, oidtype, oidvalue)\n err = error_odu16(result, param, err1)\n val = ''\n try:\n el = EventLog()\n if 1 in err1:\n el.log_event(\"Values Updated in UBR LLC Form\", \"%s\" % (user_name))\n for j in range(0, len(llc_configuration_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = llc_configuration_param[j]\n dict[\"textbox\"] = llc_configuration_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err1[0] == 1:\n ra_llc_config.llc_arq_enable = llc_configuration_param[0]\n if err1[1] == 1:\n ra_llc_config.arq_win = llc_configuration_param[1]\n if err1[2] == 1:\n ra_llc_config.frame_loss_threshold = llc_configuration_param[2]\n if err1[3] == 1:\n ra_llc_config.leaky_bucket_timer_val = llc_configuration_param[3]\n if err1[4] == 1:\n ra_llc_config.frame_loss_timeout = llc_configuration_param[4]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if err != '':\n raise Set_exception\n except Set_exception as e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16RALlcConfTable'\n resultarray['formAction'] = 'Llc_Cancel_Configuration.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)", "def update_configuration(ConfigurationId=None, 
Data=None, Description=None):\n pass", "def fusion_api_reapply_li_configuration(self, uri, api=None, headers=None):\n param = '/configuration'\n return self.li.update(body=None, uri=uri, api=api, headers=headers, param=param)", "def update_conf_obj(self, nn_id, input_data):\n try:\n obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)\n data_set = getattr(obj, \"automl_parms\")\n data_set.update(input_data)\n setattr(obj, \"automl_parms\", data_set)\n obj.save()\n return input_data\n except Exception as e:\n raise Exception(e)", "def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)", "def conf_update(self):\n pass", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def handle_snmpconf():\n return 0", "def update(self, args):\n pass", "def UpdateConfig(self, instalog_config, update_info, env):\n if update_info.get('data_truncate', {}).get('enable', False):\n # If enable data_truncate, Instalog truncate once a day.\n instalog_config['buffer']['args']['truncate_interval'] = 86400\n\n threshold = update_info.get('input_http', {}).get(\n 'log_level_threshold', logging.NOTSET)\n instalog_config['input']['http_in']['args']['log_level_threshold'] = (\n threshold)\n\n if update_info.get('forward', {}).get('enable', False):\n args = update_info.get('forward', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_pull_socket_port\n instalog_config['output']['forward'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('forward')\n\n if update_info.get('customized_output', {}).get('enable', False):\n args = update_info.get('customized_output', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_customized_output_port\n instalog_config['output']['customized_output'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append(\n 'customized_output')\n\n if update_info.get('archive', {}).get('enable', False):\n instalog_config['output']['archive'] = {\n 'plugin': 'output_archive',\n 'args': update_info.get('archive', {}).get('args', {}).copy()\n }\n # Set the target_dir.\n target_dir = os.path.join(env.umpire_data_dir, 'instalog_archives')\n instalog_config['output']['archive']['args']['target_dir'] = target_dir\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('archive')", "def update_log_config(self, monitor_name, log_config):\n pass", "def update_cm_config(self, config_list):\n return self._put(endpoint='{}/cm/config'.format(self.api_version),\n data=config_list).json()", "def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n remote.update_config(_get_current_project_name(), settings)", "def fusion_api_update_li_telemetry_configuration(self, body=None, uri=None, api=None, headers=None):\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=\"\")", "def update(*args):", "def config_set(self,args):\n data = args\n try:\n for i 
in data: \n self.sname = i[0]\n self.kname = i[1]\n self.vname = i[2]\n self.config.set(self.sname,self.kname,self.vname)\n logger.info('Kname: '+self.kname+' was set.')\n return \n except Exception as e:\n logger.error(e)\n return 1", "def update_global_config(self, config, **kwargs):\n pass", "def update(self, **kwargs):\n for k, v in kwargs.items():\n if k not in VALID_CONFIG_KEYS:\n cprint(\"war\", f\"'{k}' is not a valid key, skipping...\")\n continue\n\n if v:\n v = self._validate_option(k, v)\n self.data[k] = v", "def Update(self,n,l):\n\t\tself.n = n\n\t\tself.l = l", "def update_config(doc, signum):\n log = logging.getLogger(__name__)\n log.info('Caught signal %d (%s). Reloading configuration.', signum, '/'.join(SIGNALS_INT_TO_NAME[signum]))\n if not GLOBAL_MUTABLE_CONFIG['--config']:\n log.warning('No previously defined configuration file. Nothing to read.')\n return\n\n # Read config.\n try:\n config = _get_arguments(doc)\n except DocoptcfgFileError as exc:\n logging.getLogger(__name__).error('Config file specified but invalid: %s', exc.message)\n return\n\n # Resolve relative paths.\n _real_paths(config)\n\n # Validate.\n try:\n _validate_config(config)\n except ConfigError:\n return\n\n # Update.\n GLOBAL_MUTABLE_CONFIG.update(config)\n\n # Re-setup logging.\n setup_logging(GLOBAL_MUTABLE_CONFIG)\n log.info('Done reloading configuration.')", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def set_rsyslog_new_configuration():\n with open(rsyslog_conf_path, \"rt\") as fin:\n with open(\"tmp.txt\", \"wt\") as fout:\n for line in fin:\n if \"imudp\" in line or \"imtcp\" in line:\n # Load configuration line requires 1 replacement\n if \"load\" in line:\n fout.write(line.replace(\"#\", \"\", 1))\n # Port configuration line requires 2 replacements\n elif \"port\" in line:\n fout.write(line.replace(\"#\", \"\", 2))\n else:\n fout.write(line)\n else:\n fout.write(line)\n command_tokens = [\"sudo\", \"mv\", \"tmp.txt\", rsyslog_conf_path]\n write_new_content = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)\n time.sleep(3)\n o, e = write_new_content.communicate()\n if e is not None:\n handle_error(e,\n error_response_str=\"Error: could not change Rsyslog.conf configuration in -\" + rsyslog_conf_path)\n return False\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True", "def update(\n self,\n ConfigurationRetries=None,\n EnableAccmNegotiation=None,\n EnableIpV4=None,\n EnableIpV6=None,\n EnableLqm=None,\n EnableMpls=None,\n EnableOsi=None,\n Enabled=None,\n LocalIpAddress=None,\n LocalIpV6IdType=None,\n LocalIpV6Iid=None,\n LocalIpV6MacBasedIid=None,\n LocalIpV6NegotiationMode=None,\n LqmReportInterval=None,\n PeerIpV6IdType=None,\n PeerIpV6Iid=None,\n PeerIpV6MacBasedIid=None,\n PeerIpV6NegotiationMode=None,\n RetryTimeout=None,\n RxAlignment=None,\n RxMaxReceiveUnit=None,\n SelectedSpeeds=None,\n TxAlignment=None,\n TxMaxReceiveUnit=None,\n UseMagicNumber=None,\n ):\n # type: (int, bool, bool, bool, bool, bool, bool, bool, str, str, str, str, str, int, str, str, str, str, int, int, int, List[str], int, int, bool) -> Ppp\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def update_host_config(self, hostid, config, **kwargs):\n pass", "def do_update(self, arg):\n if type(arg) == str:\n arg_list = shlex.shlex(arg)\n arg_list.wordchars += \"-\"\n arg_list = list(arg_list)\n try:\n idx_start = arg_list.index(\"[\")\n idx_end = arg_list.index(\"]\")\n list_str = 
\"\".join(arg_list[idx_start:idx_end + 1])\n list_str = eval(list_str)\n list_start = arg_list[:idx_start]\n list_end = arg_list[idx_end + 1:]\n arg_list = list_start\n arg_list.append(list_str)\n arg_list.extend(list_end)\n except ValueError:\n pass\n else:\n arg_list = arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key not in storage.all():\n print(\"** no instance found **\")\n return\n if len(arg_list) == 3 and type(arg_list[2]) == dict:\n obj = storage.all()[key]\n for key, val in arg_list[2].items():\n setattr(obj, key, val)\n obj.save()\n return\n if len(arg_list) < 3:\n print(\"** attribute name missing **\")\n return\n if len(arg_list) < 4:\n print(\"** value missing **\")\n return\n obj = storage.all()[key]\n if type(arg_list[3]) != list:\n arg_list[3].replace('\"', \"\").replace(\"'\", \"\")\n setattr(obj, arg_list[2].replace('\"', \"\").replace(\"'\", \"\"),\n arg_list[3])\n obj.save()", "def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])", "def omc_conf_set(host_id, omc_fields, omc_config, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n err1 = [0, 0]\n result = \"\"\n param = []\n resultarray = {}\n param.append('omcIpAddress.1')\n param.append('periodicStatsTimer.1')\n form_name = ['OMC IP address', 'Periodic Statistics Timer']\n dictarr = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n odu16_omc_conf_table = sqlalche_obj.session.query(SetOdu16OmcConfTable).filter(\n SetOdu16OmcConfTable.config_profile_id == device_param_list[0][4]).all()\n result += str(odu16_omc_conf_table)\n for i in range(len(omc_fields)):\n omc_oid = oid_name[omc_fields[i]]\n omc_type = oid_type[omc_fields[i]]\n omc_type_val = omc_config[i]\n result += snmp_set(device_param_list[0][0], device_param_list[0][1], device_param_list[0][2], device_param_list[\n 0][3], omc_oid, omc_type, omc_type_val)\n err = error_odu16(result, param, err1)\n try:\n el = EventLog()\n # el.log_event( \"description detail\" , \"user_name\" )\n if 1 in err1:\n el.log_event(\n \"Values Updated in UBR UNMP Form\", \"%s\" % (user_name))\n if int(err1[0]) == 1:\n odu16_omc_conf_table[0].omc_ip_address = omc_config[0]\n if int(err1[1]) == 1:\n odu16_omc_conf_table[0].periodic_stats_timer = omc_config[1]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n for j in range(0, len(omc_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = omc_config[j]\n dict[\"textbox\"] = omc_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err != '':\n raise Set_exception\n except Set_exception, e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16OmcConfTable'\n resultarray['formAction'] = 'omc_config_form.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)" ]
[ "0.5679741", "0.56726307", "0.56405234", "0.5542471", "0.5516537", "0.54477245", "0.5404502", "0.5267286", "0.52630067", "0.5219275", "0.51195866", "0.50946283", "0.508972", "0.50685936", "0.5062348", "0.5054096", "0.50422263", "0.5028846", "0.49959874", "0.49718717", "0.4971426", "0.49588746", "0.49415603", "0.49199486", "0.49127623", "0.49122408", "0.4907659", "0.4905165", "0.48869047", "0.48781678" ]
0.67885226
0
Updates the sflow_configuration for the given LI [Arguments]
def fusion_api_update_sflow_configuration(self, body=None, uri=None, api=None, headers=None): param = '/sflow-configuration' return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_flow(self, conf, dpid, flow_id, params):\n\t\tpass", "def update_flow(self, flow):\r\n self.flow = flow", "def config_attributes(dut, **kwargs):\n cli_type = st.get_ui_type(dut, **kwargs)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n sflow_key = kwargs.get(\"sflow_key\", \"global\")\n command = \"\"\n commands = list()\n if \"sample_rate\" in kwargs and \"interface_name\" in kwargs:\n if cli_type == \"click\":\n command += \"config sflow interface sample-rate {} {}\".format(kwargs[\"interface_name\"], kwargs[\"sample_rate\"])\n commands.append(command)\n elif cli_type == \"klish\":\n interface_details = utils_obj.get_interface_number_from_name(kwargs[\"interface_name\"])\n if not interface_details:\n st.log(\"Interface details not found {}\".format(interface_details))\n return False\n commands.append(\"interface {} {}\".format(interface_details.get(\"type\"), interface_details.get(\"number\")))\n if \"no_form\" in kwargs:\n command = \"no sflow sampling-rate\"\n else:\n command = \"sflow sampling-rate {}\".format(kwargs[\"sample_rate\"])\n commands.append(command)\n commands.append(\"exit\")\n elif cli_type == \"rest\":\n data = {\"sonic-sflow:sample_rate\":int(kwargs[\"sample_rate\"])}\n url = \"{}/SFLOW_SESSION/SFLOW_SESSION_LIST={}/sample_rate\".format(REST_URI, kwargs[\"interface_name\"])\n output = st.rest_modify(dut, url, data)\n st.log(\"REST config_attributes SAMPLE RATE OUTPUT -- {}\".format(output))\n if output and output[\"status\"] != 204:\n return False\n return True\n else:\n st.log(\"UNSUPPORTED CLI TYPE -- {}\".format(cli_type))\n return False\n st.config(dut, commands, type=cli_type)\n if \"polling_interval\" in kwargs:\n if cli_type == \"click\":\n command += \"config sflow polling-interval {};\".format(kwargs[\"polling_interval\"])\n commands.append(command)\n elif cli_type == \"klish\":\n if \"no_form\" in kwargs:\n command = \"no sflow polling-interval\"\n else:\n command = \"sflow polling-interval {}\".format(kwargs[\"polling_interval\"])\n commands.append(command)\n elif cli_type == \"rest\":\n data = {\"sonic-sflow:polling_interval\":int(kwargs[\"polling_interval\"])}\n url = \"{}/SFLOW/SFLOW_LIST={}/polling_interval\".format(REST_URI, sflow_key)\n output = st.rest_modify(dut, url, data)\n st.log(\"REST config_attributes POLLING RATE OUTPUT -- {}\".format(output))\n if output and output[\"status\"] != 204:\n return False\n return True\n else:\n st.log(\"UNSUPPORTED CLI TYPE -- {}\".format(cli_type))\n return False\n st.config(dut, commands, type=cli_type)\n return True", "def configSFlow(self, ifname, collector, sampling, polling):\n\n net = self.net\n info(\"**** [G2]: enabling sFlow:\\n\")\n sflow = 'ovs-vsctl -- --id=@sflow create sflow agent=%s target=%s sampling=%s polling=%s --' % (ifname, collector, sampling, polling)\n for s in net.switches:\n sflow += ' -- set bridge %s sflow=@sflow' % s\n info(\"**** [G2]: \" + ' '.join([s.name for s in net.switches]) + \"\\n\")\n quietRun(sflow)", "def fusion_api_reapply_sas_li_configuration(self, uri, api=None, headers=None):\n param = '/configuration'\n return self.sasli.put(body=None, uri=uri, api=api, headers=headers, param=param)", "def fusion_api_reapply_li_configuration(self, uri, api=None, headers=None):\n param = '/configuration'\n return self.li.update(body=None, uri=uri, api=api, headers=headers, param=param)", "def conf_update(self):\n pass", "def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)", "def 
update_configuration(ConfigurationId=None, Data=None, Description=None):\n pass", "def UpdateFlowMod(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[bool, None]\n payload = {\"Arg1\": self.href}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"updateFlowMod\", payload=payload, response_object=None)", "def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)", "def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def update_config_item(self, elements: Dict[str, Any]) -> None:\n ...", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n dic = self\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def update_config(config, args):\n if args.cfg:\n _update_config_from_file(config, args.cfg)\n config.defrost()\n if args.dataset:\n config.DATA.DATASET = args.dataset\n if args.batch_size:\n config.DATA.BATCH_SIZE = args.batch_size\n config.DATA.BATCH_SIZE_EVAL = args.batch_size\n if args.batch_size_eval:\n config.DATA.BATCH_SIZE_EVAL = args.batch_size_eval\n if args.image_size:\n config.DATA.IMAGE_SIZE = args.image_size\n if args.accum_iter:\n config.TRAIN.ACCUM_ITER = args.accum_iter\n if args.data_path:\n config.DATA.DATA_PATH = args.data_path\n if args.output:\n config.SAVE = args.output\n if args.eval:\n config.EVAL = True\n if args.pretrained:\n config.MODEL.PRETRAINED = args.pretrained\n if args.resume:\n config.MODEL.RESUME = args.resume\n if args.last_epoch:\n config.TRAIN.LAST_EPOCH = args.last_epoch\n if args.amp: # only for training\n config.AMP = not config.EVAL\n config.freeze()\n return config", "def fusion_api_update_li_telemetry_configuration(self, body=None, uri=None, api=None, headers=None):\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=\"\")", "def update_config(self, config):\n return 
self._update_config(\"config\", config)", "def update(self, args):\n pass", "def update_global_config(self, config, **kwargs):\n pass", "def update(*args):", "def update_shed_config(self, shed_conf):\n for index, my_shed_tool_conf in enumerate(self._dynamic_tool_confs):\n if shed_conf['config_filename'] == my_shed_tool_conf['config_filename']:\n self._dynamic_tool_confs[index] = shed_conf\n self._save_integrated_tool_panel()", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config", "def update_config_file(invoker: AirflowInvoker) -> None:\n airflow_cfg_path = invoker.files[\"config\"]\n logging.debug(f\"Generated default '{str(airflow_cfg_path)}'\")\n\n # open the configuration and update it\n # now we let's update the config to use our stubs\n airflow_cfg = configparser.ConfigParser()\n\n with airflow_cfg_path.open() as cfg:\n airflow_cfg.read_file(cfg)\n logging.debug(f\"Loaded '{str(airflow_cfg_path)}'\")\n\n config = invoker.plugin_config_processed\n for section, cfg in config.items():\n airflow_cfg[section].update(cfg)\n logging.debug(f\"\\tUpdated section [{section}] with {cfg}\")\n\n with airflow_cfg_path.open(\"w\") as cfg:\n airflow_cfg.write(cfg)\n logging.debug(f\"Saved '{str(airflow_cfg_path)}'\")", "def UpdateConfig(self, instalog_config, update_info, env):\n if update_info.get('data_truncate', {}).get('enable', False):\n # If enable data_truncate, Instalog truncate once a day.\n instalog_config['buffer']['args']['truncate_interval'] = 86400\n\n threshold = update_info.get('input_http', {}).get(\n 'log_level_threshold', logging.NOTSET)\n instalog_config['input']['http_in']['args']['log_level_threshold'] = (\n threshold)\n\n if update_info.get('forward', {}).get('enable', False):\n args = update_info.get('forward', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_pull_socket_port\n instalog_config['output']['forward'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('forward')\n\n if update_info.get('customized_output', {}).get('enable', False):\n args = update_info.get('customized_output', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n 
args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_customized_output_port\n instalog_config['output']['customized_output'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append(\n 'customized_output')\n\n if update_info.get('archive', {}).get('enable', False):\n instalog_config['output']['archive'] = {\n 'plugin': 'output_archive',\n 'args': update_info.get('archive', {}).get('args', {}).copy()\n }\n # Set the target_dir.\n target_dir = os.path.join(env.umpire_data_dir, 'instalog_archives')\n instalog_config['output']['archive']['args']['target_dir'] = target_dir\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('archive')", "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count", "def config_set(self,args):\n data = args\n try:\n for i in data: \n self.sname = i[0]\n self.kname = i[1]\n self.vname = i[2]\n self.config.set(self.sname,self.kname,self.vname)\n logger.info('Kname: '+self.kname+' was set.')\n return \n except Exception as e:\n logger.error(e)\n return 1", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)" ]
[ "0.68084615", "0.61187977", "0.59682405", "0.58530384", "0.57871956", "0.5644216", "0.5578467", "0.5413738", "0.53650093", "0.5348528", "0.53186816", "0.5275245", "0.52426374", "0.52174234", "0.5212929", "0.51983064", "0.51878506", "0.5145924", "0.51448166", "0.51402605", "0.51290905", "0.512453", "0.507512", "0.50530905", "0.503807", "0.5028787", "0.5027913", "0.5006258", "0.5005744", "0.5000192" ]
0.71294343
0
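
The sFlow record above reduces to a one-line PUT against the logical interconnect's `/sflow-configuration` sub-resource. A minimal usage sketch, assuming a hypothetical `client` object that exposes this keyword; the `<li-id>` placeholder and the payload field names are illustrative assumptions, not taken from the record:

```python
# Hedged sketch: `client`, the <li-id> placeholder, and the payload keys
# ("enabled", "collectors") are assumptions for illustration only.
sflow_body = {
    "enabled": True,
    "collectors": [{"collectorIp": "192.0.2.10", "port": 6343}],
}
resp = client.fusion_api_update_sflow_configuration(
    body=sflow_body,
    uri="/rest/logical-interconnects/<li-id>",
)
```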
Deletes an LSG from the appliance based on name OR uri [Arguments]
def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None): return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fusion_api_delete_lig(self, name=None, uri=None, api=None, headers=None, etag=None):\n return self.lig.delete(name=name, uri=uri, api=api, headers=headers, etag=etag)", "def fusion_api_delete_sas_lig(self, name=None, uri=None, api=None, headers=None):\n return self.saslig.delete(name=name, uri=uri, api=api, headers=headers)", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def fusion_api_delete_sas_li(self, name=None, uri=None, api=None, headers=None):\n return self.sasli.delete(name=name, uri=uri, api=api, headers=headers)", "def fusion_api_delete_ls(self, name=None, uri=None, api=None, headers=None):\n return self.ls.delete(name=name, uri=uri, api=api, headers=headers)", "def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()", "def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])", "def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}", "def delete(self, *args, **kwargs):\n\n lns_euid = None\n lgtw_euid = None\n\n if args[0]:\n try:\n lns_euid = EUI64(args[0]).id6\n except ValueError as err: \n self.set_status(400)\n self.finish({\"status_code\":400,\"title\":\"Value error (lns_euid)\",\"detail\":str(err)})\n\n if len(args) == 2:\n if args[1]:\n try:\n lgtw_euid = EUI64(args[1]).id6\n except ValueError as err: \n self.set_status(400)\n self.finish({\"status_code\":400,\"title\":\"Value error (lgtw_euid)\",\"detail\":str(err)})\n\n if len(args) == 2 and lns_euid and lgtw_euid:\n self.service.remove_lgtw(lns_euid, lns_euid)\n elif len(args) == 2 and not lns_euid and args[1]:\n self.service.remove_lgtw(lns_euid)\n elif lns_euid:\n lns_euid = lns_euid\n print(self.service.lgtws)\n for lgtw_euid in self.service.lgtws:\n self.service.remove_lgtw(lgtw_euid, lns_euid)\n else:\n for lns_euid in self.service.lnss:\n for lgtw_euid in self.service.lgtws:\n self.service.remove_lgtw_from_lns(lgtw_euid, lns_euid)", "def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return 
self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)", "def site_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete site \"{1}\"'.format(self.APP_CMD, name))", "def delete_suggester(DomainName=None, SuggesterName=None):\n pass", "def bdev_uring_delete(client, name):\n params = {'name': name}\n return client.call('bdev_uring_delete', params)", "def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)", "def fusion_api_delete_vsn_range(self, name=None, uri=None, api=None, headers=None):\n return self.vsnrange.delete(name, uri, api, headers)", "def snap_delete(mnode, snapname):\n\n cmd = \"gluster snapshot delete %s --mode=script\" % snapname\n return g.run(mnode, cmd)", "def catalog_delete(self, args):\n headers = DEFAULT_HEADERS.copy()\n headers.update(args.headers)\n try:\n catalog = self.server.connect_ermrest(args.id)\n catalog.delete(args.path, headers)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e", "def delete(self, name):\n\n pass", "def remove(name):", "def do_remove(self, arg):\n jail_destroy('remove', arg)", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def delete_app(self, name):\n raise NotImplementedError", "def catalog_alias_delete(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n alias.delete_ermrest_alias(really=True)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e", "def delete(self, uri, where, selectionArgs):\n pass", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete_provider(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n print \"USAGE: molns provider delete name\"\n return\n config.delete_object(name=args[0], kind='Provider')", "def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))" ]
[ "0.65715736", "0.6537951", "0.64739513", "0.6469143", "0.64559233", "0.6451616", "0.6421078", "0.637283", "0.62559646", "0.6248884", "0.62425286", "0.6233732", "0.62126493", "0.61967176", "0.6192574", "0.6154742", "0.60829335", "0.60448134", "0.60233885", "0.5990966", "0.59899026", "0.59822965", "0.59579504", "0.59561723", "0.5951248", "0.5944455", "0.59372485", "0.59355783", "0.59345585", "0.59281254" ]
0.75466216
0
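
Because the delete keyword in the record above forwards both `name` and `uri` to `self.lsg.delete`, either identifier selects the LSG. A sketch under the same assumed `client` object; the name and the `<id>` segment are placeholders:

```python
# Either call maps onto self.lsg.delete(...); values are placeholders.
resp = client.fusion_api_delete_lsg(name="my-lsg")
resp = client.fusion_api_delete_lsg(uri="/rest/logical-switch-groups/<id>")
```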
Gets a default or paginated collection of LSGs. [Arguments]
def fusion_api_get_lsg(self, uri=None, param='', api=None, headers=None): return self.lsg.get(uri=uri, param=param, api=api, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit", "def _get_glossaries(self, limit=-1, offset=0, sort_order=\"ASC\"):\n results = None\n atlas_endpoint = self.endpoint_url + \"/glossary\"\n logging.debug(\"Retreiving all glossaries from catalog\")\n\n # TODO: Implement paging with offset and limit\n getResult = requests.get(\n atlas_endpoint,\n params={\"limit\": limit, \"offset\": offset, \"sort\": sort_order},\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(getResult)\n\n return results", "def get_germplasm(\n germplasm_p_u_i: Optional[str] = Query(None, alias='germplasmPUI'),\n germplasm_db_id: Optional[str] = Query(None, alias='germplasmDbId'),\n germplasm_name: Optional[str] = Query(None, alias='germplasmName'),\n common_crop_name: Optional[str] = Query(None, alias='commonCropName'),\n accession_number: Optional[str] = Query(None, alias='accessionNumber'),\n collection: Optional[str] = None,\n genus: Optional[str] = None,\n species: Optional[str] = None,\n study_db_id: Optional[str] = Query(None, alias='studyDbId'),\n synonym: Optional[str] = None,\n parent_db_id: Optional[str] = Query(None, alias='parentDbId'),\n progeny_db_id: Optional[str] = Query(None, alias='progenyDbId'),\n external_reference_i_d: Optional[str] = Query(None, alias='externalReferenceID'),\n external_reference_source: Optional[str] = Query(\n None, alias='externalReferenceSource'\n ),\n page: Optional[int] = None,\n page_size: Optional[int] = Query(None, alias='pageSize'),\n authorization: Optional[constr(regex=r'^Bearer .*$')] = Query(\n None, alias='Authorization'\n ),\n) -> GermplasmListResponse:\n pass", "def global_service_collection():\n\tglobal global_lsc\n\t# If this is the first call then the object is not yet created\n\tif not global_lsc:\n\t\t# Create the global object\n\t\tglobal_lsc = LadonServiceCollection()\n\treturn global_lsc", "def get(self, *args):\n return _libsbml.ListOf_get(self, *args)", "def get(self, *args):\n return _libsbml.ListOfGroups_get(self, *args)", "def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])", "def list(\n self,\n **kwargs # type: Any\n ):\n # type: (...) 
-> Iterable[\"_models.StaticSiteCollection\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.StaticSiteCollection\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-01-01\"\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('StaticSiteCollection', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def get(self):\r\n return get_all()", "def get(self, *args):\n return _libsbml.ListOfGraphicalObjects_get(self, *args)", "def fusion_api_get_ls(self, uri=None, api=None, headers=None, param=''):\n return self.ls.get(uri=uri, api=api, headers=headers, param=param)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]", "def lst() :\n return s.lst()", "def getSlaves():", "def get_many(self, request, **kwargs):\n return []", "def getLCLimits(*args):\n return args[0].Limit.LCLimit.lc_limit", "def Run(self, args):\n orgs_client = organizations.Client()\n return orgs_client.List(limit=args.limit, page_size=args.page_size)", "def get(self, *args):\n return self.docs.get(*args)", "def get_list(self, *args, **kwargs):\n pass", "def get_list(self, 
*args, **kwargs):\n pass", "def get(self, *args, **kwargs):\n self.before_get(args, kwargs)\n\n qs = QSManager(request.args, self.schema)\n objects_count, objects = self._data_layer.get_collection(qs, kwargs)\n\n schema_kwargs = getattr(self, 'get_schema_kwargs', dict())\n schema_kwargs.update({'many': True})\n\n schema = compute_schema(self.schema,\n schema_kwargs,\n qs,\n qs.include)\n\n result = schema.dump(objects).data\n\n view_kwargs = request.view_args if getattr(self, 'view_kwargs', None) is True else dict()\n add_pagination_links(result,\n objects_count,\n qs,\n url_for(self.view, **view_kwargs))\n\n result.update({'meta': {'count': objects_count}})\n\n self.after_get(result)\n return result", "def _granule_samples(found_collections, filters, limit, config):\n found_granules = []\n for concept in found_collections:\n query = {\"concept_id\": concept}\n granules = search(query, filters=filters, limit=limit, config=config)\n found_granules.extend(granules)\n return found_granules[:len(found_collections)*limit]", "def get_list(self, request, **kwargs):\n # TODO: Uncached for now. Invalidation that works for everyone may be\n # impossible.\n objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))\n sorted_objects = self.apply_sorting(objects, options=request.GET)\n\n paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(),\n limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)\n to_be_serialized = paginator.page()\n\n # Dehydrate the bundles in preparation for serialization.\n bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]\n to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles]\n to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)\n\n self.add_shit_to_meta(request, to_be_serialized)\n\n return self.create_response(request, to_be_serialized)", "def get_site_collection(self, request):\n\n objects = self.get()\n\n groups = [\n ('topics', request.translate(_(\"Topics\"))),\n ('news', request.translate(_(\"Latest news\"))),\n ('imagesets', request.translate(_(\"Photo Albums\"))),\n ('forms', request.translate(_(\"Forms\"))),\n ('directories', request.translate(_(\"Directories\"))),\n ('resources', request.translate(_(\"Resources\"))),\n ]\n\n links = []\n\n for id, label in groups:\n for obj in objects[id]:\n # in addition to the default url/name pairings we use a group\n # label which will be used as optgroup label\n links.append({\n 'group': label,\n 'name': obj.title,\n 'url': request.link(obj)\n })\n\n return links", "def get_galleries(self):\n data = self._get('get_gallery_list')\n return data['galleries']" ]
[ "0.5902179", "0.56460506", "0.5590701", "0.54809374", "0.5435606", "0.54336655", "0.53895336", "0.52304566", "0.52200544", "0.5212413", "0.5187521", "0.5119294", "0.5119294", "0.5119294", "0.5119294", "0.5119294", "0.51137084", "0.50779617", "0.50483704", "0.50458235", "0.50396174", "0.5032487", "0.5007824", "0.5001672", "0.5001672", "0.4982065", "0.4974589", "0.4962694", "0.49438348", "0.49358544" ]
0.5673093
1
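
The getter in the record above passes `uri` and `param` straight through to `self.lsg.get`, so one keyword covers the default collection, a single resource, and a paginated read. Sketch under the same assumed `client` object; the `start`/`count` query string is an assumption about the REST API's paging syntax:

```python
all_lsgs = client.fusion_api_get_lsg()  # default collection
one_lsg = client.fusion_api_get_lsg(uri="/rest/logical-switch-groups/<id>")  # placeholder id
page = client.fusion_api_get_lsg(param="?start=0&count=25")  # assumed paging params
```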
Gets the default settings for LSGs. [Example] ${resp} = Fusion Api Get Lsg Default Settings| |
def fusion_api_get_lsg_default_settings(self, api=None, headers=None): return self.lsg.get(api=api, param='/defaultSettings', headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDefaultSettings():\n return {}", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def fusion_api_get_lsg_setting(self, uri, settingsId=None, api=None, headers=None):\n param = '/settings/%s' % (settingsId)\n return self.lsg.get(uri=uri, api=api, param=param, headers=headers)", "def getDefault():", "def default():\n return DefaultSwh.default()", "def get(option, default = None):\n\treturn _cfg.get('rosshm', option, fallback = default)", "def getdefault(self, option, type=str, default=None):\r\n return self.get(Config.DEFAULT_SECTION, option, type, default=default)", "def defaultConf():\n from config import lwbdUrl, userAndPass\n baseUrl = lwbdUrl\n lucidAuth = userAndPass\n return LucidSdaConfiguration(baseUrl,\n lucidAuth)", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def getDefaultConfig():\n config = {\n \"samples\": _DEFAULT_SAMPLE_COUNT,\n \"channel\": \"all\",\n \"rate\": _DEFAULT_SAMPLE_RATE,\n \"update\": 1,\n \"output\": \"data.rld\",\n \"format\": \"rld\",\n \"size\": _DEFAULT_FILE_SIZE,\n \"comment\": _DEFAULT_FILE_COMMENT,\n \"digital\": True,\n \"ambient\": False,\n \"aggregate\": \"downsample\",\n \"high-range\": [],\n \"web\": False,\n }\n return config", "def fusion_api_get_global_settings(self, uri=None, api=None, headers=None, param=''):\n return self.settings.get(uri, api, headers, param)", "def default_settings(self, settings):\n return {}", "def default_user_settings(self) -> pulumi.Output['outputs.DomainUserSettings']:\n return pulumi.get(self, \"default_user_settings\")", "def get(self, name, default=''):\n return getattr(settings, name, DEFAULT_SETTINGS.get(name, default))", "def getorelse(self, name, default=None):\n try:\n return self._defaults[name]\n except KeyError:\n return default", "def get_setting(setting_name, default=None):\n settings_dict = getattr(settings, 'SIMPLE_FORUMS', None)\n\n if settings_dict:\n return settings_dict.get(setting_name, default)\n\n return default", "def getDefaultL3ParserSettings():\n return _libsbml.getDefaultL3ParserSettings()", "def default_space_settings(self) -> pulumi.Output[Optional['outputs.DomainDefaultSpaceSettings']]:\n return pulumi.get(self, \"default_space_settings\")", "def get_default_options():\n out = _SFrame({'name': ['method', 'feature_model', 'verbose'],\n 'default_value' : ['lsh', 'auto', 'True'],\n 'lower_bound': [None, None, 0],\n 'upper_bound': [None, None, 1],\n 'description': ['Method for searching reference data',\n 'Trained model for extracting features from raw data objects',\n 'Whether progress output is printed'],\n 'parameter_type': ['string', 'model', 'boolean']})\n\n return out", "def get_defaults():\r\n profile = settings.profile_manager.get(\"default\")\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig, storage_args=['Global'], read_only=True)\r\n return {\r\n \"video_directory\": config.videodir,\r\n \"oauth2_token\": os.path.join(settings.configdir, \"oauth2_token.json\"),\r\n \"client_secrets\": os.path.join(settings.configdir, \"client_secrets.json\")\r\n }", "def get_default():\n # default_config = configparser.ConfigParser(allow_no_value=True)\n #\n # default_config.add_section(\"General\")\n # general = default_config[\"General\"]\n # general[\"PermanentLogPath\"] = r\"/home/pi/automationwebserver.log\"\n # general[\"TempLogPath\"] = r\"/var/ramdrive/test.txt\"\n #\n # default_config.add_section(\"ArduinoLink\")\n # arduino = default_config[\"ArduinoLink\"]\n # 
arduino[\"ArdIPAddress\"] = \"192.168.2.35\"\n # arduino[\"ArdTerminalPort\"] = \"53201\"\n # arduino[\"ArdDatastreamPort\"] = \"53202\"\n # arduino[\"RPiIPAddress\"] = \"192.168.2.34\"\n # arduino[\"RpiTerminalPort\"] = \"53201\"\n # arduino[\"RpiDatastreamPort\"] = \"53202\"\n #\n # default_config.add_section(\"Databases\")\n # databases = default_config[\"Databases\"]\n # databases[\"HostAddress\"] = \"localhost\"\n # databases[\"HostPort\"] = \"3306\"\n # default_config['REALTIME'] = {'databasename': 'testname', 'user': 'testuser',\n # 'password': 'testpassword', 'max_rows': '10'}\n # default_config['HISTORY'] = {'databasename': 'testname', 'user': 'testuser',\n # 'password': 'testpassword'}\n #\n # default_config.add_section(\"DataTransfer\")\n # default_config.set(\"DataTransfer\", r\"# see https://docs.python.org/3.6/library/struct.html#struct.unpack\", None)\n # datatransfer = default_config[\"DataTransfer\"]\n # datatransfer[\"ProtocolVersion\"] = 'a'\n # default_config[\"SensorReadings\"] = {\"tablename\": \"PoolHeaterSensorValues\",\n # \"unpackformat\": \"<Hff?fffffffffff\",\n # \"fieldnames\":\n # \"sim_flags solar_intensity cumulative_insolation\"\\\n # \" surge_tank_ok pump_runtime\"\\\n # \" hx_hot_inlet_inst hx_hot_inlet_smooth\"\\\n # \" hx_hot_outlet_inst hx_hot_outlet_smooth\"\\\n # \" hx_cold_inlet_inst hx_cold_inlet_smooth\"\\\n # \" hx_cold_outlet_inst hx_cold_outlet_smooth\"\\\n # \" temp_ambient_inst temp_ambient_smooth\"\n # }\n # default_config[\"Status\"] = {\"tablename\": \"PoolHeaterStatus\",\n # \"unpackformat\": \"<B?BB?BBBBBB\",\n # \"fieldnames\":\n # \"assert_failure_code realtime_clock_status\"\\\n # \" logfile_status ethernet_status\"\\\n # \" solar_intensity_reading_invalid\"\\\n # \" pump_state\"\\\n # \" hx_hot_inlet_status hx_hot_outlet_status\"\\\n # \" hx_cold_inlet_status hx_cold_outlet_status\"\\\n # \" ambient_status\"\n # }\n return default_config", "def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')", "def bootstrap_default():\n\treturn default_configuration", "def get_defaults(self):\n\t\treturn self.__defaults", "def default():\n return DefaultLinearFresnelDsgIph.default()", "def settings():\n return _get_settings()[1]", "def get_default_bundle():\n response = houston.get(\"/zipline/config\")\n houston.raise_for_status_with_json(response)\n # It's possible to get a 204 empty response\n if not response.content:\n return {}\n return response.json()", "def service_config():\n global _service_config\n if not _service_config:\n r = requests.get('https://tech.lds.org/mobile/ldstools/config.json')\n r.raise_for_status()\n _service_config = r.json()\n return _service_config", "def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n 
\"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }", "def get(self, name):\n try:\n return self._defaults[name]\n except KeyError:\n raise UndefinedDefault(\"default %s is undefined\" % name)" ]
[ "0.6478354", "0.6247138", "0.6241164", "0.6094763", "0.60725147", "0.5992975", "0.58911484", "0.5849506", "0.5788157", "0.5785921", "0.5736645", "0.5732013", "0.5724971", "0.5719785", "0.57157767", "0.5705406", "0.5702473", "0.56992763", "0.56953037", "0.5694682", "0.56941646", "0.5691773", "0.56224877", "0.5584285", "0.55840737", "0.5574378", "0.55741656", "0.55690676", "0.5529381", "0.5523434" ]
0.82086563
0
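
The query string of the record above already carries a Robot Framework example (`${resp} = Fusion Api Get Lsg Default Settings`); in plain Python the same call against the assumed `client` object takes no arguments, because the wrapper hard-codes `param='/defaultSettings'`:

```python
# Direct Python equivalent of the Robot Framework keyword in the query.
resp = client.fusion_api_get_lsg_default_settings()
```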
Gets a particular LSG setting. [Arguments]
def fusion_api_get_lsg_setting(self, uri, settingsId=None, api=None, headers=None): param = '/settings/%s' % (settingsId) return self.lsg.get(uri=uri, api=api, param=param, headers=headers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_setting(varname): \n gl = globals()\n if varname not in gl.keys():\n raise ValueError(\"Unknown setting %s\"%varname)\n # Here, possibly add some code to raise exceptions if some\n # parameter isn't set set properly, explaining on how to set it.\n return gl[varname]", "def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit", "def get_setting(self, setting):\n return self.do_rpc(\"get_setting\", key=key)", "def settings():\n return _get_settings()[1]", "def get(*, db_session, setting_name: str) -> Optional[Settings]:\n return _get_settings_by_name(db_session=db_session, setting_name=setting_name)", "def fusion_api_get_lsg(self, uri=None, param='', api=None, headers=None):\n return self.lsg.get(uri=uri, param=param, api=api, headers=headers)", "def get(option, default = None):\n\treturn _cfg.get('rosshm', option, fallback = default)", "def _get_lsp_config_metric(self):\n return self.__lsp_config_metric", "def get(settingName, strict=True):\n if strict and settingName not in _loaded:\n raise ValueError(\"Specified configuration setting \\\"%s\\\" does not exist\" % settingName)\n \n return _loaded.get(settingName, None)", "def get_setting_value(self, title, setting):\r\n return self.parser.get(title, setting)", "def get(self, label):\n if label in self.config[self.env]:\n return self.config[self.env][label]\n else:\n logging.warning(f'Config Mgr->get(): label: {label} not configured')\n return None", "def gsettings_get(self,schema,path,key):\n if path is None:\n gsettings = Gio.Settings.new(schema)\n else:\n gsettings = Gio.Settings.new_with_path(schema,path)\n return gsettings.get_value(key)", "def setting(setting_name):\n\n return getattr(settings, setting_name)", "def get_setting(setting_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSettingResult:\n __args__ = dict()\n __args__['settingName'] = setting_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:costmanagement:getSetting', __args__, opts=opts, typ=GetSettingResult).value\n\n return AwaitableGetSettingResult(\n cache=pulumi.get(__ret__, 'cache'),\n id=pulumi.get(__ret__, 'id'),\n kind=pulumi.get(__ret__, 'kind'),\n name=pulumi.get(__ret__, 'name'),\n scope=pulumi.get(__ret__, 'scope'),\n start_on=pulumi.get(__ret__, 'start_on'),\n type=pulumi.get(__ret__, 'type'))", "def get_setting(setting_name, default=None):\n settings_dict = getattr(settings, 'SIMPLE_FORUMS', None)\n\n if settings_dict:\n return settings_dict.get(setting_name, default)\n\n return default", "def get(self, option, argument=None):\n if argument:\n if self.config.has_section(argument) and (\n self.config.has_option(argument, \"city\") \\\n or self.config.has_option(argument, \"id\") \\\n or self.config.has_option(argument, \"st\")\n ):\n self.config.remove_section(argument)\n import sys\n message = \"WARNING: the city/id/st options are now unsupported in aliases\\n\"\n sys.stderr.write(message)\n if not self.config.has_section(argument):\n guessed = guess(\n argument,\n path=self.get(\"setpath\"),\n info=self.get(\"info\"),\n cache_search=(\n self.get(\"cache\") and self.get(\"cache_search\")\n ),\n cachedir=self.get(\"cachedir\"),\n quiet=self.get_bool(\"quiet\")\n )\n self.config.add_section(argument)\n for item in guessed.items():\n self.config.set(argument, *item)\n if self.config.has_option(argument, option):\n return self.config.get(argument, option)\n if option in self.options.__dict__:\n return 
self.options.__dict__[option]\n else:\n import os, sys\n message = \"%s error: no URI defined for %s\\n\" % (\n os.path.basename( sys.argv[0] ),\n option\n )\n sys.stderr.write(message)\n exit(1)", "def get_setting_output(setting_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSettingResult]:\n ...", "def __getitem__(self, name):\n\n return self._settings[name]", "def get_global(self, key, default=None, as_tuple=False):\n if as_tuple:\n return (self.get_global(key, default, as_tuple=False), True)\n else:\n return self.settings.get(key, default)", "def get_setting(self, id):\n return __settings__.getSetting(id)", "def get_setting(setting, override=None):\n attr_name = 'MUSES_{0}'.format(setting)\n if hasattr(settings, attr_name):\n return getattr(settings, attr_name)\n else:\n if hasattr(defaults, setting):\n return getattr(defaults, setting)\n else:\n return override", "def get(self, section, option, *args):\n cnt = self._check_args('get', 3, 4, args)\n try:\n return ConfigParser.RawConfigParser.get(self, section, option)\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n if cnt == 1:\n return args[0]\n raise", "def get_setting(self, category, setting):\n category = self.get_setting_category(category)\n if not category:\n return None\n try:\n return category[setting]\n except KeyError:\n return None", "def fusion_api_get_lsg_default_settings(self, api=None, headers=None):\n return self.lsg.get(api=api, param='/defaultSettings', headers=headers)", "def fusion_api_get_global_settings(self, uri=None, api=None, headers=None, param=''):\n return self.settings.get(uri, api, headers, param)", "def get(flag=\"rainbow\"):\n return flags[flag]", "def get_option(self, option):\n\t\treturn self.options[option]", "def get_config(group):\n config = toml.load('./config.toml')\n return config[group]", "def _get_course_setting(self, setting):\n return settings.data['courses'][self.course_id][setting]", "def getLCLimits(*args):\n return args[0].Limit.LCLimit.lc_limit" ]
[ "0.65687746", "0.60098463", "0.5887475", "0.5856363", "0.58437705", "0.57676584", "0.5724383", "0.57229125", "0.56246775", "0.5606557", "0.55877066", "0.55760974", "0.5558709", "0.5546795", "0.55110544", "0.5481453", "0.5443491", "0.5443213", "0.5432782", "0.5383437", "0.5334687", "0.5283548", "0.52551836", "0.5241833", "0.5235592", "0.52335477", "0.522242", "0.5220157", "0.5216143", "0.5211221" ]
0.68152773
0
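
The last record's keyword builds its path as `/settings/<settingsId>`, so the caller supplies the LSG URI plus a setting id. Sketch under the same assumed `client` object, with placeholder identifiers:

```python
resp = client.fusion_api_get_lsg_setting(
    uri="/rest/logical-switch-groups/<id>",  # placeholder LSG URI
    settingsId="<setting-id>",               # placeholder setting id
)
```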