Dataset columns:
  query            string   (length 9 to 9.05k)
  document         string   (length 10 to 222k)
  metadata         dict
  negatives        sequence (length 30)
  negative_scores  sequence (length 30)
  document_score   string   (length 4 to 10)
  document_rank    string   (2 distinct values)
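The schema above matches a Hugging Face datasets dump. A minimal loading sketch follows; the dataset identifier "org/codesearch-triplets" is a placeholder assumption, while the column names come from the schema itself.

    # Loading sketch; the dataset ID is hypothetical, the columns are from the schema above.
    from datasets import load_dataset

    ds = load_dataset("org/codesearch-triplets", split="train")
    row = ds[0]
    print(row["query"])           # natural-language description of the code
    print(row["document"])        # the positive code snippet
    print(len(row["negatives"]))  # 30 hard negatives per row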
Creates a new IPsecPolicy.
def create_ipsecpolicy(self, body=None):
    return self.post(self.ipsecpolicies_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def policy_create(request, **kwargs):\n body = {'policy': kwargs}\n policy = neutronclient(request).create_qos_policy(body=body).get('policy')\n return QoSPolicy(policy)", "def test_create_ipsecpolicy_with_limited_params(self):\r\n resource = 'ipsecpolicy'\r\n cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n name = 'ipsecpolicy1'\r\n auth_algorithm = 'sha1'\r\n encryption_algorithm = 'aes-128'\r\n encapsulation_mode = 'tunnel'\r\n pfs = 'group5'\r\n transform_protocol = 'esp'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n\r\n args = [name,\r\n '--tenant-id', tenant_id]\r\n\r\n position_names = ['name', 'auth_algorithm', 'encryption_algorithm',\r\n 'encapsulation_mode',\r\n 'transform_protocol', 'pfs',\r\n 'tenant_id']\r\n\r\n position_values = [name, auth_algorithm, encryption_algorithm,\r\n encapsulation_mode,\r\n transform_protocol, pfs,\r\n tenant_id]\r\n\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)", "def create_ingress_security_policy(self):\n if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:\n ingress_svc_fw_policy_uuid =\\\n VncSecurityPolicy.create_firewall_policy(\n self._k8s_event_type,\n None, None, is_global=True)\n VncSecurityPolicy.add_firewall_policy(ingress_svc_fw_policy_uuid)\n VncSecurityPolicy.ingress_svc_fw_policy_uuid =\\\n ingress_svc_fw_policy_uuid", "def create_firewall_policy(self, body=None):\r\n return self.post(self.firewall_policies_path, body=body)", "def test_create_ipsecpolicy_all_params(self):\r\n resource = 'ipsecpolicy'\r\n cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n name = 'ipsecpolicy1'\r\n description = 'first-ipsecpolicy1'\r\n auth_algorithm = 'sha1'\r\n encryption_algorithm = 'aes-256'\r\n encapsulation_mode = 'tunnel'\r\n pfs = 'group5'\r\n transform_protocol = 'ah'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n lifetime = 'units=seconds,value=20000'\r\n\r\n args = [name,\r\n '--description', description,\r\n '--tenant-id', tenant_id,\r\n '--auth-algorithm', auth_algorithm,\r\n '--encryption-algorithm', encryption_algorithm,\r\n '--transform-protocol', transform_protocol,\r\n '--encapsulation-mode', encapsulation_mode,\r\n '--lifetime', lifetime,\r\n '--pfs', pfs]\r\n\r\n position_names = ['name', 'auth_algorithm', 'encryption_algorithm',\r\n 'encapsulation_mode', 'description',\r\n 'transform_protocol', 'pfs',\r\n 'tenant_id']\r\n\r\n position_values = [name, auth_algorithm, encryption_algorithm,\r\n encapsulation_mode, description,\r\n transform_protocol, pfs,\r\n tenant_id]\r\n extra_body = {\r\n 'lifetime': {\r\n 'units': 'seconds',\r\n 'value': 20000,\r\n },\r\n }\r\n\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n extra_body=extra_body)", "def CreateSecurityPolicy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateSecurityPolicy\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateSecurityPolicyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_policy(policystore_url, create_policy_request, verbose):\n\n if verbose:\n logging.info('Creating policy')\n pprint.pprint(create_policy_request)\n\n create_url = policystore_url + POLICYSTORE_PREFIX + 'CreateEntitlementPolicy'\n\n r = requests.post(\n 
create_url, headers=headers(), json=create_policy_request)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n\n sys.exit('Failed to create policy')\n\n resp = r.json()\n\n logging.info(\n f'SUCCESS: Created policy - ID: {resp[\"policy_id\"]}, Token: {resp[\"token\"]}'\n )\n\n return resp", "def create_ikepolicy(self, body=None):\r\n return self.post(self.ikepolicies_path, body=body)", "def Create(self,\n firewall_policy=None,\n parent_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeCreateRequestTuple(firewall_policy, parent_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Insert(\n self._MakeCreateRequestTuple(firewall_policy, parent_id)[2])\n return self.WaitOperation(\n op_res, message='Creating the organization firewall policy.')", "def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy", "def minimum_packet_rate_rule_create(request, policy_id, **kwargs):\n body = {'minimum_packet_rate_rule': kwargs}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'minimum_packet_rate_rule': kwargs}\n rule = 'minimum_packet_rate_rule'\n minimum_packet_rate_rule = neutronclient(request)\\\n .create_minimum_packet_rate_rule(policy_id, body).get(rule)\n return MinimumPacketRateRule(minimum_packet_rate_rule)", "def create_policy(self, create_policy_details, **kwargs):\n resource_path = \"/policies\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_policy got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_policy_details,\n response_type=\"Policy\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_policy_details,\n response_type=\"Policy\")", "def __init__(__self__, *,\n policy_id: pulumi.Input[str],\n policy_parameters: Optional[pulumi.Input['PolicyParametersArgs']] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def __init__(__self__, *,\n policy_id: pulumi.Input[str],\n 
policy_parameters: Optional[pulumi.Input['PolicyParametersArgs']] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def create_policy(self, policy_name, policy_document, delete=True, **kwargs):\n try:\n Oprint.info('Creating IAM policy {}'.format(policy_name), 'iam')\n \n policy = self.get_policy(policy_name=policy_name)\n if policy and policy.get('Policy'):\n if not delete:\n Oprint.info('Found existing IAM policy {}'.format(policy_name), 'iam')\n return policy\n else:\n # Can not delete a policy if it has been attached\n if policy.get('Policy').get('AttachmentCount') > 0:\n Oprint.warn('Policy {} already exists and has been attached to a role. Cannot delete'.format(policy.get('Policy').get('PolicyName')), 'iam')\n return policy\n\n self._client.delete_policy(PolicyArn=self.get_policy_arn(policy_name))\n \n policy = self._client.create_policy(PolicyName=policy_name, PolicyDocument=policy_document, **kwargs)\n\n Oprint.info('IAM policy {} has been created'.format(policy_name), 'iam')\n except Exception as e:\n Oprint.err(e, 'iam')\n\n return policy", "def create_policy(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_policy`\")\n\n resource_path = '/oapi/v1/policies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_vpc_if_policy_group(self, name, aep_name):\n policy_group_mo = AccBndlGrp('uni/infra/funcprof/', name, lagT='node')\n self.commit(policy_group_mo)\n # if attachable entity profile does not exists, creates a new one\n class_query = ClassQuery('infraAttEntityP')\n class_query.propFilter = 'eq(infraAttEntityP.name, \"' + AEP_PREFIX + aep_name + '\")'\n pd_list = self.moDir.query(class_query)\n if len(pd_list) == 0:\n vlan_pool_mo = self.create_vlan_pool(VLAN_POOL_PREFIX + aep_name, 'static')\n DomP_mo = self.create_physical_domain(PD_PREFIX + aep_name, str(vlan_pool_mo.dn))\n AttEntityP_mo = self.create_attachable_entity_profile(AEP_PREFIX + aep_name, str(DomP_mo.dn))\n else:\n AttEntityP_mo = pd_list[0]\n # Assign attached entity profile\n self.commit(\n RsAttEntP(policy_group_mo.dn, 
tDn=str(AttEntityP_mo.dn))\n )\n # Assign interface policies. For non-defaults, check if is already created. If not, the system will create them\n IfPolmo = self.moDir.lookupByDn('uni/infra/cdpIfP-CDP-ON')\n if not IfPolmo:\n IfPolmo = IfPol('uni/infra','CDP-ON',adminSt='enabled')\n self.commit(IfPolmo)\n self.commit(\n RsCdpIfPol(policy_group_mo.dn, tnCdpIfPolName=IfPolmo.name)\n )\n self.commit(\n RsHIfPol(policy_group_mo.dn, tnFabricHIfPolName='default')\n )\n self.commit(\n RsL2IfPol(policy_group_mo.dn, tnL2IfPolName='default')\n )\n LagPolmo = self.moDir.lookupByDn('uni/infra/lacplagp-LACP')\n if not LagPolmo:\n LagPolmo = LagPol('uni/infra', 'LACP', mode='active')\n self.commit(LagPolmo)\n self.commit(\n RsLacpPol(policy_group_mo.dn, tnLacpLagPolName=LagPolmo.name)\n )\n self.commit(\n RsLldpIfPol(policy_group_mo.dn, tnLldpIfPolName='default')\n )\n self.commit(\n RsMcpIfPol(policy_group_mo.dn, tnMcpIfPolName='default')\n )\n self.commit(\n RsMonIfInfraPol(policy_group_mo.dn, tnMonInfraPolName='default')\n )\n self.commit(\n RsStormctrlIfPol(policy_group_mo.dn, tnStormctrlIfPolName='default')\n )\n self.commit(\n RsStpIfPol(policy_group_mo.dn, tnStpIfPolName='default')\n )\n return policy_group_mo", "def pre_network_policy_create(self, resource_dict):\n pass", "def __init__(self, policy_id=None, policy_name=None, is_policy_enabled=None, policy_target_version=None, policy_deployment_method=None, software_title=None, software_title_configuration_id=None, pending=None, completed=None, deferred=None, failed=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._policy_id = None\n self._policy_name = None\n self._is_policy_enabled = None\n self._policy_target_version = None\n self._policy_deployment_method = None\n self._software_title = None\n self._software_title_configuration_id = None\n self._pending = None\n self._completed = None\n self._deferred = None\n self._failed = None\n self.discriminator = None\n\n if policy_id is not None:\n self.policy_id = policy_id\n if policy_name is not None:\n self.policy_name = policy_name\n if is_policy_enabled is not None:\n self.is_policy_enabled = is_policy_enabled\n if policy_target_version is not None:\n self.policy_target_version = policy_target_version\n if policy_deployment_method is not None:\n self.policy_deployment_method = policy_deployment_method\n if software_title is not None:\n self.software_title = software_title\n if software_title_configuration_id is not None:\n self.software_title_configuration_id = software_title_configuration_id\n if pending is not None:\n self.pending = pending\n if completed is not None:\n self.completed = completed\n if deferred is not None:\n self.deferred = deferred\n if failed is not None:\n self.failed = failed", "def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)", "def __init__(__self__, *,\n api_version: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,\n spec: Optional[pulumi.Input['PodSecurityPolicySpecArgs']] = None):\n if api_version is not None:\n pulumi.set(__self__, \"api_version\", 'policy/v1beta1')\n if kind is not None:\n pulumi.set(__self__, \"kind\", 'PodSecurityPolicy')\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if spec is not None:\n pulumi.set(__self__, 
\"spec\", spec)", "def post_network_policy_create(self, resource_dict):\n pass", "def _create_ipsec_profile(self, context, connection):\n # Note(asarfaty) the NSX profile can be reused, so we can consider\n # creating it only once in the future, and keeping a use-count for it.\n # There is no driver callback for profiles creation so it has to be\n # done on connection creation.\n ipsec_policy_id = connection['ipsecpolicy_id']\n ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(\n context, ipsec_policy_id)\n\n try:\n profile = self._nsx_vpn.tunnel_profile.create(\n ipsecpolicy['name'] or ipsecpolicy['id'],\n description=ipsecpolicy['description'],\n encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[\n ipsecpolicy['encryption_algorithm']],\n digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[\n ipsecpolicy['auth_algorithm']],\n dh_group=ipsec_utils.PFS_MAP[ipsecpolicy['pfs']],\n pfs=True,\n sa_life_time=ipsecpolicy['lifetime']['value'],\n tags=self._nsx_tags(context, connection))\n except nsx_lib_exc.ManagerError as e:\n msg = _(\"Failed to create a tunnel profile: %s\") % e\n raise nsx_exc.NsxPluginException(err_msg=msg)\n return profile['id']", "def __init__(__self__, *,\n policy: pulumi.Input[str],\n resource_arn: pulumi.Input[str]):\n pulumi.set(__self__, \"policy\", policy)\n pulumi.set(__self__, \"resource_arn\", resource_arn)", "def gen_network_policy(project, entries):\n pol = NetworkPolicy(name='default',\n parent_obj=project,\n network_policy_entries=entries)\n return pol", "def create_ipsec_site_connection(self,ipsecpolicy_id, ikepolicy_id,\n vpnservice_id, psk, peer_cidrs,\n peer_address, peer_id,\n ipsec_body = None, **kwargs):\n _body = {\n \"psk\":psk,\n \"peer_cidrs\":peer_cidrs,\n \"ipsecpolicy_id\":ipsecpolicy_id,\n \"ikepolicy_id\":ikepolicy_id,\n \"vpnservice_id\":vpnservice_id,\n \"peer_address\":peer_address,\n \"peer_id\":peer_id\n }\n if ipsec_body and type(ipsec_body) == dict :\n _body.update(ipsec_body)\n body = {\"ipsec_site_connection\":_body}\n return self._post(self.ipsec_site_connections_path, body=body)", "def __init__(__self__, *,\n policy_id: str,\n policy_version: str,\n policy_parameters: Optional['outputs.PolicyParametersResponse'] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n pulumi.set(__self__, \"policy_version\", policy_version)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }", "def __init__(__self__, *,\n policy: Optional[pulumi.Input[str]] = None,\n resource_arn: Optional[pulumi.Input[str]] = None):\n if policy is not None:\n pulumi.set(__self__, \"policy\", policy)\n if resource_arn is not None:\n pulumi.set(__self__, \"resource_arn\", resource_arn)", "def add_policy(self, sec, ptype, rule):\n self._save_policy_line(ptype, rule)" ]
[ "0.6523855", "0.63184994", "0.60983205", "0.58342034", "0.5829371", "0.5804038", "0.5802355", "0.57480276", "0.56827766", "0.5611309", "0.5586934", "0.55758935", "0.5554747", "0.5554747", "0.55451876", "0.55408686", "0.55269873", "0.5513526", "0.5468993", "0.54595256", "0.5349384", "0.53320843", "0.5326034", "0.5323844", "0.53223944", "0.53164506", "0.52967197", "0.52749985", "0.5254553", "0.5250045" ]
0.7491522
0
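A usage sketch for the positive document above, assuming `neutron` is an authenticated python-neutronclient v2.0 Client; the field values are borrowed from the test cases in the negatives list.

    # Sketch only: `neutron` is assumed to be a neutronclient.v2_0.client.Client instance.
    body = {'ipsecpolicy': {'name': 'ipsecpolicy1',
                            'auth_algorithm': 'sha1',
                            'encryption_algorithm': 'aes-128',
                            'encapsulation_mode': 'tunnel',
                            'transform_protocol': 'esp',
                            'pfs': 'group5'}}
    policy = neutron.create_ipsecpolicy(body=body)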
Deletes the specified IPsecPolicy.
def delete_ipsecpolicy(self, ipsecpolicy):
    return self.delete(self.ipsecpolicy_path % (ipsecpolicy))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def policy_delete(request, policy_id):\n neutronclient(request).delete_qos_policy(policy_id)", "def delete_policy(policy_id):\n policy = PolicyService.get_policy_by_id(policy_id)\n if policy is None:\n abort(404)\n\n policy.delete()\n\n return {}", "def delete(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def delete_policy(policystore_url, policy_credentials, verbose):\n\n if verbose:\n logging.info('Deleting policy')\n pprint.pprint(policy_credentials)\n\n delete_url = policystore_url + POLICYSTORE_PREFIX + 'DeleteEntitlementPolicy'\n\n r = requests.post(delete_url, headers=headers(), json=policy_credentials)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n sys.exit('Failed to delete policy')\n\n logging.info('SUCCESS: Deleted policy')", "def delete_policy(self, policy_name):\r\n return self.connection.delete_lb_policy(self.name, policy_name)", "def delete_policy(self, policy_ref: str) -> None:\n self.batch_write(\n [self.batch_detach_policy(policy_ref, obj_ref) for obj_ref in self.list_policy_attachments(\n policy_ref,\n ConsistencyLevel=ConsistencyLevel.SERIALIZABLE.name)])\n self.batch_write(\n [self.batch_detach_object(parent_ref, link_name) for parent_ref, link_name in self.list_object_parents(\n policy_ref,\n ConsistencyLevel=ConsistencyLevel.SERIALIZABLE.name)])\n retry(**cd_read_retry_parameters)(cd_client.delete_object)(\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': policy_ref})", "def delete_firewall_policy(self, firewall_policy):\r\n return self.delete(self.firewall_policy_path % (firewall_policy))", "def rbac_policy_delete(request, policy_id):\n neutronclient(request).delete_rbac_policy(policy_id)", "def delete_ikepolicy(self, ikepolicy):\r\n return self.delete(self.ikepolicy_path % (ikepolicy))", "def Delete(self, fp_id=None, batch_mode=False, only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeDeleteRequestTuple(fp_id=fp_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Delete(self._MakeDeleteRequestTuple(fp_id=fp_id)[2])\n operation_poller = DeletePoller(self._service, self.ref)\n return self.WaitOperation(\n op_res,\n operation_poller=operation_poller,\n message='Deleting the organization firewall policy.')", "def delete_qos_policy(self, name_or_id):\n if not self._has_neutron_extension('qos'):\n raise exc.OpenStackCloudUnavailableExtension(\n 'QoS extension is not available on target cloud'\n )\n policy = self.network.find_qos_policy(name_or_id)\n if not policy:\n self.log.debug(\"QoS policy %s not found for deleting\", name_or_id)\n return False\n\n self.network.delete_qos_policy(policy)\n\n return True", "def post_network_policy_delete(self, resource_id, resource_dict):\n pass", "def delete_policy(self, policy_id, **kwargs):\n resource_path = \"/policies/{policyId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_policy got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"policyId\": policy_id\n }\n\n 
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def pre_network_policy_delete(self, resource_id):\n pass", "def remove_policy(self, sec, ptype, rule):\n line = self.convert_to_item(ptype, rule)\n\n _id = line['id']['S']\n\n self.dynamodb.delete_item(\n Key={\n 'id': {\n 'S': _id,\n }\n },\n TableName=self.table_name,\n )\n\n return True", "def delete_group_policy(self, group_name, policy_name):\r\n params = {'GroupName' : group_name,\r\n 'PolicyName' : policy_name}\r\n return self.get_response('DeleteGroupPolicy', params, verb='POST')", "def delete(nitro, policypatset):\r\n __policypatset = NSPatset()\r\n __policypatset.set_name(policypatset.get_name())\r\n return __policypatset.delete_resource(nitro)", "def dynamic_vnic_conn_policy_delete(handle, name, parent_dn=\"org-root\"):\n mo = dynamic_vnic_conn_policy_get(handle, name, parent_dn)\n if not mo:\n raise UcscOperationError(\"dynamic_vnic_conn_policy_delete\",\n \"Dynamic vNIC Connectivity Policy \"\n \"does not exist\")\n handle.remove_mo(mo)\n handle.commit()", "def deletion_policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deletion_policy\")", "def deletion_policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"deletion_policy\")", "def DeleteSecurityPolicy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteSecurityPolicy\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteSecurityPolicyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def remove_policy(self, sec, ptype, rule):\r\n deleted_count = self._delete_policy_lines(ptype, rule)\r\n return deleted_count > 0", "def delete(self, request, l7_policy_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_l7_policy,\n l7_policy_id,\n load_balancer_getter=l7_policy_get_load_balancer_id,\n resource_id=l7_policy_id)", "def DeleteAssociation(self,\n firewall_policy_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeDeleteAssociationRequestTuple(firewall_policy_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.RemoveAssociation(\n self._MakeDeleteAssociationRequestTuple(firewall_policy_id)[2])\n return self.WaitOperation(\n op_res,\n message='Deleting 
the association for the organization firewall policy.'\n )", "def delete_metric_policy(ContainerName=None):\n pass", "def delete_retention_policy(self) -> pulumi.Output[Optional['outputs.DeleteRetentionPolicyResponse']]:\n return pulumi.get(self, \"delete_retention_policy\")", "def policy_delete_all(session, domain, path=\"/\"):\n client = session.client('iam')\n resp = client.list_policies(Scope='Local', PathPrefix=path)\n\n prefix = domain.replace('.', '-')\n for policy in resp.get('Policies', []):\n if policy['PolicyName'].startswith(prefix):\n ARN = policy['Arn']\n if policy['AttachmentCount'] > 0:\n # cannot delete a policy if it is still in use\n attached = client.list_entities_for_policy(PolicyArn=ARN)\n for group in attached.get('PolicyGroups', []):\n client.detach_group_policy(GroupName=group['GroupName'], PolicyArn=ARN)\n for user in attached.get('PolicyUsers', []):\n client.detach_user_policy(UserName=user['UserName'], PolicyArn=ARN)\n for role in attached.get('PolicyRoles', []):\n client.detach_role_policy(RoleName=role['RoleName'], PolicyArn=ARN)\n client.delete_policy(PolicyArn=ARN)", "def delete_bucket_policy(Bucket=None):\n pass", "def delete(self, params=None):\n self.logger.debug('Deleting %s with parameters: %s'\n % (self.type_name, params))\n return self.client.delete_load_balancer_policy(**params)", "def deletion_policy(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"deletion_policy\")" ]
[ "0.761043", "0.6945922", "0.67073476", "0.6707107", "0.6587773", "0.65314233", "0.65010214", "0.6463276", "0.6461859", "0.6396434", "0.60502285", "0.60077006", "0.5999948", "0.5989723", "0.5949439", "0.5948413", "0.5943496", "0.5850399", "0.5848674", "0.5848674", "0.5815025", "0.5768193", "0.5759123", "0.57526064", "0.5749771", "0.57429147", "0.5722448", "0.57140976", "0.56812316", "0.567073" ]
0.7415815
1
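A matching usage sketch, under the same assumption of an authenticated `neutron` client; the policy ID comes from the create call sketched after the previous row.

    # Sketch only: delete takes the server-assigned policy ID.
    neutron.delete_ipsecpolicy(policy['ipsecpolicy']['id'])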
Fetches a list of all load balancer vips for a tenant.
def list_vips(self, retrieve_all=True, **_params):
    # Pass filters in "params" argument to do_request
    return self.list('vips', self.vips_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_loadbalancers(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-loadbalancers option\"\n )\n\n ret = {}\n conn = get_conn()\n datacenter = get_datacenter(conn)\n\n for item in conn.list_loadbalancers(datacenter[\"id\"])[\"items\"]:\n lb = {\"id\": item[\"id\"]}\n lb.update(item[\"properties\"])\n ret[lb[\"name\"]] = lb\n\n return ret", "def list_elb(region, filter_by_kwargs):\n conn = boto.ec2.elb.connect_to_region(region)\n instances = conn.get_all_load_balancers()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_all(self, name=None, label_selector=None):\n # type: (Optional[str], Optional[str]) -> List[BoundLoadBalancer]\n return super(LoadBalancersClient, self).get_all(\n name=name, label_selector=label_selector\n )", "def get_vms(self, user=None, count=None):\n crit = dict()\n if count is not None:\n crit['count'] = count\n s = self._NDL_API('getvms', crit, user)\n if len(s) == 0:\n return []\n ips = s.split(',')\n # if the localhost's IP is in the list, move it to the front\n localips = getmyips()\n for i in range(len(ips)):\n if ips[i] in localips:\n x = ips[i]\n del ips[i]\n return [ x, ] + ips\n # otherwise order does not matter?\n return ips", "def find_load_balancers(self, instance_id): # type: (str) -> List[str]\n elb = self._get_client()\n load_balancer_descriptions = elb.describe_load_balancers()['LoadBalancerDescriptions']\n\n load_balancers = [\n lb['LoadBalancerName'] for lb in load_balancer_descriptions\n if instance_id in [instances['InstanceId'] for instances in lb['Instances']]\n ]\n\n self._logger.info('Found following load balancers: %s', load_balancers)\n\n if len(load_balancers) > 1:\n self._logger.warning('The instance %s is under multiple load balancers: %s', instance_id, load_balancers)\n\n return load_balancers", "def get(self, request):\n conn = get_sdk_connection(request)\n lb_list = _sdk_object_to_list(conn.load_balancer.load_balancers(\n project_id=request.user.project_id))\n if request.GET.get('full') and neutron.floating_ip_supported(request):\n add_floating_ip_info(request, lb_list)\n return {'items': lb_list}", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "async def Available_Endpoints() -> List[Dict[str, str]]:\n return [{\"path\": endpoint} for endpoint in busylightapi.endpoints]", "def get_local_lbs(self):\r\n mask = ('mask[loadBalancerHardware[datacenter],ipAddress]')\r\n return self.account.getAdcLoadBalancers(mask=mask)", "def get_list(\n self,\n name=None, # type: Optional[str]\n label_selector=None, # type: Optional[str]\n page=None, # type: Optional[int]\n per_page=None, # type: Optional[int]\n ):\n # type: (...) 
-> PageResults[List[BoundLoadBalancer], Meta]\n params = {}\n if name is not None:\n params[\"name\"] = name\n if label_selector is not None:\n params[\"label_selector\"] = label_selector\n if page is not None:\n params[\"page\"] = page\n if per_page is not None:\n params[\"per_page\"] = per_page\n\n response = self._client.request(url=\"/load_balancers\", method=\"GET\", params=params)\n\n ass_load_balancers = [\n BoundLoadBalancer(self, load_balancer_data) for load_balancer_data in response[\"load_balancers\"]\n ]\n return self._add_meta_to_result(ass_load_balancers, response)", "def floating_ip_list(tenant_id, auth_token):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='',\r\n service=\"network\",\r\n path=\"floatingips.json\")\r\n return content", "def get_elbs(elbclient):\r\n try:\r\n resp = elbclient.describe_load_balancers()\r\n return list(map(\r\n lambda x:x['LoadBalancerName'],\r\n resp['LoadBalancerDescriptions']\r\n ))\r\n except Exception as ex:\r\n print(ex.message)\r\n return None", "def get_vlans():\n query = {\"type\": \"op\", \"cmd\": \"<show><vlan>all</vlan></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "def get_ipaddresses(auth):\n url_ipaddresses = \"http://\" + auth.ipaddr + \"/rest/\"+auth.version+\"/ipaddresses\"\n try:\n r = requests.get(url_ipaddresses, headers = auth.cookie)\n ipaddresses = json.loads(r.text)['ip_address_subnet_element']\n return ipaddresses\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + \" get_ipaddresses: An Error has occured\"", "def list_subnets(self, retrieve_all=True, **_params):\r\n return self.list('subnets', self.subnets_path, retrieve_all,\r\n **_params)", "def test_list_vips_sort(self):\n resources = \"vips\"\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def list(self, **params):\n\n _, _, vouchers = self.http_client.get(\"/vouchers\", params=params)\n return vouchers", "def test_list_vips_sort(self):\r\n resources = \"vips\"\r\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def get_vlans_list(self, vdc=None):\n vlans = []\n for vdcname in vdc:\n vlans.append(self.vdcs[vdcname].get_vlans_list())\n return vlans", "def list(self, include=\"all\"):\n if include not in [\"all\", \"public\", \"private\"]:\n RutimeError(\"include {} must be all, public or private.\".format(include))\n path = 'vips?include={}'.format(include)\n return self._session.get(path)", "def get_all(self, name: str | None = None) -> list[BoundLoadBalancerType]:\n return self._iter_pages(self.get_list, name=name)", "def list(self, all_tenants=False, **search_opts):\n if not all_tenants:\n tenant_id = self.request.user.tenant_id\n # In Neutron, list_floatingips returns Floating IPs from\n # all tenants when the API is called with admin role, so\n # we need to filter them with tenant_id.\n search_opts['tenant_id'] = tenant_id\n port_search_opts = {'tenant_id': tenant_id}\n else:\n port_search_opts = {}\n fips = self.client.list_floatingips(**search_opts)\n fips = fips.get('floatingips')\n # Get port list to add instance_id to floating IP list\n # instance_id is stored in device_id attribute\n ports = port_list(self.request, **port_search_opts)\n 
port_dict = collections.OrderedDict([(p['id'], p) for p in ports])\n for fip in fips:\n self._set_instance_info(fip, port_dict.get(fip['port_id']))\n return [FloatingIp(fip) for fip in fips]", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_vendors(self, count: int = 10) -> list:\n return list(itertools.islice(self.client.vendors.get_all_generator(), count))", "def list(self, args):\n try:\n cloud = self._context.getCloudService()\n vdcs = cloud.listVirtualDatacenters()\n pprint_vdcs(vdcs)\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()", "def list(self, **params):\n # This is to ensure tenant_id key is not populated\n # if tenant_id=None is specified.\n tenant_id = params.pop('tenant_id', self.request.user.tenant_id)\n if tenant_id:\n params['tenant_id'] = tenant_id\n return self._list(**params)", "def get_vlans(cohesity_client):\n vlans = cohesity_client.vlan.get_vlans()\n vlans = vlans if vlans else []\n for vlan in vlans:\n exported_res_dict[\"Vlans\"].append(vlan.iface_group_name)\n return vlans", "def user_ip_list(uid):\r\n session = tables.get_session()\r\n res = []\r\n if session is None:\r\n return res\r\n try:\r\n ip_table = IpAddrs()\r\n res.extend(ip_table.get_ips_by_uid(uid, session))\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user ip list failed: %s', err)\r\n return []\r\n finally:\r\n session.close()\r\n return res", "def get_vendor_bills(self, count: int = 10) -> list:\n return list(\n itertools.islice(self.client.vendor_bills.get_all_generator(), count)\n )" ]
[ "0.5617398", "0.5593716", "0.55149007", "0.54893655", "0.5479464", "0.54651594", "0.5372961", "0.53601766", "0.5348587", "0.5264206", "0.5247198", "0.5216902", "0.52149767", "0.5203361", "0.51989055", "0.51749426", "0.5169607", "0.5164837", "0.5164028", "0.5156722", "0.515319", "0.5102104", "0.50769585", "0.5073106", "0.5067546", "0.5058322", "0.50509393", "0.50294", "0.5026187", "0.50235415" ]
0.66470325
0
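A usage sketch for the list call above, again assuming an authenticated `neutron` client; keyword arguments are forwarded as query filters, and the response is keyed by 'vips'.

    # Sketch only: filter kwargs such as name= are passed through to the request.
    vips = neutron.list_vips(retrieve_all=True, name='my-vip')
    for vip in vips['vips']:
        print(vip['id'], vip['address'])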
Fetches information about a certain load balancer vip.
def show_vip(self, vip, **_params):
    return self.get(self.vip_path % (vip), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_balancer_ip(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n\n lb_ip = cluster[\"load_balancers\"][0][\"ip\"]\n return lb_ip", "def get(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.find_load_balancer(loadbalancer_id)\n loadbalancer_dict = _get_sdk_object_dict(loadbalancer)\n if request.GET.get('full') and neutron.floating_ip_supported(request):\n add_floating_ip_info(request, [loadbalancer_dict])\n return loadbalancer_dict", "def get_vip_address(self, vip_name):\n networks = self.nailgun_client.get_networks(self.cluster_id)\n vip = networks.get('vips').get(vip_name, {}).get('ipaddr', None)\n asserts.assert_is_not_none(\n vip, \"Failed to get the IP of {} server\".format(vip_name))\n\n logger.debug(\"VIP '{0}': {1}\".format(vip_name, vip))\n return vip", "def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None", "def getAz(ip, response_ilb):\n for i in response_ilb['NetworkInterfaces']:\n logger.info('GetAz: Details about Internal Load Balancer')\n for k in i['PrivateIpAddresses']:\n logger.info('GetAz: IP Address of ILB is :' + k['PrivateIpAddress'])\n if k['PrivateIpAddress'] == ip:\n return i['AvailabilityZone']\n\n return None", "def get(self, request):\n conn = get_sdk_connection(request)\n lb_list = _sdk_object_to_list(conn.load_balancer.load_balancers(\n project_id=request.user.project_id))\n if request.GET.get('full') and neutron.floating_ip_supported(request):\n add_floating_ip_info(request, lb_list)\n return {'items': lb_list}", "def lookup_ip(ikey, skey, host, ip):\n response = client.call_json_api(\n ikey, skey, host, 'GET', '/verify/v1/lookup/ip.json',\n ip=[ip])\n return response", "def slb_ip(self) -> str:\n return pulumi.get(self, \"slb_ip\")", "def _get_ip_resp(api_url: str):\n return get(api_url, headers={'user-agent': USER_AGENT})", "def ip_info_list(ip_addr):\r\n session = tables.get_session()\r\n if session is None:\r\n return {'getData': False}\r\n response = {}\r\n try:\r\n response['getData'] = True\r\n tuning_table = TuningTable()\r\n response['tuning'] = tuning_table.get_all_tunings_by_ip(ip_addr, session)\r\n collection_table = CollectionTable()\r\n response['analysis'] = collection_table.get_all_collection_by_ip(ip_addr, session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get analysis and tuning list failed: %s', err)\r\n return response\r\n finally:\r\n session.close()\r\n return response", "def get_vtep_ip_by_label(cls, client_object, label=None):\n nsxa_socket = cls._get_nsxa_socket(client_object)\n cmd = '%s -t %s vtep/ip %%s' % (cls.CLI, nsxa_socket)\n out = client_object.connection.request(\n cmd % label).response_data.strip()\n return utilities.parse_one_line_output(\n out, record_delim=',', key_val_delim=':')[cls.IP_ADDRESS]", "def describe_balancer(ctx):\n data = self.get_balancer_info()\n if data is not None:\n ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} does not exist.'.format(self.get_balancer_name()))", "def lookup_robtex_ip_info(self,ip_address):\n if 
helpers.is_ip(ip_address):\n try:\n request = requests.get(self.robtex_api_endpoint.format(ip_address),timeout=self.requests_timeout)\n ip_json = request.json()\n return ip_json\n except requests.exceptions.Timeout:\n click.secho(\"\\n[!] The connection to Robtex timed out!\",fg=\"red\")\n except requests.exceptions.TooManyRedirects:\n click.secho(\"\\n[!] The connection to Robtex encountered too many redirects!\",fg=\"red\")\n except requests.exceptions.RequestException as error:\n click.secho(\"\\n[!] The connection to Robtex encountered an error!\",fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")\n return None\n else:\n click.secho(\"[!] The provided IP for Robtex is invalid!\",fg=\"red\")", "def _GetIpAddress(self):\n ingress_name = '%s-ingress' % self.name\n get_cmd = [\n 'get', 'ing', ingress_name, '-o',\n 'jsonpath={.status.loadBalancer.ingress[*].ip}'\n ]\n stdout, _, _ = RunKubectlCommand(get_cmd)\n ip_address = stdout\n if ip_address:\n self.ip_address = ip_address", "def get_ip(self):", "def get_ip_information(self, ip: str) -> Dict:\n response = self._http_request(\n method=\"GET\", url_suffix=f\"/smoke/{ip}\", resp_type=\"response\", ok_codes=(200, 404)\n )\n\n if response.status_code == 429:\n raise Exception(\n \"You have been rate limited by CrowdSec CTI API. Please upgrade to Pro or wait.\"\n )\n\n return response.json()", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def GetLoadBalancerIP(self, service_name):\n get_cmd = [\n 'get', 'service', service_name, '-o',\n 'jsonpath={.status.loadBalancer.ingress[0].ip}'\n ]\n\n stdout, _, _ = RunKubectlCommand(get_cmd)\n\n try:\n # Ensure the load balancer is ready by parsing the output IP\n ip_address = ipaddress.ip_address(stdout)\n except ValueError:\n raise errors.Resource.RetryableCreationError(\n \"Load Balancer IP for service '%s' is not ready.\" % service_name)\n\n return format(ip_address)", "def get_ip(tag,env=None,eip=False):\n api_url = 'http://api.rahulinux.io/ip?host={0}&env={1}&eip={2}'\n try:\n resp = requests.get(api_url.format(tag,env,eip))\n except requests.exceptions.RequestException as e:\n return e\n if len(resp.text) >= 30:\n return resp.text.split()\n return [ resp.text ]", "def ip_lookup(self, ip_address):\r\n obj = self.client['Network_Subnet_IpAddress']\r\n return obj.getByIpAddress(ip_address, mask='hardware, virtualGuest')", "def slb_ip(self) -> Optional[str]:\n return pulumi.get(self, \"slb_ip\")", "def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def vt_ip_check(ip, vt_api):\n if not is_IPv4Address(ip):\n return None\n\n url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n parameters = {'ip': ip, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None", "async def get_ip():\n\turl = 'https://cheese.formice.com/api/tfm/ip'\n\tdata = await request_api(url)\n\n\tif not len(data):\n\t\t# Empty dictionary, request failed, let's use default server IP\n\t\tsuccess = True\n\telse:\n\t\tsuccess = data.pop('success', False)\n\t\terror = data.pop('error', '').capitalize()\n\t\tdescription = data.pop('description', 'No description was provided.')\n\n\tif not success:\n\t\tif 
error == 'Maintenance':\n\t\t\traise MaintenanceError('The game is under maintenance.')\n\n\t\tif error == 'Internal':\n\t\t\traise InternalError(description)\n\n\t\traise EndpointError(f'{error}: {description}')\n\n\treturn Keys(version=666, **data.get('server', {}))", "def get_last_banner(self, ip):\n return self.last_banners.get(ip)", "def get_tun_ip(ip_addr, username):\n cmd = \"ifconfig tun0 | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}'\" \n tun_ip = remote_fetch(ip_addr, username, cmd)[0].strip()\n return tun_ip", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def add_virtualip(self, loadbalancer, vip):\n return loadbalancer.add_virtualip(vip)", "def add_virtualip(self, lb, vip):\n resp, body = self.api.method_post(\"/loadbalancers/%s/virtualips\" % lb.id,\n body=vip.to_dict())\n return resp, body", "def publicIP(self):\n return self.query('https://plex.tv/:/ip')" ]
[ "0.66438174", "0.652625", "0.6481885", "0.6382166", "0.63138247", "0.61080116", "0.6099167", "0.6005084", "0.59748024", "0.5965726", "0.5954159", "0.59242725", "0.5908232", "0.5887846", "0.5850761", "0.5808859", "0.5764781", "0.5755382", "0.56931424", "0.56830674", "0.56260073", "0.56124943", "0.56038", "0.5593232", "0.55848044", "0.55732334", "0.5570087", "0.5554089", "0.55345535", "0.5502562" ]
0.6686279
0
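A usage sketch for the show call above, under the same client assumption; the VIP ID is hypothetical, and `fields` restricts which attributes the server returns.

    # Sketch only: 'my-vip-id' is a placeholder ID.
    vip = neutron.show_vip('my-vip-id', fields=['name', 'address'])
    print(vip['vip'])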
Creates a new load balancer vip.
def create_vip(self, body=None):
    return self.post(self.vips_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_vip(self, context, vip, netinfo):\n LOG.info(_(\"Agent received create_vip\"))\n self.driver.create_vip(vip, netinfo)", "def add_virtualip(self, lb, vip):\n resp, body = self.api.method_post(\"/loadbalancers/%s/virtualips\" % lb.id,\n body=vip.to_dict())\n return resp, body", "def add_virtualip(self, loadbalancer, vip):\n return loadbalancer.add_virtualip(vip)", "def create_balancer(self):\n app_env = self.get_current_env()\n balancer_name = self.get_balancer_name()\n subnet_ids = self.get_subnet_ids()\n\n response = self.client.create_load_balancer(\n Name=balancer_name,\n Subnets=subnet_ids,\n SecurityGroups=[self.get_security_group_id(self.get_security_group_short_name())],\n Scheme='internet-facing',\n Tags=[\n {\n 'Key': 'chops-aws-project',\n 'Value': self.get_aws_project_name(),\n },\n {\n 'Key': 'environment',\n 'Value': app_env,\n },\n ],\n Type='application',\n IpAddressType='ipv4',\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['LoadBalancers'][0]", "def create_balancer(ctx):\n if not self.balancer_exists():\n data = self.create_balancer()\n ctx.info('Successfully created load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} already exists, nothing to create.'.format(\n self.get_balancer_name()\n ))", "def create_loadbalancer(call=None, kwargs=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"The create_address function must be called with -f or --function.\"\n )\n\n if kwargs is None:\n kwargs = {}\n\n conn = get_conn()\n datacenter_id = get_datacenter_id()\n loadbalancer = LoadBalancer(\n name=kwargs.get(\"name\"), ip=kwargs.get(\"ip\"), dhcp=kwargs.get(\"dhcp\")\n )\n\n response = conn.create_loadbalancer(datacenter_id, loadbalancer)\n _wait_for_completion(conn, response, 60, \"loadbalancer\")\n\n return response", "def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)", "def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = 
driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))", "def post(self, request):\n return create_loadbalancer(request)", "def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')", "def create_loadbalancer(self, context, lb):\n super(ArrayDeviceDriverV2, self).create_loadbalancer(context, lb)\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n if deployment_model == \"PER_LOADBALANCER\":\n self.update_loadbalancer(context, lb, None)", "def create(self, ip): # pylint: disable=invalid-name\n return self.request(\"POST\", data={\"ip\": ip})", "def get_create_load_balancer_flow(self, load_balancer_id, topology, project_id,\n listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(vthunder_tasks.VthunderInstanceBusy(\n requires=a10constants.COMPUTE_BUSY))\n\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n lb_create_flow.add(a10_database_tasks.CheckExistingVthunderTopology(\n requires=constants.LOADBALANCER,\n inject={\"topology\": topology}))\n\n # Attaching vThunder to LB in database\n if topology == constants.TOPOLOGY_ACTIVE_STANDBY:\n lb_create_flow.add(*self._create_active_standby_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n elif topology == constants.TOPOLOGY_SINGLE:\n lb_create_flow.add(*self._create_single_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n else:\n LOG.error(\"Unknown topology: %s. 
Unable to build load balancer.\",\n topology)\n raise exceptions.InvalidTopology(topology=topology)\n\n # IMP: Now creating vThunder config here\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n vthunder = self._vthunder_repo.get_vthunder_by_project_id(db_apis.get_session(),\n project_id)\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(\n self.get_post_lb_vthunder_association_flow(\n post_amp_prefix, load_balancer_id, topology, vthunder,\n mark_active=(not listeners)))\n lb_create_flow.add(a10_database_tasks.CountLoadbalancersWithFlavor(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER),\n provides=a10constants.LB_COUNT_FLAVOR))\n lb_create_flow.add(vthunder_tasks.AllowL2DSR(\n requires=(constants.SUBNET, constants.AMPHORA,\n a10constants.LB_COUNT_FLAVOR, constants.FLAVOR_DATA)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA)))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_flow=True))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_vthunder_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n\n return lb_create_flow", "def post_virtual_ip_create(self, resource_dict):\n pass", "def create_loadbalancer(self, context, loadbalancer, driver_name):\n LOG.info(\"Received request 'Create Loadbalancer' for LB:%(lb)s \"\n \"with driver:%(driver_name)s\",\n {'lb': loadbalancer['id'],\n 'driver_name': driver_name})\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n 'driver_name': driver_name\n }\n self._send_event(lb_const.EVENT_CREATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def pre_virtual_ip_create(self, resource_dict):\n pass", "def vplb():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Virtual Proxy Load Balancing details', 1)\r\n virtualproxylbnodes = get_qlik_sense.get_vploadbalancers()\r\n virtualproxylb_metrics = ['node', 'load balance nodes']\r\n num_of_virtualproxyslb = len(virtualproxylbnodes)\r\n num_of_virtualproxylb_metrics = len(virtualproxylb_metrics)\r\n table = document.add_table(rows=num_of_virtualproxyslb+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n for item in range(0, len(virtualproxylb_metrics)):\r\n row.cells[item].text = virtualproxylb_metrics[item]\r\n\r\n for item in range(num_of_virtualproxyslb):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(virtualproxylbnodes[item]['type'])\r\n 
row.cells[1].text = str(', '.join(virtualproxylbnodes[item]['items']))\r\n\r\n document.add_page_break()", "def test_create_vip_with_mandatory_params(self):\r\n resource = 'vip'\r\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\r\n pool_id = 'my-pool-id'\r\n name = 'my-name'\r\n subnet_id = 'subnet-id'\r\n protocol_port = '1000'\r\n protocol = 'TCP'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--name', name,\r\n '--protocol-port', protocol_port,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n pool_id]\r\n position_names = ['pool_id', 'name', 'protocol_port', 'protocol',\r\n 'subnet_id', 'tenant_id']\r\n position_values = [pool_id, name, protocol_port, protocol,\r\n subnet_id, tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True)", "def create_vlan_pool(self, vlan_pool_name, allocation_mode):\n VlanInstP_mo = VlanInstP('uni/infra/', vlan_pool_name, allocation_mode)\n self.commit(VlanInstP_mo)\n return VlanInstP_mo", "def test_create_vip_with_mandatory_params(self):\n resource = 'vip'\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\n pool_id = 'my-pool-id'\n name = 'my-name'\n subnet_id = 'subnet-id'\n protocol_port = '1000'\n protocol = 'TCP'\n tenant_id = 'my-tenant'\n my_id = 'my-id'\n args = ['--name', name,\n '--protocol-port', protocol_port,\n '--protocol', protocol,\n '--subnet-id', subnet_id,\n '--tenant-id', tenant_id,\n pool_id]\n position_names = ['pool_id', 'name', 'protocol_port', 'protocol',\n 'subnet_id', 'tenant_id']\n position_values = [pool_id, name, protocol_port, protocol,\n subnet_id, tenant_id]\n self._test_create_resource(resource, cmd, name, my_id, args,\n position_names, position_values,\n admin_state_up=True)", "def create(\n self,\n name, # type: str\n load_balancer_type, # type: LoadBalancerType\n algorithm=None, # type: Optional[LoadBalancerAlgorithm]\n services=None, # type: Optional[List[LoadBalancerService]]\n targets=None, # type: Optional[List[LoadBalancerTarget]]\n labels=None, # type: Optional[Dict[str, str]]\n location=None, # type: Optional[Location]\n network_zone=None, # type: Optional[str]\n public_interface=None, # type: Optional[bool]\n network=None # type: Optional[Union[Network,BoundNetwork]]\n ):\n # type: (...) 
-> CreateLoadBalancerResponse:\n data = {\"name\": name, \"load_balancer_type\": load_balancer_type.id_or_name}\n if network is not None:\n data[\"network\"] = network.id\n if public_interface is not None:\n data[\"public_interface\"] = public_interface\n if labels is not None:\n data[\"labels\"] = labels\n if algorithm is not None:\n data[\"algorithm\"] = {\"type\": algorithm.type}\n if services is not None:\n service_list = []\n for service in services:\n service_list.append(self.get_service_parameters(service))\n data[\"services\"] = service_list\n\n if targets is not None:\n target_list = []\n for target in targets:\n target_data = {\n \"type\": target.type,\n \"use_private_ip\": target.use_private_ip\n }\n if target.type == \"server\":\n target_data['server'] = {\"id\": target.server.id}\n elif target.type == \"label_selector\":\n target_data['label_selector'] = {\"selector\": target.label_selector.selector}\n elif target.type == \"ip\":\n target_data['ip'] = {\"ip\": target.ip.ip}\n target_list.append(target_data)\n\n data[\"targets\"] = target_list\n\n if network_zone is not None:\n data[\"network_zone\"] = network_zone\n if location is not None:\n data[\"location\"] = location.id_or_name\n\n response = self._client.request(url=\"/load_balancers\", method=\"POST\", json=data)\n\n return CreateLoadBalancerResponse(load_balancer=BoundLoadBalancer(self, response[\"load_balancer\"]),\n action=BoundAction(self._client.actions, response['action']))", "def test_create_vip_with_all_params(self):\r\n resource = 'vip'\r\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\r\n pool_id = 'my-pool-id'\r\n name = 'my-name'\r\n description = 'my-desc'\r\n address = '10.0.0.2'\r\n admin_state = False\r\n connection_limit = '1000'\r\n subnet_id = 'subnet-id'\r\n protocol_port = '80'\r\n protocol = 'TCP'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--name', name,\r\n '--description', description,\r\n '--address', address,\r\n '--admin-state-down',\r\n '--connection-limit', connection_limit,\r\n '--protocol-port', protocol_port,\r\n '--protocol', protocol,\r\n '--subnet-id', subnet_id,\r\n '--tenant-id', tenant_id,\r\n pool_id]\r\n position_names = ['pool_id', 'name', 'description', 'address',\r\n 'admin_state_up', 'connection_limit',\r\n 'protocol_port', 'protocol', 'subnet_id',\r\n 'tenant_id']\r\n position_values = [pool_id, name, description, address,\r\n admin_state, connection_limit, protocol_port,\r\n protocol, subnet_id,\r\n tenant_id]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)", "def test_create_vip_with_all_params(self):\n resource = 'vip'\n cmd = vip.CreateVip(test_cli20.MyApp(sys.stdout), None)\n pool_id = 'my-pool-id'\n name = 'my-name'\n description = 'my-desc'\n address = '10.0.0.2'\n admin_state = False\n connection_limit = '1000'\n subnet_id = 'subnet-id'\n protocol_port = '80'\n protocol = 'TCP'\n tenant_id = 'my-tenant'\n my_id = 'my-id'\n args = ['--name', name,\n '--description', description,\n '--address', address,\n '--admin-state-down',\n '--connection-limit', connection_limit,\n '--protocol-port', protocol_port,\n '--protocol', protocol,\n '--subnet-id', subnet_id,\n '--tenant-id', tenant_id,\n pool_id]\n position_names = ['pool_id', 'name', 'description', 'address',\n 'admin_state_up', 'connection_limit',\n 'protocol_port', 'protocol', 'subnet_id',\n 'tenant_id']\n position_values = [pool_id, name, description, address,\n admin_state, connection_limit, protocol_port,\n protocol, subnet_id,\n 
tenant_id]\n self._test_create_resource(resource, cmd, name, my_id, args,\n position_names, position_values)", "def add_virtualip(self, vip):\n return self.manager.add_virtualip(self, vip)", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVirtNet_Create'))", "def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)", "def new_vm():\n\tcfg_path = input(\"\\n\\nInsert the ClickOS .cfg file absolute path:\\n\")\n\n\tbridge_name = get_bridge_name(cfg_path)\n\tif len(bridge_name) == 0:\n\t\tprint(\"Couldnt find the bridge name.\")\n\t\treturn 0\n\n\tcreate_bridge(bridge_name)\n\n\tboot_vm(cfg_path)\n\n\treturn 1", "def create(self, region, **kwargs):\n params = {\n \"region\": region.id if isinstance(region, Base) else region,\n }\n params.update(kwargs)\n\n result = self.client.post(\"/nodebalancers\", data=params)\n\n if not \"id\" in result:\n raise UnexpectedResponseError(\n \"Unexpected response when creating Nodebalaner!\", json=result\n )\n\n n = NodeBalancer(self.client, result[\"id\"], result)\n return n", "def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)", "def vip(self, vip):\n\n self._vip = vip" ]
[ "0.70703477", "0.6925294", "0.677822", "0.6749381", "0.6702949", "0.6603127", "0.6512064", "0.64853495", "0.63139737", "0.6305228", "0.6186546", "0.6184659", "0.6163926", "0.6135965", "0.61169434", "0.6077778", "0.6031967", "0.59520847", "0.5935914", "0.592049", "0.5902803", "0.5863932", "0.5796835", "0.5790637", "0.57892436", "0.57723534", "0.5758195", "0.57271695", "0.5686507", "0.5652768" ]
0.71251917
0
Updates a load balancer vip.
def update_vip(self, vip, body=None):
        return self.put(self.vip_path % (vip), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_loadbalancer(self, context, lb, old):\n LOG.debug(\"\\nupdate_loadbalancer({}): called\".format(lb.id))\n hostnames = self._get_hostname(lb)\n # Update the TrafficIP group\n vapv = self._get_vapv(hostnames)\n # Update allowed_address_pairs\n if not old or lb.vip_address != old.vip_address:\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.add_ip_to_ports(\n lb.vip_address, port_ids\n )\n # Update bandwidth allocation\n if old is not None and old.bandwidth != lb.bandwidth:\n self._update_instance_bandwidth(hostnames, lb.bandwidth)", "def vip(self, vip):\n\n self._vip = vip", "def add_virtualip(self, loadbalancer, vip):\n return loadbalancer.add_virtualip(vip)", "def add_virtualip(self, lb, vip):\n resp, body = self.api.method_post(\"/loadbalancers/%s/virtualips\" % lb.id,\n body=vip.to_dict())\n return resp, body", "def put(self, request, loadbalancer_id):\n kwargs = {'loadbalancer_id': loadbalancer_id}\n update_loadbalancer(request, **kwargs)", "def test_update_vip(self):\n resource = 'vip'\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\n self._test_update_resource(resource, cmd, 'myid',\n ['myid', '--name', 'myname',\n '--tags', 'a', 'b'],\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_update_vip(self):\r\n resource = 'vip'\r\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def update(self):\n ip = get_ip()\n if ip != self.ip:\n self.ip = ip\n self.ind.set_label(ip)", "def update_loadbalancer(request, **kwargs):\n data = request.DATA\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.update_load_balancer(\n loadbalancer_id,\n name=data['loadbalancer'].get('name'),\n description=data['loadbalancer'].get('description'),\n admin_state_up=data['loadbalancer'].get('admin_state_up'))\n\n return _get_sdk_object_dict(loadbalancer)", "def handle_put(self, request, user, *args, **kwargs):\n\n try:\n\n self.log.info('Change Option VIP')\n\n id_option_vip = kwargs.get('id_option_vip')\n\n # User permission\n if not has_perm(user, AdminPermission.OPTION_VIP, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n raise UserNotAuthorizedError(None)\n\n # Load XML data\n xml_map, attrs_map = loads(request.raw_post_data)\n\n # XML data format\n networkapi_map = xml_map.get('networkapi')\n if networkapi_map is None:\n return self.response_error(3, u'There is no value to the networkapi tag of XML request.')\n\n optionvip_map = networkapi_map.get('option_vip')\n if optionvip_map is None:\n return self.response_error(3, u'There is no value to the option_vip tag of XML request.')\n\n # Valid Option VIP ID\n if not is_valid_int_greater_zero_param(id_option_vip):\n self.log.error(\n u'The id_option_vip parameter is not a valid value: %s.', id_option_vip)\n raise InvalidValueError(None, 'id_option_vip', id_option_vip)\n\n # Find Option VIP by ID to check if it exist\n option_vip = OptionVip.get_by_pk(id_option_vip)\n\n with distributedlock(LOCK_OPTIONS_VIP % id_option_vip):\n\n # Valid Option Vip\n option_vip.valid_option_vip(optionvip_map)\n\n try:\n # Update Option Vip\n option_vip.save()\n except Exception, e:\n self.log.error(u'Failed to update the option vip.')\n raise OptionVipError(e, u'Failed 
to update the option vip')\n\n return self.response(dumps_networkapi({}))\n\n except InvalidValueError, e:\n return self.response_error(269, e.param, e.value)\n\n except UserNotAuthorizedError:\n return self.not_authorized()\n\n except XMLError, x:\n self.log.error(u'Error reading the XML request.')\n return self.response_error(3, x)\n\n except OptionVipNotFoundError:\n return self.response_error(289)\n\n except OptionVipError:\n return self.response_error(1)", "def put(self, ip):\n data = request.json\n update_ue_sub(ip, data)\n return None, 204", "def ip_version(self, ip_version):\n\n self._ip_version = ip_version", "def post_virtual_ip_update(self, resource_id, resource_dict):\n pass", "def handle_put(self, request, user, *args, **kwargs):\n\n self.log.info(\"Change VIP's persistence\")\n\n try:\n\n # Commons Validations\n\n # User permission\n if not has_perm(user, AdminPermission.VIP_ALTER_SCRIPT, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n raise UserNotAuthorizedError(None)\n\n # Valid Vip ID\n vip_id = kwargs.get('id_vip')\n if not is_valid_int_greater_zero_param(vip_id):\n self.log.error(\n u'The vip_id parameter is not a valid value: %s.', vip_id)\n raise InvalidValueError(None)\n\n # Existing Vip ID\n vip = RequisicaoVips.get_by_pk(vip_id)\n\n with distributedlock(LOCK_VIP % vip_id):\n\n vip_old = clone(vip)\n\n # Vip must be created\n if not vip.vip_criado:\n self.log.error(\n u'Persistence can not be changed because VIP has not yet been created.')\n raise RequestVipsNotBeenCreatedError(None)\n\n # Vip equipments permission\n if vip.ip is not None:\n for ip_equipment in vip.ip.ipequipamento_set.all():\n if not has_perm(user, AdminPermission.VIP_ALTER_SCRIPT, AdminPermission.WRITE_OPERATION, None, ip_equipment.equipamento_id, AdminPermission.EQUIP_UPDATE_CONFIG_OPERATION):\n self.log.error(\n u'Groups of equipment registered with the IP of the VIP request is not allowed of acess.')\n raise EquipmentGroupsNotAuthorizedError(None)\n\n if vip.ipv6 is not None:\n for ip_equipment in vip.ipv6.ipv6equipament_set.all():\n if not has_perm(user, AdminPermission.VIP_ALTER_SCRIPT, AdminPermission.WRITE_OPERATION, None, ip_equipment.equipamento_id, AdminPermission.EQUIP_UPDATE_CONFIG_OPERATION):\n self.log.error(\n u'Groups of equipment registered with the IP of the VIP request is not allowed of acess.')\n raise EquipmentGroupsNotAuthorizedError(None)\n\n # Business Validations\n\n # Load XML data\n xml_map, attrs_map = loads(request.raw_post_data)\n\n # XML data format\n networkapi_map = xml_map.get('networkapi')\n if networkapi_map is None:\n return self.response_error(3, u'There is no value to the networkapi tag of XML request.')\n vip_map = networkapi_map.get('vip')\n if vip_map is None:\n return self.response_error(3, u'There is no value to the vip tag of XML request.')\n\n # Get variables\n variables_map = vip.variables_to_map()\n\n # validation of persistence type is doing by set_variables\n persistence = vip_map.get('persistencia', None)\n variables_map['persistencia'] = persistence\n\n # Set variables\n vip.set_variables(variables_map)\n\n # Save VIP\n vip.save(user, commit=True)\n\n # SYNC_VIP\n old_to_new(vip)\n\n # Executar script\n\n # gerador_vips -i <ID_REQUISICAO> --healthcheck\n command = 'gerador_vips -i %d --persistence' % vip.id\n code, stdout, stderr = exec_script(command)\n\n if code == 0:\n success_map = dict()\n success_map['codigo'] = '%04d' % code\n success_map['descricao'] = {\n 'stdout': 
stdout, 'stderr': stderr}\n\n map = dict()\n map['sucesso'] = success_map\n return self.response(dumps_networkapi(map))\n else:\n vip_old.save(user, commit=True)\n return self.response_error(2, stdout + stderr)\n\n except XMLError, x:\n self.log.error(u'Error reading the XML request.')\n return self.response_error(3, x)\n except ScriptError, s:\n return self.response_error(2, s)\n except EquipmentGroupsNotAuthorizedError:\n return self.response_error(271)\n except RequestVipsNotBeenCreatedError:\n return self.response_error(270, vip_id)\n except InvalidValueError, e:\n return self.response_error(332, e.param, e.value)\n except HealthcheckExpectNotFoundError:\n return self.response_error(124)\n except RequisicaoVipsNotFoundError:\n return self.response_error(152)\n except InvalidFinalidadeValueError:\n return self.response_error(125)\n except InvalidClienteValueError:\n return self.response_error(126)\n except InvalidAmbienteValueError:\n return self.response_error(127)\n except InvalidCacheValueError:\n return self.response_error(128)\n except InvalidMetodoBalValueError:\n return self.response_error(131)\n except InvalidPersistenciaValueError:\n return self.response_error(132)\n except InvalidHealthcheckTypeValueError:\n return self.response_error(133)\n except InvalidPriorityValueError:\n return self.response_error(325)\n except InvalidHealthcheckValueError:\n return self.response_error(134)\n except InvalidTimeoutValueError:\n return self.response_error(135)\n except InvalidHostNameError:\n return self.response_error(136)\n except InvalidMaxConValueError:\n return self.response_error(137)\n except InvalidServicePortValueError, e:\n porta = 'nulo'\n if e.message is not None:\n porta = e.message\n return self.response_error(138, porta)\n except InvalidRealValueError, e:\n real = 'nulo'\n if e.message is not None:\n real = e.message\n return self.response_error(151, real)\n except InvalidBalAtivoValueError:\n return self.response_error(129)\n except InvalidTransbordoValueError, e:\n transbordo = 'nulo'\n if e.message is not None:\n transbordo = e.message\n return self.response_error(130, transbordo)\n except UserNotAuthorizedError:\n return self.not_authorized()\n except IpNotFoundByEquipAndVipError:\n return self.response_error(334, e.message)\n except (RequisicaoVipsError, EquipamentoError, IpError, HealthcheckExpectError, GrupoError), e:\n return self.response_error(1)", "def pre_virtual_ip_update(self, resource_id, resource_dict):\n pass", "def update(self, v_input):\n\n self.v = v_input", "def add_virtualip(self, vip):\n return self.manager.add_virtualip(self, vip)", "def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):\n old_val, new_val = self.get_diff_of_dict(\n old_loadbalancer, loadbalancer)\n arg_dict = {'context': context,\n lb_const.OLD_LOADBALANCER: old_loadbalancer,\n lb_const.LOADBALANCER: loadbalancer,\n }\n LOG.info(\"Received request 'Update Loadbalancer' for LB:%(lb)s \"\n \"with new Param:%(new_val)s and old Param:%(old_val)s\",\n {'lb': loadbalancer['id'],\n 'new_val': new_val,\n 'old_val': old_val})\n self._send_event(lb_const.EVENT_UPDATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def update_pv(self, pvname, value) -> None:\n val = value\n pvname = pvname.replace(f\"{self._prefix}:\", \"\")\n\n self._cached_values.update({pvname: val})\n\n # only update if not running\n if not self._running_indicator.value:\n self._in_queue.put({\"protocol\": self.protocol, \"pvs\": 
self._cached_values})\n self._cached_values = {}", "def post_instance_ip_update(self, resource_id, resource_dict):\n pass", "def update(self):\n self._state = get_local_ip()", "def for_virtual_ip_address_enter_vip_and_press_apply(driver, vip):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Virtual IP Address\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Virtual IP Address\"]').send_keys(vip)\n wait_on_element(driver, 0.5, 5, '//button[@ix-auto=\"button__APPLY\"]')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__APPLY\"]').click()", "def update(self, ip):\n timeout = 60\n LOG.debug(\"Updating '%s' to '%s' at service '%s'\", self.hostname, ip, self._updateurl)\n params = {\"myip\": ip, \"hostname\": self.hostname}\n req = requests.get(self._updateurl, params=params, headers=constants.REQUEST_HEADERS_DEFAULT,\n auth=(self.__userid, self.__password), timeout=timeout)\n LOG.debug(\"status %i, %s\", req.status_code, req.text)\n if req.status_code == 200:\n # responses can also be \"nohost\", \"abuse\", \"911\", \"notfqdn\"\n if req.text.startswith(\"good \") or req.text.startswith(\"nochg\"):\n return ip\n return req.text\n return \"invalid http status code: %s\" % req.status_code", "def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip", "def pre_instance_ip_update(self, resource_id, resource_dict):\n pass", "def update(self, name=None, labels=None):\n # type: (Optional[str], Optional[Dict[str, str]]) -> BoundLoadBalancer\n return self._client.update(self, name, labels)", "def update_listener_pool(self, service, name, bigips):\n vip = self.service_adapter.get_virtual_name(service)\n if vip:\n vip[\"pool\"] = name\n for bigip in bigips:\n v = bigip.tm.ltm.virtuals.virtual\n if v.exists(name=vip[\"name\"], partition=vip[\"partition\"]):\n obj = v.load(name=vip[\"name\"], partition=vip[\"partition\"])\n obj.modify(**vip)", "async def put(self, virtual_id, request) -> web.Response:\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {\n \"status\": \"failed\",\n \"reason\": f\"Virtual with ID {virtual_id} not found\",\n }\n return web.json_response(data=response, status=404)\n\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {\n \"status\": \"failed\",\n \"reason\": \"JSON Decoding failed\",\n }\n return web.json_response(data=response, status=400)\n active = data.get(\"active\")\n if active is None:\n response = {\n \"status\": \"failed\",\n \"reason\": 'Required attribute \"active\" was not provided',\n }\n return web.json_response(data=response, status=400)\n\n # Update the virtual's configuration\n try:\n virtual.active = active\n except ValueError as msg:\n response = {\n \"status\": \"failed\",\n \"payload\": {\"type\": \"warning\", \"reason\": str(msg)},\n }\n return web.json_response(data=response, status=202)\n\n # Update ledfx's config\n for idx, item in enumerate(self._ledfx.config[\"virtuals\"]):\n if item[\"id\"] == virtual.id:\n item[\"active\"] = virtual.active\n self._ledfx.config[\"virtuals\"][idx] = item\n break\n\n save_config(\n config=self._ledfx.config,\n config_dir=self._ledfx.config_dir,\n )\n\n response = {\"status\": \"success\", \"active\": virtual.active}\n return web.json_response(data=response, status=200)", "async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": 
\"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def update_port_ip(self, dpid, port, ip):\n # TODO Connection between mac and ip of host?\n if self.nb_api is None:\n self.nb_api = api_nb.NbApi.get_instance(False)\n\n port_id = \"{}:{}\".format(dpid, port)\n try:\n lport = self.nb_api.get(l2.LogicalPort(id=port_id))\n for ip_addr_obj in lport.ips:\n if str(ip_addr_obj) == ip:\n # already learned\n return\n lport.ips.append(ip)\n self.nb_api.update(lport)\n # TODO: Remove old ips\n except DBKeyNotFound:\n # TODO: Create Port?\n print \"Key not Found!!\"" ]
[ "0.69501674", "0.68709505", "0.660137", "0.6559625", "0.6518858", "0.63838536", "0.6332854", "0.63197947", "0.61692894", "0.5989271", "0.59790754", "0.5976584", "0.59753597", "0.5837428", "0.5828339", "0.5752084", "0.56893516", "0.5664997", "0.56476814", "0.5620249", "0.56126976", "0.55561477", "0.55476534", "0.54883575", "0.54409754", "0.5407684", "0.54035425", "0.539143", "0.53845495", "0.5381107" ]
0.74929464
0
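A minimal usage sketch for the update_vip document above, assuming the legacy python-neutronclient v2.0 Client with the LBaaS v1 extension enabled; the credentials, endpoint, and 'VIP_ID' are placeholders, not values from the dataset.

from neutronclient.v2_0 import client

# Placeholder credentials and endpoint, for illustration only.
neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# Only the attributes listed under 'vip' are changed; the PUT issued
# through self.put leaves the rest of the resource as-is.
body = {'vip': {'name': 'web-vip', 'connection_limit': 1000}}
updated = neutron.update_vip('VIP_ID', body=body)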
Deletes the specified load balancer vip.
def delete_vip(self, vip):
        return self.delete(self.vip_path % (vip))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_virtualip(self, loadbalancer, vip):\n lb = vip.parent\n if not lb:\n raise exc.UnattachedVirtualIP(\"No parent Load Balancer for this \"\n \"VirtualIP could be determined.\")\n resp, body = self.api.method_delete(\"/loadbalancers/%s/virtualips/%s\" %\n (lb.id, vip.id))\n return resp, body", "def delete_virtualip(self, vip):\n return vip.delete()", "def delete_virtualip(self, vip):\n return self.manager.delete_virtualip(self, vip)", "def delete(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_load_balancer(loadbalancer_id,\n ignore_missing=True,\n cascade=True)", "def delete_loadbalancer(self, context, lb):\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n hostnames = self._get_hostname(lb)\n if deployment_model in [\"PER_TENANT\", \"PER_SUBNET\"]:\n vapv = self._get_vapv(hostnames)\n if not vapv.tip_group.list():\n self._destroy_vapv(hostnames, lb)\n elif deployment_model == \"PER_TENANT\":\n # Delete subnet ports if no longer required\n if self.openstack_connector.subnet_in_use(lb) is False:\n self._detach_subnet_port(vapv, hostnames, lb)\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.delete_ip_from_ports(\n lb.vip_address, port_ids\n )\n elif deployment_model == \"PER_LOADBALANCER\":\n self._destroy_vapv(hostnames, lb)", "def delete(self, ip): # pylint: disable=invalid-name\n return self.request(\"DELETE\", ip)", "def delete(self, loadbalancer_id):\n response.status = 201", "def delete(self, load_balancer):\n # type: (LoadBalancer) -> BoundAction\n self._client.request(\n url=\"/load_balancers/{load_balancer_id}\".format(load_balancer_id=load_balancer.id), method=\"DELETE\"\n )\n return True", "def delete_loadbalancer(self, context, loadbalancer):\n LOG.info(\"Received request 'Delete Loadbalancer' for LB:%(lb)s \",\n {'lb': loadbalancer['id']})\n\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n }\n self._send_event(lb_const.EVENT_DELETE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def delete(self):\r\n return self.connection.delete_load_balancer(self.name)", "def delete_balancer(ctx):\n if self.balancer_exists():\n self.delete_balancer()\n ctx.info('Successfully deleted load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, nothing to delete.'.format(\n self.get_balancer_name()\n ))", "def delete_balancer(self):\n response = self.client.delete_load_balancer(\n LoadBalancerArn=self.get_balancer_arn()\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200", "def delete(self, request, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_pool,\n pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)", "def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')", "def vpp_lb_add_del_vip(node, **kwargs):\n if node[u\"type\"] == NodeType.DUT:\n vip_addr = kwargs.pop(u\"vip_addr\", \"0.0.0.0\")\n protocol = kwargs.pop(u\"protocol\", 255)\n port = kwargs.pop(u\"port\", 0)\n encap = kwargs.pop(u\"encap\", 0)\n dscp = kwargs.pop(u\"dscp\", 0)\n srv_type = kwargs.pop(u\"srv_type\", 0)\n target_port = kwargs.pop(u\"target_port\", 0)\n node_port = kwargs.pop(u\"node_port\", 0)\n new_len = 
kwargs.pop(u\"new_len\", 1024)\n src_ip_sticky = kwargs.pop(u\"src_ip_sticky\", 0)\n is_del = kwargs.pop(u\"is_del\", 0)\n\n cmd = u\"lb_add_del_vip_v2\"\n err_msg = f\"Failed to add vip on host {node[u'host']}\"\n\n vip_addr = ip_address(vip_addr).packed\n args = dict(\n pfx={\n u\"len\": 128,\n u\"address\": {u\"un\": {u\"ip4\": vip_addr}, u\"af\": 0}\n },\n protocol=protocol,\n port=port,\n encap=htonl(encap),\n dscp=dscp,\n type=srv_type,\n target_port=target_port,\n node_port=node_port,\n new_flows_table_length=int(new_len),\n src_ip_sticky=src_ip_sticky,\n is_del=is_del,\n )\n\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)\n else:\n raise ValueError(\n f\"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'\"\n )", "def pre_virtual_ip_delete(self, resource_id):\n pass", "def delete_floatingip(self, floatingip):\r\n return self.delete(self.floatingip_path % (floatingip))", "def delete_ip(ip):\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n\n # Deleting single record now\n sql_delete_query = \"\"\"DELETE from Status where ip = ?\"\"\"\n\n cursor.execute(sql_delete_query, [ip])\n sql.commit()\n\n logging.debug(\"Record deleted successfully \")\n\n cursor.close()\n sql.close()", "def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass", "def delete(self, request, member_id, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_member,\n member_id, pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)", "def delete_public_ip(self, ip=None):\n raise NotImplementedError", "def handle_delete(self, request, user, *args, **kwargs):\n\n try:\n\n self.log.info('Delete Option VIP')\n\n id_option_vip = kwargs.get('id_option_vip')\n\n # User permission\n if not has_perm(user, AdminPermission.OPTION_VIP, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n raise UserNotAuthorizedError(None)\n\n # Valid Option VIP ID\n if not is_valid_int_greater_zero_param(id_option_vip):\n self.log.error(\n u'The id_option_vip parameter is not a valid value: %s.', id_option_vip)\n raise InvalidValueError(None, 'id_option_vip', id_option_vip)\n\n # Find Option VIP by ID to check if it exist\n option_vip = OptionVip.get_by_pk(id_option_vip)\n\n with distributedlock(LOCK_OPTIONS_VIP % id_option_vip):\n\n try:\n # Delete Option Vip\n option_vip.delete(user)\n except Exception, e:\n self.log.error(u'Failed to delete the option vip.')\n raise OptionVipError(e, u'Failed to delete the option vip')\n\n return self.response(dumps_networkapi({}))\n\n except InvalidValueError, e:\n return self.response_error(269, e.param, e.value)\n\n except UserNotAuthorizedError:\n return self.not_authorized()\n\n except OptionVipNotFoundError:\n return self.response_error(289)\n\n except OptionVipError:\n return self.response_error(1)", "def remove_ban(self, vapor_id_or_ip):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}removeBan {}'.format(self.console, identity)\n self.write_command(cmd)", "def del_ip(ip_list, interrupted_ip):\n ip_index = ip_list.index(interrupted_ip)\n del ip_list[ip_index]\n return ip_list", "def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n 
click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def delete(self, request, flavor_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_flavor(flavor_id,\n ignore_missing=True)", "def remover_ip(self, id_equipamento, id_ip):\n\n if not is_valid_int_param(id_equipamento):\n raise InvalidParameterError(\n u'O identificador do equipamento é inválido ou não foi informado.')\n\n if not is_valid_int_param(id_ip):\n raise InvalidParameterError(\n u'O identificador do ip é inválido ou não foi informado.')\n\n url = 'ip/' + str(id_ip) + '/equipamento/' + str(id_equipamento) + '/'\n\n code, xml = self.submit(None, 'DELETE', url)\n\n return self.response(code, xml)", "def delete(self, params=None):\n self.logger.debug('Deleting %s with parameters: %s'\n % (self.type_name, params))\n return self.client.delete_load_balancer_policy(**params)", "def _delete_vpn(self, request, vpn):\n try:\n #api.quantum.network_delete(request, network.id)\n msg = _('Delete the created VPN \"%s\" '\n 'due to site addition failure.') % vpn_name\n LOG.debug(msg)\n redirect = self.get_failure_url()\n messages.info(request, msg)\n raise exceptions.Http302(redirect)\n #return exceptions.RecoverableError\n except:\n msg = _('Failed to delete VPN %s') % vpn_id\n LOG.info(msg)\n redirect = self.get_failure_url()\n exceptions.handle(request, msg, redirect=redirect)", "def remove_ip(self, ip_id):\n ip_id = ' \"IpAddressResourceId\": %s' % ip_id\n json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)\n json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)\n pprint(json_obj)\n return True if json_obj['Success'] is True else False" ]
[ "0.83227915", "0.7897649", "0.7836446", "0.71675014", "0.7155789", "0.67704123", "0.6738804", "0.6719887", "0.6645137", "0.6587413", "0.648962", "0.64112294", "0.63836735", "0.6236653", "0.6218045", "0.62147665", "0.6185486", "0.6182067", "0.6169506", "0.61605674", "0.60757965", "0.5983839", "0.5894217", "0.5877739", "0.58525974", "0.5827636", "0.5808247", "0.58073443", "0.58024853", "0.57946366" ]
0.817396
1
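A minimal usage sketch for the delete_vip document above, under the same legacy python-neutronclient v2.0 Client assumption; 'VIP_ID' is a placeholder.

from neutronclient.v2_0 import client

# Placeholder credentials and endpoint, for illustration only.
neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# Issues DELETE on the vip_path; the call returns no body, and an
# unknown id typically surfaces as a NotFound exception from the client.
neutron.delete_vip('VIP_ID')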
Fetches a list of all load balancer pools for a tenant.
def list_pools(self, retrieve_all=True, **_params):
        # Pass filters in "params" argument to do_request
        return self.list('pools', self.pools_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_pools(self) -> List[CachingPool]:\n return await self._pool_fetcher.get_pools()", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def list_pools_on_lbaas_agent(self, lbaas_agent, **_params):\r\n return self.get((self.agent_path + self.LOADBALANCER_POOLS) %\r\n lbaas_agent, params=_params)", "def _get_pools(self, settings):\n available_pools = []\n pool_names = []\n connections_settings = self._get_connections_settings(settings)\n\n # Generate the names of the pools this settings can connect to\n for router_name, _ in connections_settings:\n pool_names.append(router_name)\n\n # Generate the names of the pools this settings can connect to\n for pool in self.__pools.get(settings.get(\"client_id\", \"No id\"), []):\n if pool.name in pool_names:\n available_pools.append(pool)\n return available_pools", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def pools(self) -> List[CachingPool]:\n return self._pool_fetcher.pools", "def get_pools():\n poolinfostr = fork_and_get_output(\"zpool list -H -o all\".split())\n header = get_zpool_header()\n poolinfo = poolinfostr.splitlines()\n poolobjs = []\n for poolstr in poolinfo:\n poolobjs.append(DataZFS(poolstr, header, 'pool'))\n return poolobjs", "def pool_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n ret[pool_record[\"name_label\"]] = pool_record\n return ret", "def pool_list(mnode):\n cmd = \"gluster pool list\"\n return g.run(mnode, cmd)", "def get_bandwidth_pools(self, mask=None):\n\n if mask is None:\n mask = \"\"\"mask[totalBandwidthAllocated,locationGroup, id, name, projectedPublicBandwidthUsage,\n billingCyclePublicBandwidthUsage[amountOut,amountIn],\n billingItem[id,nextInvoiceTotalRecurringAmount],outboundPublicBandwidthUsage,serviceProviderId,bandwidthAllotmentTypeId,activeDetailCount,endDate]\n \"\"\"\n\n return self.client.call('SoftLayer_Account', 'getBandwidthAllotments', mask=mask, iter=True)", "def listRewardPools(self):\n return self.get_json('/rewardPool')", "def storage_pool_get_all(context, marker=None, limit=None, sort_keys=None,\n sort_dirs=None, filters=None, offset=None):\n session = get_session()\n with session.begin():\n # Generate the query\n query = _generate_paginate_query(context, session, models.StoragePool,\n marker, limit, sort_keys, sort_dirs,\n filters, offset,\n )\n # No storage_pool would match, return empty list\n if query is None:\n return []\n return query.all()", "def get_app_pool_list(self) -> list:\n ah_write = self.get_iis_object()\n section = ah_write.GetAdminSection(\"system.applicationHost/applicationPools\", \"MACHINE/WEBROOT/APPHOST\")\n collection = section.Collection\n result = list()\n\n for i in range(collection.Count):\n site = collection[i]\n prop = site.Properties\n\n name = prop[\"name\"].Value\n managed_runtime_version = prop[\"managedRuntimeVersion\"].Value\n if prop[\"managedPipelineMode\"].Value:\n managed_pipeline_mode = \"classic\"\n else:\n 
managed_pipeline_mode = \"integrated\"\n if prop[\"enable32BitAppOnWin64\"].Value:\n bitness = 32\n else:\n bitness = 64\n\n result.append(AppPool(name, managed_runtime_version, managed_pipeline_mode, bitness))\n\n return result", "def list_loadbalancers(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-loadbalancers option\"\n )\n\n ret = {}\n conn = get_conn()\n datacenter = get_datacenter(conn)\n\n for item in conn.list_loadbalancers(datacenter[\"id\"])[\"items\"]:\n lb = {\"id\": item[\"id\"]}\n lb.update(item[\"properties\"])\n ret[lb[\"name\"]] = lb\n\n return ret", "def list_device_pools(arn=None, type=None, nextToken=None):\n pass", "def fusion_api_get_storage_pools(self, uri=None, param='', api=None, headers=None):\n return self.pool.get(uri=uri, api=api, headers=headers, param=param)", "def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs", "def get_all(self, name=None, label_selector=None):\n # type: (Optional[str], Optional[str]) -> List[BoundLoadBalancer]\n return super(LoadBalancersClient, self).get_all(\n name=name, label_selector=label_selector\n )", "def get_all(self, marker=None, limit=None, sort_key='id',\n sort_dir='asc'):\n context = pecan.request.context\n return self._get_nodepool_policies_collection(marker, limit, sort_key,\n sort_dir)", "def get_list(\n self,\n name=None, # type: Optional[str]\n label_selector=None, # type: Optional[str]\n page=None, # type: Optional[int]\n per_page=None, # type: Optional[int]\n ):\n # type: (...) 
-> PageResults[List[BoundLoadBalancer], Meta]\n params = {}\n if name is not None:\n params[\"name\"] = name\n if label_selector is not None:\n params[\"label_selector\"] = label_selector\n if page is not None:\n params[\"page\"] = page\n if per_page is not None:\n params[\"per_page\"] = per_page\n\n response = self._client.request(url=\"/load_balancers\", method=\"GET\", params=params)\n\n ass_load_balancers = [\n BoundLoadBalancer(self, load_balancer_data) for load_balancer_data in response[\"load_balancers\"]\n ]\n return self._add_meta_to_result(ass_load_balancers, response)", "def get_pools():\n pools = ch_core.hookenv.action_get('pools')\n if pools:\n return [p.strip() for p in pools.split(',')]\n return None", "def list_pools(self):\n search_opts = {'router:external': True}\n return [FloatingIpPool(pool) for pool\n in self.client.list_networks(**search_opts).get('networks')]", "def test_list_pools_sort(self):\r\n resources = \"pools\"\r\n cmd = pool.ListPool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def pools(self, summary=True, tags_intersect=None, tags=None):\n return list(self.all_pools(summary=summary, tags=tags, tags_intersect=tags_intersect))", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)", "def get_pool_ids(host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n pool_ids = []\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. 
Error message: [{err}]\".format(err=ret[1])\n return -1, pool_ids\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n pool_ids.append(p[\"id\"])\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1, pool_ids\n return 0, pool_ids", "def list_pools(self,\n instance_id: str,\n *,\n x_correlation_id: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n if instance_id is None:\n raise ValueError('instance_id must be provided')\n headers = {\n 'X-Correlation-ID': x_correlation_id\n }\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_pools')\n headers.update(sdk_headers)\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/instances/{0}/pools'.format(\n *self.encode_path_vars(instance_id))\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers)\n\n response = self.send(request)\n return response", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def node_pools(self) -> Sequence['outputs.NodePoolResponse']:\n return pulumi.get(self, \"node_pools\")", "def get_lb_pkgs(self):\r\n\r\n lb_filter = '*Load Balancer*'\r\n _filter = NestedDict({})\r\n _filter['items']['description'] = query_filter(lb_filter)\r\n\r\n kwargs = NestedDict({})\r\n kwargs['id'] = 0 # look at package id 0\r\n kwargs['filter'] = _filter.to_dict()\r\n packages = self.prod_pkg.getItems(**kwargs)\r\n pkgs = []\r\n for package in packages:\r\n if not package['description'].startswith('Global'):\r\n pkgs.append(package)\r\n return pkgs" ]
[ "0.64885235", "0.63832843", "0.6373779", "0.62559193", "0.62125343", "0.60228425", "0.5984435", "0.59661174", "0.5937696", "0.5794994", "0.5790406", "0.56883925", "0.5678287", "0.5643476", "0.5605727", "0.5600341", "0.5594697", "0.5594006", "0.5559764", "0.5546073", "0.5520509", "0.5510081", "0.5501044", "0.54573715", "0.54502994", "0.5407357", "0.5405591", "0.54033357", "0.53898996", "0.5298471" ]
0.6885515
0
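A minimal usage sketch for the list_pools document above, same client assumptions; the filter value is illustrative.

from neutronclient.v2_0 import client

# Placeholder credentials and endpoint, for illustration only.
neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# Keyword arguments are forwarded as query-string filters, per the
# "Pass filters in 'params'" note in the method itself; the client
# returns a dict keyed by the collection name.
pools = neutron.list_pools(lb_method='ROUND_ROBIN')['pools']
for pool in pools:
    print(pool['id'], pool['name'])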
Fetches information of a certain load balancer pool.
def show_pool(self, pool, **_params):
        return self.get(self.pool_path % (pool), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.find_pool(pool_id)\n pool = _get_sdk_object_dict(pool)\n\n if request.GET.get('includeChildResources'):\n resources = {}\n resources['pool'] = pool\n\n if pool.get('members'):\n member_list = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n resources['members'] = member_list\n\n if pool.get('health_monitor_id'):\n monitor_id = pool['health_monitor_id']\n monitor = conn.load_balancer.find_health_monitor(\n monitor_id)\n monitor = _get_sdk_object_dict(monitor)\n resources['monitor'] = monitor\n\n return resources\n else:\n return pool", "def retrieve_pool_stats(self, pool, **_params):\r\n return self.get(self.pool_path_stats % (pool), params=_params)", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def get_pool():\n app = get_app()\n return app['pool']", "def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def get_pool_stats(self, pool):\n svc = self.pool_path % pool\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting pool stats: '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n if not self._is_pool_owned(val):\n exception_msg = (_('Error pool ownership: '\n 'pool %(pool)s is not owned '\n 'by %(host)s.')\n % {'pool': pool,\n 'host': self.host})\n raise exception.InvalidInput(reason=pool)\n avail = val['pool']['usage']['available']\n used = val['pool']['usage']['used']\n return avail, used", "def get_pool_state(pool_id, instance_name):\n r = 
subprocess.check_output([\n 'dcos',\n 'edgelb',\n 'list',\n '--name=' + instance_name,\n '--json'\n ], env=_dcos_path())\n pools = json.loads(r)\n\n display.vvv('looking for pool_id {}'.format(pool_id))\n\n state = 'absent'\n for p in pools:\n try:\n if pool_id in p['name']:\n state = 'present'\n display.vvv('found pool: {}'.format(pool_id))\n\n except KeyError:\n continue\n return state", "def get_pool(self, name, dc, cluster):\n cluster_obj = self.get_cluster(cluster, dc)\n for rp in cluster_obj.resourcePool.resourcePool:\n if rp.name == name:\n return rp", "def pool(self):\n return self._properties.get('pool')", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def describe_balancer(ctx):\n data = self.get_balancer_info()\n if data is not None:\n ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} does not exist.'.format(self.get_balancer_name()))", "def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )", "def get_pool_id(pool_name, host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. Error message: [{err}]\".format(err=ret[1])\n return -1\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n if pool_name == p[\"name\"]:\n return p[\"id\"]\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def _determine_resource_pool(session, vm_):\n resource_pool = \"\"\n if \"resource_pool\" in vm_.keys():\n resource_pool = _get_pool(vm_[\"resource_pool\"], session)\n else:\n pool = session.xenapi.pool.get_all()\n if not pool:\n resource_pool = None\n else:\n first_pool = session.xenapi.pool.get_all()[0]\n resource_pool = first_pool\n pool_record = session.xenapi.pool.get_record(resource_pool)\n log.debug(\"resource pool: %s\", pool_record[\"name_label\"])\n return resource_pool", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def get_pools():\n gclass = 
get_class(\"dhcpPool\")\n if gclass is None:\n logger.error(\"failed to get dhcpPool\")\n return None\n\n pools = {}\n for obj in gclass:\n if \"attributes\" in obj[obj.keys()[0]]:\n attr = obj[obj.keys()[0]][\"attributes\"]\n for r in [\"className\", \"dn\", \"id\", \"type\", \"startIp\", \n \"endIp\", \"freeIPs\"]:\n if r not in attr:\n logger.error(\"missing %s, invalid object: %s\" % (\n r, pretty_print(obj)))\n return None\n ip = ipv4_to_int(attr[\"startIp\"])\n if ip is None:\n logger.error(\"failed to convert ipv4 address for %s\" % obj)\n return None\n p = {\n \"className\": attr[\"className\"],\n \"dn\": attr[\"dn\"],\n \"id\": attr[\"id\"],\n \"type\": attr[\"type\"],\n \"address\": ip,\n \"address_str\": attr[\"startIp\"],\n \"freeIPs\": attr[\"freeIPs\"]\n }\n if ip not in pools:\n pools[ip] = {\"bad_lease\":[], \"good_lease\":[], \"pools\":[],\n \"type\":attr[\"className\"], \"state\":\"\", \"address\":ip}\n pools[ip][\"pools\"].append(p)\n\n # loop through all entries in pool and update state\n for ip in pools:\n state = \"recovery\"\n for p in pools[ip][\"pools\"]:\n if p[\"type\"]!=\"recovery\": state = p[\"type\"]\n pools[ip][\"state\"] = state\n return pools", "def fusion_api_get_pool(self, uri=None, api=None, headers=None):\n return self.idpool.get(uri=uri, api=api, headers=headers)", "def get_block_info(pool_name='0-100-pool.burst.cryptoguru.org:8008'):\n channel = grpc.insecure_channel(pool_name)\n stub = api_pb2_grpc.ApiStub(channel)\n block_info = stub.GetBlockInfo(api_pb2.Void())\n return block_info", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n members_list = _sdk_object_to_list(conn.load_balancer.members(pool_id))\n return {'items': members_list}", "def get_lbaas_agent_hosting_pool(self, pool, **_params):\r\n return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool,\r\n params=_params)", "def _get_pool (self, event):\n return self.pool", "def verify_pool(self, pool):\n svc = self.pool_path % pool\n self.rest_get(svc, restclient.Status.OK)", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool" ]
[ "0.7106879", "0.6871207", "0.6782803", "0.6717103", "0.6549001", "0.6521597", "0.6512605", "0.64808595", "0.64730555", "0.6460818", "0.6423393", "0.63883483", "0.6257994", "0.6255898", "0.62404823", "0.6211178", "0.61900294", "0.6189252", "0.61788994", "0.61485076", "0.612659", "0.6110006", "0.6099576", "0.6047562", "0.6016414", "0.59805", "0.5954284", "0.5933641", "0.5908874", "0.59079045" ]
0.7443111
0
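A minimal usage sketch for the show_pool document above, same client assumptions; 'POOL_ID' and the field names are placeholders.

from neutronclient.v2_0 import client

# Placeholder credentials and endpoint, for illustration only.
neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# 'fields' asks the server to return only the named attributes;
# the single resource comes back under the 'pool' key.
pool = neutron.show_pool('POOL_ID', fields=['name', 'status'])['pool']
print(pool)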
Updates a load balancer pool.
def update_pool(self, pool, body=None):
        return self.put(self.pool_path % (pool), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_pool(self, context, old_pool, pool):\n old_val, new_val = self.get_diff_of_dict(old_pool, pool)\n LOG.info(\"Received request 'Update Pool' for Pool:%(pool)s \"\n \"in LB:%(lb_id)s with new Param:%(new_val)s and \"\n \"old Param:%(old_val)s\",\n {'pool': pool['id'],\n 'lb_id': pool['loadbalancer_id'],\n 'old_val': old_val,\n 'new_val': new_val})\n arg_dict = {'context': context,\n lb_const.OLD_POOL: old_pool,\n lb_const.POOL: pool,\n }\n self._send_event(lb_const.EVENT_UPDATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])", "def put(self, request, pool_id):\n kwargs = {'pool_id': pool_id}\n update_pool(request, **kwargs)", "def post_loadbalancer_pool_update(self, resource_id, resource_dict):\n pass", "def pre_loadbalancer_pool_update(self, resource_id, resource_dict):\n pass", "def update_loadbalancer(self, context, lb, old):\n LOG.debug(\"\\nupdate_loadbalancer({}): called\".format(lb.id))\n hostnames = self._get_hostname(lb)\n # Update the TrafficIP group\n vapv = self._get_vapv(hostnames)\n # Update allowed_address_pairs\n if not old or lb.vip_address != old.vip_address:\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.add_ip_to_ports(\n lb.vip_address, port_ids\n )\n # Update bandwidth allocation\n if old is not None and old.bandwidth != lb.bandwidth:\n self._update_instance_bandwidth(hostnames, lb.bandwidth)", "def test_update_pool(self):\r\n resource = 'pool'\r\n cmd = pool.UpdatePool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def update_minion_pool():\n pool = fetch_minion_pool()\n save_minion_pool(pool)\n return pool", "def update_loadbalancer(request, **kwargs):\n data = request.DATA\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.update_load_balancer(\n loadbalancer_id,\n name=data['loadbalancer'].get('name'),\n description=data['loadbalancer'].get('description'),\n admin_state_up=data['loadbalancer'].get('admin_state_up'))\n\n return _get_sdk_object_dict(loadbalancer)", "def update(self, name=None, labels=None):\n # type: (Optional[str], Optional[Dict[str, str]]) -> BoundLoadBalancer\n return self._client.update(self, name, labels)", "def update_device_pool(arn=None, name=None, description=None, rules=None):\n pass", "def put(self, request, pool_id):\n # Assemble the lists of member id's to add and remove, if any exist\n request_member_data = request.DATA.get('members', [])\n\n conn = get_sdk_connection(request)\n existing_members = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n\n (members_to_add, members_to_delete) = get_members_to_add_remove(\n request_member_data, existing_members)\n\n if members_to_add or members_to_delete:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n update_member_list(request, **kwargs)", "def update_listener_pool(self, service, name, bigips):\n vip = self.service_adapter.get_virtual_name(service)\n if vip:\n vip[\"pool\"] = name\n for bigip in bigips:\n v = bigip.tm.ltm.virtuals.virtual\n if v.exists(name=vip[\"name\"], partition=vip[\"partition\"]):\n obj = v.load(name=vip[\"name\"], partition=vip[\"partition\"])\n obj.modify(**vip)", "def put(self, request, member_id, pool_id):\n data = request.DATA\n conn = 
get_sdk_connection(request)\n monitor_address = data.get('monitor_address')\n member = conn.load_balancer.update_member(\n member_id, pool_id, weight=data.get('weight'),\n monitor_address=monitor_address if monitor_address else None,\n monitor_port=data.get('monitor_port'),\n admin_state_up=data.get('admin_state_up'),\n backup=data.get('backup', False),\n name=data.get('name'),\n )\n return _get_sdk_object_dict(member)", "def put(self, request, loadbalancer_id):\n kwargs = {'loadbalancer_id': loadbalancer_id}\n update_loadbalancer(request, **kwargs)", "def storage_pools_update(context, storage_pools):\n session = get_session()\n\n with session.begin():\n storage_pool_refs = []\n\n for storage_pool in storage_pools:\n LOG.debug('updating storage_pool {0}:'.format(\n storage_pool.get('id')))\n query = _storage_pool_get_query(context, session)\n result = query.filter_by(id=storage_pool.get('id')\n ).update(storage_pool)\n\n if not result:\n LOG.error(exception.StoragePoolNotFound(storage_pool.get(\n 'id')))\n else:\n storage_pool_refs.append(result)\n\n return storage_pool_refs", "def update_connection_pool(maxsize=1):\n get_pool().connection_pool_kw.update(maxsize=maxsize)", "def storage_pool_update(context, storage_pool_id, values):\n session = get_session()\n\n with session.begin():\n query = _storage_pool_get_query(context, session)\n result = query.filter_by(id=storage_pool_id).update(values)\n\n if not result:\n raise exception.StoragePoolNotFound(storage_pool_id)\n\n return result", "def update_pool(request, pool):\n if pool.stage.stage.state != Stage.STARTED:\n return JsonResponse({'success': False, 'reason': 'stage not currently running'})\n else:\n r_type = request.POST['type']\n if r_type == 'bout_result':\n e1 = pool.poolentry_set.get(pk=request.POST['entry_1'])\n e2 = pool.poolentry_set.get(pk=request.POST['entry_2'])\n e1_victory = bool(int(request.POST['e1_victory']))\n e1_score = int(request.POST['e1_score'])\n e2_score = int(request.POST['e2_score'])\n if (e1_victory and e2_score > e1_score) or (not e1_victory and e2_score < e1_score):\n return api_failure('score victory mismatch')\n e1.fencerA_bout_set.update_or_create(fencerB=e2, defaults={'scoreA': e1_score,\n 'victoryA': e1_victory})\n e2.fencerA_bout_set.update_or_create(fencerB=e1, defaults={'scoreA': e2_score,\n 'victoryA': not e1_victory})\n return JsonResponse({'success': True, 'pool_complete': pool.complete()})\n return JsonResponse({'success': False, 'reason': 'unable to understand post'})", "def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()", "def post_loadbalancer_pool_create(self, resource_dict):\n pass", "def fusion_api_edit_storage_pool(self, body, uri, api=None, headers=None):\n return self.pool.update(body, uri, api=api, headers=headers)", "async def update_work_pool(\n work_pool: schemas.actions.WorkPoolUpdate,\n work_pool_name: str = Path(..., description=\"The work pool name\", alias=\"name\"),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n):\n\n # Reserved pools can only updated pause / concurrency\n update_values = work_pool.dict(exclude_unset=True)\n if work_pool_name.lower().startswith(\"prefect\") and (\n set(update_values).difference({\"is_paused\", \"concurrency_limit\"})\n ):\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Worker pools starting with 'Prefect' are reserved for internal use 
\"\n \"and can only be updated to set concurrency limits or pause.\",\n )\n\n async with db.session_context(begin_transaction=True) as session:\n work_pool_id = await worker_lookups._get_work_pool_id_from_name(\n session=session, work_pool_name=work_pool_name\n )\n await models.workers.update_work_pool(\n session=session,\n work_pool_id=work_pool_id,\n work_pool=work_pool,\n db=db,\n )", "def post_floating_ip_pool_update(self, resource_id, resource_dict):\n pass", "def post_loadbalancer_pool_read(self, resource_id, resource_dict):\n pass", "def pools_refresh(self):\n path = \"%s/commands/poolsRefresh\" % self.__base_path\n response = self.__session.post(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def _v1_0_11111_loadbalancers_3132(self, method, url, body, headers):\n if method == \"PUT\":\n json_body = json.loads(body)\n self.assertDictEqual(json_body, {\"name\": \"new_lb_name\"})\n return (httplib.ACCEPTED, \"\", {}, httplib.responses[httplib.ACCEPTED])\n elif method == \"GET\":\n response_body = json.loads(self.fixtures.load(\"v1_slug_loadbalancers_3xxx.json\"))\n response_body[\"loadBalancer\"][\"id\"] = 3132\n response_body[\"loadBalancer\"][\"name\"] = \"new_lb_name\"\n return (\n httplib.OK,\n json.dumps(response_body),\n {},\n httplib.responses[httplib.OK],\n )\n raise NotImplementedError", "def update_pool_stats(self, pool_id, stats, context, pool=None):\n msg = {'info': {'service_type': lb_const.SERVICE_TYPE,\n 'context': context.to_dict()},\n 'notification': [{'resource': 'pool',\n 'data': {'pool_id': pool_id,\n 'stats': stats,\n 'notification_type': (\n 'update_pool_stats'),\n 'pool': pool_id}}]\n }\n LOG.info(\"Sending Notification 'Update Pool Stats' \"\n \"for pool: %(pool_id)s with stats:%(stats)s\",\n {'pool_id': pool_id,\n 'stats': stats})\n self.notify._notification(msg)", "def add_to_pool(self):\n if self.check_pool():\n for func in self.getter._func:\n proxies = self.getter.get_proxies(func)\n for proxy in proxies:\n self.conn.push_to_right(proxy)\n else:\n print('Pool reached max capacity')", "def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):\n old_val, new_val = self.get_diff_of_dict(\n old_loadbalancer, loadbalancer)\n arg_dict = {'context': context,\n lb_const.OLD_LOADBALANCER: old_loadbalancer,\n lb_const.LOADBALANCER: loadbalancer,\n }\n LOG.info(\"Received request 'Update Loadbalancer' for LB:%(lb)s \"\n \"with new Param:%(new_val)s and old Param:%(old_val)s\",\n {'lb': loadbalancer['id'],\n 'new_val': new_val,\n 'old_val': old_val})\n self._send_event(lb_const.EVENT_UPDATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def update_load_balancer(self,\n instance_id: str,\n dnszone_id: str,\n lb_id: str,\n *,\n name: str = None,\n description: str = None,\n enabled: bool = None,\n ttl: int = None,\n fallback_pool: str = None,\n default_pools: List[str] = None,\n az_pools: List['LoadBalancerAzPoolsItem'] = None,\n x_correlation_id: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n if instance_id is None:\n raise ValueError('instance_id must be provided')\n if dnszone_id is None:\n raise ValueError('dnszone_id must be provided')\n if lb_id is None:\n raise ValueError('lb_id must be provided')\n if az_pools is not None:\n az_pools = [convert_model(x) for x in az_pools]\n headers = {\n 'X-Correlation-ID': x_correlation_id\n }\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n 
operation_id='update_load_balancer')\n headers.update(sdk_headers)\n\n data = {\n 'name': name,\n 'description': description,\n 'enabled': enabled,\n 'ttl': ttl,\n 'fallback_pool': fallback_pool,\n 'default_pools': default_pools,\n 'az_pools': az_pools\n }\n data = {k: v for (k, v) in data.items() if v is not None}\n data = json.dumps(data)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/instances/{0}/dnszones/{1}/load_balancers/{2}'.format(\n *self.encode_path_vars(instance_id, dnszone_id, lb_id))\n request = self.prepare_request(method='PUT',\n url=url,\n headers=headers,\n data=data)\n\n response = self.send(request)\n return response" ]
[ "0.68706685", "0.6857617", "0.6845245", "0.6766318", "0.6747887", "0.65619475", "0.6480729", "0.63982177", "0.6064409", "0.6002602", "0.5965404", "0.5946069", "0.58925164", "0.5889436", "0.5783237", "0.574946", "0.56865215", "0.5683171", "0.5667662", "0.5654379", "0.5612481", "0.5575353", "0.5531531", "0.5519611", "0.5494225", "0.54742044", "0.5460294", "0.5440225", "0.5428523", "0.54265904" ]
0.7689518
0
Retrieves stats for a certain load balancer pool.
def retrieve_pool_stats(self, pool, **_params): return self.get(self.pool_path_stats % (pool), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pool_stats(self, pool):\n svc = self.pool_path % pool\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting pool stats: '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n if not self._is_pool_owned(val):\n exception_msg = (_('Error pool ownership: '\n 'pool %(pool)s is not owned '\n 'by %(host)s.')\n % {'pool': pool,\n 'host': self.host})\n raise exception.InvalidInput(reason=pool)\n avail = val['pool']['usage']['available']\n used = val['pool']['usage']['used']\n return avail, used", "def get_stats(self, loadbalancer=None):\n uri = \"/loadbalancers/%s/stats\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.find_pool(pool_id)\n pool = _get_sdk_object_dict(pool)\n\n if request.GET.get('includeChildResources'):\n resources = {}\n resources['pool'] = pool\n\n if pool.get('members'):\n member_list = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n resources['members'] = member_list\n\n if pool.get('health_monitor_id'):\n monitor_id = pool['health_monitor_id']\n monitor = conn.load_balancer.find_health_monitor(\n monitor_id)\n monitor = _get_sdk_object_dict(monitor)\n resources['monitor'] = monitor\n\n return resources\n else:\n return pool", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n members_list = _sdk_object_to_list(conn.load_balancer.members(pool_id))\n return {'items': members_list}", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None", "def get(self, request):\n pool_id = request.GET.get('poolId')\n conn = get_sdk_connection(request)\n health_monitor_list = _sdk_object_to_list(\n conn.load_balancer.health_monitors(\n project_id=request.user.project_id\n )\n )\n\n if pool_id:\n health_monitor_list = self._filter_health_monitors(\n health_monitor_list,\n pool_id)\n return {'items': health_monitor_list}", "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "def describe_balancer(ctx):\n data = self.get_balancer_info()\n if data is not None:\n ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))\n 
ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} does not exist.'.format(self.get_balancer_name()))", "def get_pool():\n app = get_app()\n return app['pool']", "def get_stats() -> dict:\n\n url = f\"{CONFIG.POSTGREST}/app_about_stats\"\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n except (requests.ConnectionError, requests.exceptions.HTTPError) as e:\n APP.logger.error(f'API request for db stats returned: {e}')\n else:\n results = json.loads(response.text)\n # APP.logger.debug(results)\n return results", "def dbstats_api():\n if not config.DEBUG:\n limit_to_localhost()\n\n return jsonify(status='ok', stats=sqlalchemy_pool_status()) # cant be async, used by the reboot script", "async def skribbl_get_stats(self) -> int:\r\n return await self.read(self._skribbl_get_stats)", "def GetStats(self):\r\n\t\tArg1 = self.href\r\n\t\treturn self._execute('GetStats', payload=locals(), response_object=None)", "def update_pool_stats(self, pool_id, stats, context, pool=None):\n msg = {'info': {'service_type': lb_const.SERVICE_TYPE,\n 'context': context.to_dict()},\n 'notification': [{'resource': 'pool',\n 'data': {'pool_id': pool_id,\n 'stats': stats,\n 'notification_type': (\n 'update_pool_stats'),\n 'pool': pool_id}}]\n }\n LOG.info(\"Sending Notification 'Update Pool Stats' \"\n \"for pool: %(pool_id)s with stats:%(stats)s\",\n {'pool_id': pool_id,\n 'stats': stats})\n self.notify._notification(msg)", "def get_block_info(pool_name='0-100-pool.burst.cryptoguru.org:8008'):\n channel = grpc.insecure_channel(pool_name)\n stub = api_pb2_grpc.ApiStub(channel)\n block_info = stub.GetBlockInfo(api_pb2.Void())\n return block_info", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)", "def get_lbaas_agent_hosting_pool(self, pool, **_params):\r\n return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool,\r\n params=_params)", "def get_project_stats(self, pool, project):\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting project stats: '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n avail = val['project']['space_available']\n return avail", "def get_stats(self, **kwargs):\n resp = self.get(_u.build_uri(\"stats\"), kwargs)\n return utils.handle_response(resp)", "def get_pools():\n gclass = get_class(\"dhcpPool\")\n if gclass is None:\n logger.error(\"failed to get dhcpPool\")\n return None\n\n pools = {}\n for obj in gclass:\n if \"attributes\" in obj[obj.keys()[0]]:\n attr = obj[obj.keys()[0]][\"attributes\"]\n for r in [\"className\", \"dn\", \"id\", \"type\", \"startIp\", \n \"endIp\", \"freeIPs\"]:\n if r not in attr:\n logger.error(\"missing %s, invalid object: %s\" % (\n r, 
pretty_print(obj)))\n return None\n ip = ipv4_to_int(attr[\"startIp\"])\n if ip is None:\n logger.error(\"failed to convert ipv4 address for %s\" % obj)\n return None\n p = {\n \"className\": attr[\"className\"],\n \"dn\": attr[\"dn\"],\n \"id\": attr[\"id\"],\n \"type\": attr[\"type\"],\n \"address\": ip,\n \"address_str\": attr[\"startIp\"],\n \"freeIPs\": attr[\"freeIPs\"]\n }\n if ip not in pools:\n pools[ip] = {\"bad_lease\":[], \"good_lease\":[], \"pools\":[],\n \"type\":attr[\"className\"], \"state\":\"\", \"address\":ip}\n pools[ip][\"pools\"].append(p)\n\n # loop through all entries in pool and update state\n for ip in pools:\n state = \"recovery\"\n for p in pools[ip][\"pools\"]:\n if p[\"type\"]!=\"recovery\": state = p[\"type\"]\n pools[ip][\"state\"] = state\n return pools", "def get(self, request, member_id, pool_id):\n conn = get_sdk_connection(request)\n member = conn.load_balancer.find_member(member_id, pool_id)\n return _get_sdk_object_dict(member)", "def get_pool_info(_ns, pool, human_friendly):\n size = size2str(pool.TotalManagedSpace, human_friendly)\n return (pool.InstanceID,\n pool.ElementName,\n pool.ElementName,\n size,\n \"volume group (LVM)\")", "def get_statistics(self):\n\t\treturn Job(SDK.PrlSrv_GetStatistics(self.handle)[0])", "def pool(self):\n return self._properties.get('pool')", "def get(self, *args, **kwargs):\n output = self._base_stats()\n output['connections'] = dict()\n for key in self.application.rabbitmq.keys():\n output['connections'][key] = self.application.rabbitmq[key].stats\n self.write(output)", "def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs", "def pool_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n ret[pool_record[\"name_label\"]] = pool_record\n return ret" ]
[ "0.7811298", "0.71534944", "0.6650154", "0.6583187", "0.6479198", "0.6318627", "0.6265656", "0.62604696", "0.61618876", "0.6066381", "0.5996045", "0.5907003", "0.58319587", "0.5795752", "0.57949924", "0.5728804", "0.5718277", "0.56890684", "0.5630172", "0.5616478", "0.5546755", "0.5532498", "0.552894", "0.551519", "0.55133533", "0.5502385", "0.5469119", "0.5422619", "0.54150313", "0.5403193" ]
0.83906263
0
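The row above pairs a docstring-style query with what appears to be the OpenStack python-neutronclient LBaaS v1 client, in which retrieve_pool_stats issues a GET against the pool's stats sub-resource. A minimal usage sketch follows; the credentials, endpoint, and pool UUID are all illustrative assumptions, not values from the corpus:

    from neutronclient.v2_0 import client

    # Assumed Keystone credentials/endpoint; replace with real values.
    neutron = client.Client(username='demo', password='secret',
                            tenant_name='demo',
                            auth_url='http://controller:5000/v2.0')

    pool_id = '11111111-2222-3333-4444-555555555555'  # assumed pool UUID
    # Returns a dict of the form {'stats': {'bytes_in': ..., 'bytes_out': ...,
    # 'active_connections': ..., 'total_connections': ...}}.
    stats = neutron.retrieve_pool_stats(pool_id)
    print(stats['stats'])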
Fetches a list of all load balancer members for a tenant.
def list_members(self, retrieve_all=True, **_params): # Pass filters in "params" argument to do_request return self.list('members', self.members_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_members(self):\n\n user_migrator = MigrateMemberAuth()\n\n # Make sure we only fetch ID's from the database\n return user_migrator._list_from().only('id')", "def _get_org_members(self):\n url = f\"{BASE_URL}/orgs/{ORG}/members\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def list_members(self, **filters):\n\t\tresult = self.client.get(self._endpoint + \"/member\", params=filters)\n\t\treturn PaginatedList(result, Member, (self.user_id, self.site_id), \"member_id\")", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n members_list = _sdk_object_to_list(conn.load_balancer.members(pool_id))\n return {'items': members_list}", "async def get_members(guild_id):\n user_id = await token_check()\n await guild_check(user_id, guild_id)\n\n j = await request.get_json()\n\n limit, after = int(j.get('limit', 1)), j.get('after', 0)\n\n if limit < 1 or limit > 1000:\n raise BadRequest('limit not in 1-1000 range')\n\n user_ids = await app.db.fetch(f\"\"\"\n SELECT user_id\n WHERE guild_id = $1, user_id > $2\n LIMIT {limit}\n ORDER BY user_id ASC\n \"\"\", guild_id, after)\n\n user_ids = [r[0] for r in user_ids]\n members = await app.storage.get_member_multi(guild_id, user_ids)\n return jsonify(members)", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def get_all_members(self):\n members = self.wls_board.get_members()\n return members", "def list(self, **params):\n # This is to ensure tenant_id key is not populated\n # if tenant_id=None is specified.\n tenant_id = params.pop('tenant_id', self.request.user.tenant_id)\n if tenant_id:\n params['tenant_id'] = tenant_id\n return self._list(**params)", "def _load_users(self) -> List[Dict]:\n try:\n api_call = self.web_client.api_call('users.list')\n if api_call.get('ok'):\n return api_call.get('members')\n except Exception:\n LOGGER.exception('Cannot get users')\n raise", "def members(self):\n return self.find_users_by_rel('member')", "def list(self, tenant_id=None):\n\n if not tenant_id:\n return self._list(\"/users\", \"users\")\n else:\n return self._list(\"/tenants/%s/users\" % tenant_id, \"users\")", "async def get_bans(self) -> 'typing.List[dt_user.User]':\n if not self.me.guild_permissions.ban_members:\n raise PermissionsError(\"ban_members\")\n\n bans = await self._bot.http.get_bans(self.id)\n users = []\n\n for user_data in bans:\n # TODO: Audit log stuff, if it ever comes out.\n user_data = user_data.get(\"user\", None)\n users.append(dt_user.User(self._bot, **user_data))\n\n return users", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def list(self):\n sql = \"\"\"SELECT\n rowid,\n first,\n last,\n introducedDate\n FROM members\n ORDER BY rowid\"\"\"\n rows = list(self.cursor.execute(sql))\n return [self.buildMember(row) for row in rows]", "def getAllMembers(self):\n if not self.is_compatible(): return []\n return self.tool.listMembers()", "def get_members(self):\r\n database = 
main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)", "def getMembers():", "def getMembers():", "def getMembers():", "def getMembers():", "def list_member_accounts(nextToken=None, maxResults=None):\n pass", "def get_members(base_url, end_url):\n reps, content_len, bill_info, votes = get_next_page(base_url, end_url, 1)\n return reps", "def get(self):\n\n offset = 0\n limit = Config.get_page_limit()\n args = request.args\n try:\n offset = request.args['offset']\n except Exception:\n pass\n\n try:\n limit = request.args['limit']\n except Exception:\n pass\n\n return self.get_request_handler(request.headers).get_all_users(offset=offset, limit=limit)", "def all_organization_members(\n self,\n id: str,\n page: int | None = None,\n per_page: int | None = None,\n include_totals: bool = True,\n from_param: str | None = None,\n take: int | None = None,\n ):\n\n params = {\n \"page\": page,\n \"per_page\": per_page,\n \"include_totals\": str(include_totals).lower(),\n \"from\": from_param,\n \"take\": take,\n }\n\n return self.client.get(self._url(id, \"members\"), params=params)", "def get_db_members(self, context, members):\n members_objs = self.dns_manager.get_db_members(context, members)\n return members_objs", "def members(self, uid='*'):\n entries = self.search(uid)\n\n if self.objects:\n return self.member_objects(entries)\n\n result = []\n for entry in entries:\n result.append(entry[1])\n\n return result", "def get_members(self, guild: discord.Guild) -> list[discord.Member]:\n role = guild.get_role(self.role)\n return [] if not role else role.members", "def get_members(self):\n return self._members", "def list_all():\n\n members = ldapi.search(ld, cfg['ldap_users_base'], '(objectClass=member)')\n return dict([(member[0], member[1]) for member in members])", "def get_members(id): # pylint: disable=I0011,W0622\n\n l = Legacy.query.get_or_404(id)\n\n if current_app.config.get('IGNORE_AUTH') is not True: # pragma: no cover\n if not l.can_view(g.user.id):\n raise Http403('Access denied')\n\n return {'members': [m.to_dict(public_only=True) for m in l.members]}" ]
[ "0.6391402", "0.6124557", "0.6073281", "0.60686046", "0.60608774", "0.60337555", "0.60250235", "0.59194875", "0.58834416", "0.58087546", "0.574711", "0.5702761", "0.56860137", "0.56655455", "0.56489336", "0.56368655", "0.56167287", "0.56167287", "0.56167287", "0.56167287", "0.56110966", "0.55583036", "0.55165404", "0.5493551", "0.54892105", "0.5488875", "0.54555696", "0.54547054", "0.54538435", "0.5452779" ]
0.6616581
0
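Reusing the hypothetical neutron client constructed in the earlier sketch: list_members passes arbitrary keyword arguments through as server-side query filters, so a tenant can narrow the listing without filtering client-side (the filter values below are illustrative):

    # Only active members of one pool; pagination is handled transparently
    # because retrieve_all defaults to True.
    members = neutron.list_members(pool_id=pool_id, admin_state_up=True)
    for member in members['members']:
        print(member['id'], member['address'], member['protocol_port'])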
Fetches information about a certain load balancer member.
def show_member(self, member, **_params): return self.get(self.member_path % (member), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request, member_id, pool_id):\n conn = get_sdk_connection(request)\n member = conn.load_balancer.find_member(member_id, pool_id)\n return _get_sdk_object_dict(member)", "async def get_guild_member(guild_id, member_id):\n user_id = await token_check()\n await guild_check(user_id, guild_id)\n member = await app.storage.get_single_member(guild_id, member_id)\n return jsonify(member)", "def member(self, user):\n return self.search(uid=user)[0][1]", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n members_list = _sdk_object_to_list(conn.load_balancer.members(pool_id))\n return {'items': members_list}", "def get_member(did):\n conn = create_connection(db_location)\n c = conn.cursor()\n c.execute(\"SELECT * FROM members WHERE member_uid = \" + did)\n member = dict((c.description[i][0], value) for i, value in enumerate(c.fetchone()))\n if __debug__:\n print(member)\n conn.commit()\n conn.close()\n return member", "def get_member_config(baseurl, cookie_header):\n member_id = int(input('\\nEnter Member ID: '))\n if member_id <= 0:\n print('Member IDs are positive integers, please check your ID')\n exit(1)\n url = baseurl + 'stacking/vsf/members/%i' % member_id\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "def getMember(unique_name):", "def getMember(unique_name):", "def get_member_link_info(baseurl, cookie_header, member_id, link_id):\n url = baseurl + 'stacking/vsf/members/{}/links/{}'.format(member_id, link_id)\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "async def info_user(self, ctx, member: Optional[discord.Member]):\n member1 = member or ctx.author\n embed = discord.Embed(title=\"Member Information\",\n color=discord.Color.blurple(),\n timestamp=datetime.utcnow())\n\n embed.add_field(name=\"ID\", value=f\"{member1.id}\", inline=False)\n embed.add_field(\n name=\"Name\", value=f\"{member1.name}#{member1.discriminator}\")\n embed.add_field(name=\"Top role\", value=f\"{member1.top_role.mention}\")\n embed.add_field(name=\"status\",\n value=f\"{str(member1.activity.type).split('.') if member1.activity else 'N/A'} {member1.activity.name if member1.activity else ''}\")\n embed.add_field(\n name=\"created at\", value=f\"{member1.created_at.strftime('%d/%m/%y %H:%M:%S')}\")\n embed.add_field(\n name=\"Joined at\", value=f\"{member1.joined_at.strftime('%d/%m/%y %H:%M:%S')}\")\n embed.add_field(name=\"Boosted?\", value=f\"{member1.premium_since}\")\n\n await ctx.reply(embed=embed)", "def get_member_system_info(baseurl, cookie_header, member_id):\n url = baseurl + 'stacking/vsf/members/system_info/{}'.format(member_id)\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "def member(self, uid):\n try:\n member = self.search(uid=uid)[0]\n except IndexError:\n return None\n\n if self.objects:\n return member\n\n return member[1]", "def member(self, request, **kwargs):\n group_obj = self.get_object()\n member_data = group_obj.members.all()\n if member_data is not None:\n serializer_data = MemberSerializer(member_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details 
found'}, status=status.HTTP_404_NOT_FOUND)", "def info(self, membership, callback=None):", "def get_member_status(self, service, bigip, status_keys):\n member_status = {}\n pool = self.service_adapter.get_pool(service)\n member = self.service_adapter.get_member(service)\n part = pool[\"partition\"]\n try:\n p = self.pool_helper.load(bigip,\n name=pool[\"name\"],\n partition=part)\n\n m = p.members_s.members\n if m.exists(name=urllib.quote(member[\"name\"]), partition=part):\n m = m.load(name=urllib.quote(member[\"name\"]), partition=part)\n member_status = self.pool_helper.collect_stats(\n m, stat_keys=status_keys)\n else:\n LOG.error(\"Unable to get member status. \"\n \"Member %s does not exist.\", member[\"name\"])\n\n except Exception as e:\n # log error but continue on\n LOG.error(\"Error getting member status: %s\", e.message)\n\n return member_status", "def get(user):\n if user:\n return Member.get_by_key_name(user.user_id())", "def get_member(self, *args, **kwargs):\n return self.bot.get_chat_member(self.id, *args, **kwargs)", "def get_link_info(baseurl, cookie_header, member_id):\n url = baseurl + 'stacking/vsf/members/{}/links'.format(member_id)\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "def get(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.find_load_balancer(loadbalancer_id)\n loadbalancer_dict = _get_sdk_object_dict(loadbalancer)\n if request.GET.get('full') and neutron.floating_ip_supported(request):\n add_floating_ip_info(request, [loadbalancer_dict])\n return loadbalancer_dict", "def getMembers():", "def getMembers():", "def getMembers():", "def getMembers():", "async def info(self, ctx, *, member: disnake.Member = None):\n\n member = member or ctx.author\n\n e = disnake.Embed(description=\"\")\n\n if member.bot:\n e.description = \"This account is a bot.\\n\\n\"\n\n e.description += member.mention\n\n e.add_field(name=\"Status\", value=member.status)\n\n if member.activity:\n e.add_field(name=\"Activity\", value=member.activity.name)\n\n e.set_author(name=str(member), icon_url=member.display_avatar.url)\n\n now = datetime.now(timezone.utc)\n created = member.created_at\n joined = member.joined_at\n\n e.add_field(\n name=\"Account age\",\n value=\"{0} • Created <t:{1}:F>\".format(\n pretty_timedelta(now - created), round(created.timestamp())\n ),\n inline=False,\n )\n\n e.add_field(\n name=\"Member for\",\n value=\"{0} • Joined <t:{1}:F>\".format(\n pretty_timedelta(now - joined), round(joined.timestamp())\n ),\n )\n\n if len(member.roles) > 1:\n e.add_field(\n name=\"Roles\",\n value=\" \".join(role.mention for role in reversed(member.roles[1:])),\n inline=False,\n )\n\n e.set_footer(text=\"ID: \" + str(member.id))\n\n await ctx.send(embed=e)", "async def userinfo_command(self, ctx, member: Optional[Member]):\n member = member or ctx.author\n member_avatar = member.avatar_url\n id = member.id\n name = member.name\n accountAge = member.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\")\n joinServerDate = member.joined_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\")\n highestRole = member.top_role.mention\n\n info = \"Server Owner\" if ctx.guild.owner is ctx.author else \"Member\"\n\n embed = Embed(\n title=f\"User Info - {member.name}\",\n timestamp=datetime.utcnow(),\n color=Color.blurple(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n 
embed.set_thumbnail(url=member_avatar)\n fields = [\n (\"ID\", id, False),\n (\"Name\", f\"{name} #{ctx.author.discriminator}\", True),\n (\"Highest Role\", highestRole, True),\n (\"Account Created on\", accountAge, True),\n (\"Joined Server on\", joinServerDate, True),\n (\"Additional Info\", info, True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)", "def member_info(self, attempt=1):\n\n response = self.postman.request('member_list', page=attempt)\n\n if (response.status_code == requests.codes.ok):\n if (len(response.json()) != 0):\n self.members += len(response.json())\n\n self.member_info(attempt=attempt + 1)", "def member(self) -> object:\n return self._member", "def get_member(self, name):\n members = self.wls_board.get_members()\n for member in members:\n if name in member.full_name:\n return member\n return 'None'", "def _search_member_by_name(self, fullname):\n if not fullname:\n return None\n\n membership = api.portal.get_tool(\"portal_membership\")\n members = membership.searchForMembers(name=fullname)\n if members:\n # in case there are more than one members with the\n # same fullname, we use the first one listed\n member = members[0].getUserId()\n return membership.getMemberInfo(member)", "def getMember(self, *args):\n return _libsbml.Group_getMember(self, *args)" ]
[ "0.75904655", "0.6251855", "0.6246115", "0.62460583", "0.61917824", "0.61822265", "0.6129956", "0.6129956", "0.6072084", "0.5963283", "0.59447235", "0.593797", "0.5892445", "0.58627737", "0.5849817", "0.5792465", "0.5763985", "0.57312113", "0.5646421", "0.5633066", "0.5633066", "0.5633066", "0.5633066", "0.56311363", "0.5623576", "0.5611181", "0.5600249", "0.5595599", "0.5562618", "0.5491842" ]
0.684679
1
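show_member performs a GET on a single member resource, with _params serialized into the query string, which allows field selection. A sketch under the same assumptions (the member UUID is made up):

    member_id = '22222222-3333-4444-5555-666666666666'  # assumed member UUID
    # Ask the API to return only selected attributes of the member.
    result = neutron.show_member(member_id, fields=['address', 'status'])
    print(result['member'])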
Updates a load balancer member.
def update_member(self, member, body=None): return self.put(self.member_path % (member), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put(self, request, member_id, pool_id):\n data = request.DATA\n conn = get_sdk_connection(request)\n monitor_address = data.get('monitor_address')\n member = conn.load_balancer.update_member(\n member_id, pool_id, weight=data.get('weight'),\n monitor_address=monitor_address if monitor_address else None,\n monitor_port=data.get('monitor_port'),\n admin_state_up=data.get('admin_state_up'),\n backup=data.get('backup', False),\n name=data.get('name'),\n )\n return _get_sdk_object_dict(member)", "def update_member(self, context, old_member, member):\n old_val, new_val = self.get_diff_of_dict(old_member, member)\n LOG.info(\"Received request 'Update Member' for Member:\"\n \"%(member_id)s in Pool:%(pool_id)s with new Param:\"\n \"%(new_val)s and old Param:%(old_val)s\",\n {'pool_id': member['pool_id'],\n 'member_id': member['id'],\n 'old_val': old_val,\n 'new_val': new_val})\n arg_dict = {'context': context,\n lb_const.OLD_MEMBER: old_member,\n lb_const.MEMBER: member,\n }\n self._send_event(lb_const.EVENT_UPDATE_MEMBER_V2, arg_dict,\n serialize=True,\n binding_key=member[lb_const.POOL]['loadbalancer_id'],\n key=member['id'])", "def _update(self):\n path = \"/members/%s\" % self._dict['member_id']\n data = self.extract()\n if self._dict['member_status_id'] in (\n MemberStatus.Active, MemberStatus.Error, MemberStatus.OptOut):\n data['status_to'] = self._dict['member_status_id']\n if not self.account.adapter.put(path, data):\n raise ex.MemberUpdateError()", "def update_member_list(request, **kwargs):\n data = request.DATA\n loadbalancer_id = data.get('loadbalancer_id')\n pool_id = kwargs.get('pool_id')\n existing_members = kwargs.get('existing_members')\n members_to_add = kwargs.get('members_to_add')\n members_to_delete = kwargs.get('members_to_delete')\n\n if members_to_delete:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n remove_member(request, **kwargs)\n elif members_to_add:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n add_member(request, **kwargs)\n elif data.get('monitor'):\n args = (request, loadbalancer_id, update_monitor)\n thread.start_new_thread(poll_loadbalancer_status, args)", "def test_update_member(self):\r\n resource = 'member'\r\n cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def post_loadbalancer_member_update(self, resource_id, resource_dict):\n pass", "def put(self, request, pool_id):\n # Assemble the lists of member id's to add and remove, if any exist\n request_member_data = request.DATA.get('members', [])\n\n conn = get_sdk_connection(request)\n existing_members = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n\n (members_to_add, members_to_delete) = get_members_to_add_remove(\n request_member_data, existing_members)\n\n if members_to_add or members_to_delete:\n kwargs = {'existing_members': existing_members,\n 'members_to_add': members_to_add,\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}\n update_member_list(request, **kwargs)", "def put(self, request, loadbalancer_id):\n kwargs = {'loadbalancer_id': loadbalancer_id}\n update_loadbalancer(request, **kwargs)", "def member(self, member: object):\n\n self._member = member", "async def 
modify_guild_member(guild_id, member_id):\n user_id = await token_check()\n await guild_owner_check(user_id, guild_id)\n\n j = validate(await request.get_json(), MEMBER_UPDATE)\n nick_flag = False\n\n if 'nick' in j:\n await guild_perm_check(user_id, guild_id, 'manage_nicknames')\n\n nick = j['nick'] or None\n\n await app.db.execute(\"\"\"\n UPDATE members\n SET nickname = $1\n WHERE user_id = $2 AND guild_id = $3\n \"\"\", nick, member_id, guild_id)\n\n nick_flag = True\n\n if 'mute' in j:\n await guild_perm_check(user_id, guild_id, 'mute_members')\n\n await app.db.execute(\"\"\"\n UPDATE members\n SET muted = $1\n WHERE user_id = $2 AND guild_id = $3\n \"\"\", j['mute'], member_id, guild_id)\n\n if 'deaf' in j:\n await guild_perm_check(user_id, guild_id, 'deafen_members')\n\n await app.db.execute(\"\"\"\n UPDATE members\n SET deafened = $1\n WHERE user_id = $2 AND guild_id = $3\n \"\"\", j['deaf'], member_id, guild_id)\n\n if 'channel_id' in j:\n # TODO: check MOVE_MEMBERS and CONNECT to the channel\n # TODO: change the member's voice channel\n pass\n\n if 'roles' in j:\n await guild_perm_check(user_id, guild_id, 'manage_roles')\n await _update_member_roles(guild_id, member_id, j['roles'])\n\n member = await app.storage.get_member_data_one(guild_id, member_id)\n member.pop('joined_at')\n\n # call pres_update for role and nick changes.\n partial = {\n 'roles': member['roles']\n }\n\n if nick_flag:\n partial['nick'] = j['nick']\n\n await app.dispatcher.dispatch(\n 'lazy_guild', guild_id, 'pres_update', user_id, partial)\n\n await app.dispatcher.dispatch_guild(guild_id, 'GUILD_MEMBER_UPDATE', {**{\n 'guild_id': str(guild_id)\n }, **member})\n\n return '', 204", "def pre_loadbalancer_member_update(self, resource_id, resource_dict):\n pass", "def update_loadbalancer(request, **kwargs):\n data = request.DATA\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.update_load_balancer(\n loadbalancer_id,\n name=data['loadbalancer'].get('name'),\n description=data['loadbalancer'].get('description'),\n admin_state_up=data['loadbalancer'].get('admin_state_up'))\n\n return _get_sdk_object_dict(loadbalancer)", "def add_member(request, **kwargs):\n data = request.DATA\n members = data.get('members')\n pool_id = kwargs.get('pool_id')\n\n if kwargs.get('members_to_add'):\n members_to_add = kwargs['members_to_add']\n index = [members.index(member) for member in members\n if member['id'] == members_to_add[0]][0]\n loadbalancer_id = data.get('loadbalancer_id')\n else:\n index = kwargs.get('index')\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n member = members[index]\n\n conn = get_sdk_connection(request)\n monitor_address = member.get('monitor_address')\n member = conn.load_balancer.create_member(\n pool_id,\n address=member['address'],\n protocol_port=member['protocol_port'],\n subnet_id=member['subnet_id'],\n weight=member.get('weight'),\n monitor_address=monitor_address if monitor_address else None,\n monitor_port=member.get('monitor_port'),\n admin_state_up=member.get('admin_state_up'),\n backup=member.get('backup', False),\n name=member.get('name'),\n )\n\n index += 1\n if kwargs.get('members_to_add'):\n args = (request, loadbalancer_id, update_member_list)\n members_to_add = kwargs['members_to_add']\n members_to_add.pop(0)\n kwargs = {'callback_kwargs': {\n 'existing_members': kwargs.get('existing_members'),\n 'members_to_add': members_to_add,\n 'members_to_delete': kwargs.get('members_to_delete'),\n 'pool_id': pool_id}}\n 
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif len(members) > index:\n args = (request, loadbalancer_id, add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool_id,\n 'index': index}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, loadbalancer_id, create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool_id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(member)", "def xbrl_member(self, xbrl_member):\n\n self._xbrl_member = xbrl_member", "def replace_member(self, member_to_replace, new_member):\r\n if (isinstance(member_to_replace, int) and\r\n member_to_replace < len(self._members)):\r\n membidx = member_to_replace\r\n else:\r\n membidx = self._members.index(member_to_replace)\r\n self._members[membidx] = new_member", "async def update(\n self,\n blockchain_member_name: str,\n resource_group_name: str,\n tags: Optional[Dict[str, str]] = None,\n password: Optional[str] = None,\n firewall_rules: Optional[List[\"FirewallRule\"]] = None,\n consortium_management_account_password: Optional[str] = None,\n **kwargs\n ) -> \"models.BlockchainMember\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.BlockchainMember\"]\n error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})\n\n _blockchain_member = models.BlockchainMemberUpdate(tags=tags, password=password, firewall_rules=firewall_rules, consortium_management_account_password=consortium_management_account_password)\n api_version = \"2018-06-01-preview\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n\n # Construct URL\n url = self.update.metadata['url']\n path_format_arguments = {\n 'blockchainMemberName': self._serialize.url(\"blockchain_member_name\", blockchain_member_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = 'application/json'\n\n # Construct and send request\n body_content_kwargs = {} # type: Dict[str, Any]\n if _blockchain_member is not None:\n body_content = self._serialize.body(_blockchain_member, 'BlockchainMemberUpdate')\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)\n\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('BlockchainMember', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def update_member_by_id(self, id, vestorly_auth, member, **kwargs):\n \n # verify the required parameter 'id' is set\n if id 
is None:\n raise ValueError(\"Missing the required parameter `id` when calling `update_member_by_id`\")\n \n # verify the required parameter 'vestorly_auth' is set\n if vestorly_auth is None:\n raise ValueError(\"Missing the required parameter `vestorly_auth` when calling `update_member_by_id`\")\n \n # verify the required parameter 'member' is set\n if member is None:\n raise ValueError(\"Missing the required parameter `member` when calling `update_member_by_id`\")\n \n all_params = ['id', 'vestorly_auth', 'member']\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method update_member_by_id\" % key)\n params[key] = val\n del params['kwargs']\n\n resource_path = '/members/{id}'.replace('{format}', 'json')\n method = 'PUT'\n\n path_params = remove_none(dict(id=params.get('id')))\n query_params = remove_none(dict(vestorly_auth=params.get('vestorly_auth')))\n header_params = remove_none(dict())\n form_params = remove_none(dict())\n files = remove_none(dict())\n body_params = params.get('member')\n\n # HTTP header `Accept`\n header_params['Accept'] = ApiClient.select_header_accept([])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = ApiClient.select_header_content_type([])\n\n response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,\n body=body_params, post_params=form_params, files=files,\n response='Memberresponse')\n \n return response", "def update_loadbalancer(self, context, lb, old):\n LOG.debug(\"\\nupdate_loadbalancer({}): called\".format(lb.id))\n hostnames = self._get_hostname(lb)\n # Update the TrafficIP group\n vapv = self._get_vapv(hostnames)\n # Update allowed_address_pairs\n if not old or lb.vip_address != old.vip_address:\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.add_ip_to_ports(\n lb.vip_address, port_ids\n )\n # Update bandwidth allocation\n if old is not None and old.bandwidth != lb.bandwidth:\n self._update_instance_bandwidth(hostnames, lb.bandwidth)", "async def on_member_update(old, updated):\n if old.nick != updated.nick:\n boterate.update_member(updated)", "def remove_member(request, **kwargs):\n data = request.DATA\n loadbalancer_id = data.get('loadbalancer_id')\n pool_id = kwargs.get('pool_id')\n\n if kwargs.get('members_to_delete'):\n members_to_delete = kwargs['members_to_delete']\n member_id = members_to_delete.pop(0)\n\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_member(member_id, pool_id,\n ignore_missing=True)\n\n args = (request, loadbalancer_id, update_member_list)\n kwargs = {'callback_kwargs': {\n 'existing_members': kwargs.get('existing_members'),\n 'members_to_add': kwargs.get('members_to_add'),\n 'members_to_delete': members_to_delete,\n 'pool_id': pool_id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)", "def member_list(self, member_list):\n\n self._member_list = member_list", "def touch_member(self, data, ttl=None, permanent=False):", "async def guild_member_update_middleware(self, payload: GatewayDispatch):\n\n return (\n \"on_guild_member_update\",\n [GuildMemberUpdateEvent.from_dict(\n construct_client_dict(self, payload.data)\n )]\n )", "async def update(self, ctx, member: Member, channel: TextChannel):\n user = User(self.bot.db)\n monitor = await user.get(multi=False, 
guild_id=ctx.guild.id, user_id=member.id)\n if not monitor:\n return await ctx.send(\"This member is not monitored.\")\n\n await user.update({\"id\": monitor[\"id\"]}, channel_id=channel.id)\n await ctx.send(\"Monitor has been updated.\")", "def update_user(username):\n try:\n member = Member.objects.get(username=username)\n except Member.DoesNotExist:\n pass\n else:\n member.save()", "def member_of(self, member_of: object):\n\n self._member_of = member_of", "def delete(self, request, member_id, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_member,\n member_id, pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)", "def update_station_member(self, station_id, userid, role_id):\n return self._stations_service.update_station_member(station_id, userid, role_id)", "def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):\n old_val, new_val = self.get_diff_of_dict(\n old_loadbalancer, loadbalancer)\n arg_dict = {'context': context,\n lb_const.OLD_LOADBALANCER: old_loadbalancer,\n lb_const.LOADBALANCER: loadbalancer,\n }\n LOG.info(\"Received request 'Update Loadbalancer' for LB:%(lb)s \"\n \"with new Param:%(new_val)s and old Param:%(old_val)s\",\n {'lb': loadbalancer['id'],\n 'new_val': new_val,\n 'old_val': old_val})\n self._send_event(lb_const.EVENT_UPDATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def update(self, user: U) -> None:\n ..." ]
[ "0.73988515", "0.7200073", "0.7090033", "0.6488313", "0.6397488", "0.6339667", "0.62624913", "0.61097205", "0.6095201", "0.6090544", "0.60724914", "0.6049983", "0.59969854", "0.5987845", "0.5789719", "0.57263315", "0.5704301", "0.5588402", "0.5576284", "0.55726373", "0.54870963", "0.5392308", "0.5379711", "0.5373405", "0.5345", "0.5339057", "0.53330636", "0.5321874", "0.53019565", "0.52936834" ]
0.7679598
0
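Unlike the read calls, update_member takes a request body, so the caller builds the {'member': {...}} envelope explicitly and includes only mutable attributes (values illustrative, same assumed client as above):

    body = {'member': {'weight': 5, 'admin_state_up': False}}
    updated = neutron.update_member(member_id, body=body)
    print(updated['member']['weight'])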
Fetches a list of all load balancer health monitors for a tenant.
def list_health_monitors(self, retrieve_all=True, **_params): # Pass filters in "params" argument to do_request return self.list('health_monitors', self.health_monitors_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request):\n pool_id = request.GET.get('poolId')\n conn = get_sdk_connection(request)\n health_monitor_list = _sdk_object_to_list(\n conn.load_balancer.health_monitors(\n project_id=request.user.project_id\n )\n )\n\n if pool_id:\n health_monitor_list = self._filter_health_monitors(\n health_monitor_list,\n pool_id)\n return {'items': health_monitor_list}", "def get_health_monitor(self, loadbalancer):\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body.get(\"healthMonitor\", {})", "def get_health_monitor(self, loadbalancer):\n return loadbalancer.get_health_monitor()", "def get(self, request, health_monitor_id):\n conn = get_sdk_connection(request)\n health_mon = conn.load_balancer.find_health_monitor(health_monitor_id)\n return _get_sdk_object_dict(health_mon)", "def test_list_healthmonitors_sort(self):\r\n resources = \"health_monitors\"\r\n cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def test_list_monitorings_success(self):\n project_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(\n f\"/projects/{project_id}/deployments/{deployment_id}/monitorings\"\n )\n result = rv.json()\n expected = util.MOCK_MONITORING_LIST\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def getMonitoringHosts(self):\r\n return self.monitoringClients.values()", "def test_get_hyperflex_health_list(self):\n pass", "def get_health_monitor(self):\n return self.manager.get_health_monitor(self)", "def show_health_monitor(self, health_monitor, **_params):\r\n return self.get(self.health_monitor_path % (health_monitor),\r\n params=_params)", "def clients(self):\n\n try:\n req = requests.get(self.root_url + \"/clients\")\n except requests.exceptions.ConnectionError as e:\n req = None\n print(str(e), file=sys.stderr)\n except Exception as e:\n print(\"Unknown error making a request to the Sensu API\", file=sys.stderr)\n print(str(e), file=sys.stderr)\n\n if req and req.status_code == 200:\n dat = req.json()\n for host in dat:\n self.metrics.append(('sensu_status', host['status'], {'host': host['name'], 'dc': host['dc']}))", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def getMonitors(self):\n return [self.monitor]", "def _retrieve_health_data(self):\n return self._client.request('_cluster/health', query={'level': 'shards'}).data", "def get_monitoring_checks(self):\n logger.debug('Getting monitoring checks')\n plugins_dirs = getattr(self, 'plugins_dirs')\n monitoring_checks = []\n for service_desc, cmd in getattr(self, 'commands').items():\n try:\n plugin_name = cmd.split()[0]\n 
except IndexError:\n raise ValueError('Wrong plugin command: {}'.format(cmd))\n\n if plugin_name in self.plugins_idx:\n monitoring_checks.append(\n MonitoringCheck(\n getattr(self, 'hostname'),\n service_desc,\n self.plugins_idx[plugin_name]\n )\n )\n else:\n raise ValueError(\n \"Plugin '{}' is not found in {}\".format(\n plugin_name, plugins_dirs)\n )\n\n logger.debug(monitoring_checks)\n return monitoring_checks", "def get_host_stats(self, refresh=False):", "def get_alarms(region):\n\n client = boto3.client(\"cloudwatch\", region_name=region)\n\n describe_response = client.describe_alarms()\n\n alarms = describe_response[\"MetricAlarms\"]\n\n return alarms", "def get_intervals(self, account):\n buckets = []\n for monitor in self.get_watchauditors(account):\n interval = monitor.watcher.get_interval()\n if not interval in buckets:\n buckets.append(interval)\n return buckets", "def get_websites_to_monitor(self):\n websites = []\n\n while True:\n\n website_instance = self.create_website()\n if website_instance:\n websites.append(website_instance)\n self.console_writer.add_dashboard(website_instance.dashboard)\n print(f\"Your website has been added to the monitoring list\")\n\n if not self.user_wants_more(websites):\n break\n\n return websites", "def get_health_dashboard(self):\n result = {}\n fabric_switches_dns, fabric_switches_rns = self.get_fabric_switches()\n for fabric_switch in fabric_switches_rns:\n result[fabric_switch] = {}\n # Switch health\n Health_Inst_mo = self.moDir.lookupByDn('topology/pod-1/' + fabric_switch + '/sys/health')\n result[fabric_switch]['Health'] = Health_Inst_mo.cur\n\n # Switch Policy CAM table\n cam_usage_mo = self.moDir.lookupByDn('topology/pod-1/' + str(fabric_switch) +\n '/sys/eqptcapacity/CDeqptcapacityPolUsage5min')\n result[fabric_switch]['Policy CAM table'] = cam_usage_mo.polUsageCum + ' of ' + cam_usage_mo.polUsageCapCum\n\n # Switch MAC table\n multicast_usage_mo = self.moDir.lookupByDn('topology/pod-1/' + str(fabric_switch) +\n '/sys/eqptcapacity/CDeqptcapacityMcastUsage5min')\n result[fabric_switch]['Multicast'] = multicast_usage_mo.localEpCum + ' of ' + multicast_usage_mo.localEpCapCum\n\n # VLAN\n vlan_usage_mo = self.moDir.lookupByDn('topology/pod-1/' + str(fabric_switch) +\n '/sys/eqptcapacity/CDeqptcapacityVlanUsage5min')\n result[fabric_switch]['VLAN'] = vlan_usage_mo.totalCum + ' of ' + vlan_usage_mo.totalCapCum\n return result", "async def get_monitor_data(self):\n json = await self._api_call(\"app/monitors/%s/overview\" % self.sense_monitor_id)\n if \"monitor_overview\" in json and \"monitor\" in json[\"monitor_overview\"]:\n self._monitor = json[\"monitor_overview\"][\"monitor\"]\n return self._monitor", "def get_elbs(elbclient):\r\n try:\r\n resp = elbclient.describe_load_balancers()\r\n return list(map(\r\n lambda x:x['LoadBalancerName'],\r\n resp['LoadBalancerDescriptions']\r\n ))\r\n except Exception as ex:\r\n print(ex.message)\r\n return None", "def add_monitors_to_elb ( cloudwatch_conn, base_name, elb_type, base_topicarn, monitor_params ) :\n for monitor_rule in monitor_params :\n monitor_type = monitor_rule[ 'type' ]\n if monitor_type == 'HEALTHYHOST' :\n topic_arn = get_full_topicarn( base_topicarn, get_topicname( base_name, 'lb', 'healthyhost' ) )\n create_lb_unhealthy_alarm ( cloudwatch_conn,\n base_name,\n get_elb_name( base_name, elb_type ),\n monitor_rule.get( 'min-healthy-hosts', 3 ),\n topic_arn,\n monitor_rule.get( 'threshold', 5 ) )", "def get_dashboards(resource_root):\n return call(resource_root.get, DASHBOARDS_PATH, 
ApiDashboard, \\\n ret_is_list=True)", "def list_metrics(ctx, wrap):\n config = ctx.obj[\"CONFIG\"]\n\n if not exists(config):\n handle_no_cache(ctx)\n\n from wily.commands.list_metrics import list_metrics\n\n list_metrics(wrap)", "def list_alert(self, start_time=None, end_time=None, alert_ids=None, trigger_ids=None,\n statuses=None, severities=None, tags=None, thin=None):\n parms = {'startTime': start_time, 'endTime': end_time, 'alertIds': alert_ids,\n 'triggerIds': trigger_ids, 'statuses': statuses, 'severities': severities,\n 'tags': tags, 'thin': thin}\n entities = self._get(path='', params=parms)\n if entities:\n return entities\n return []", "def list_metrics(self):\n pass", "def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)", "def get_healthcheck() -> Response:\n\n try:\n with get_cursor(db_creds, commit=False) as cur:\n cur.execute(\"SELECT * FROM events.healthchecks\")\n data = cur.fetchall()\n return jsonify(status_code=200, data=data)\n except psycopg2.Error as e:\n return jsonify(\n message=f\"Psycopg2 driver error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )", "def read():\n for host in _hosts:\n remaining = ssl_valid_time_remaining(host)\n remaining = remaining.total_seconds()\n remaining = int(remaining)\n\n collectd.info(\n 'tls-cert-monitor(host=%s): Reading data (data=%d)' %\n (host, remaining))\n\n val = collectd.Values(type='gauge', type_instance=host)\n\n val.plugin = 'tls-cert-monitor'\n val.dispatch(values=[remaining])" ]
[ "0.68901986", "0.6361477", "0.6159507", "0.6083955", "0.60717803", "0.57513934", "0.57083917", "0.5637575", "0.5617609", "0.5608417", "0.5604214", "0.54895395", "0.5396915", "0.5329629", "0.5327571", "0.53201497", "0.5307021", "0.5290351", "0.5265495", "0.51811594", "0.516197", "0.5126429", "0.50961035", "0.508901", "0.50800496", "0.5068928", "0.5057294", "0.50564104", "0.50501907", "0.5035058" ]
0.7332779
0
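list_health_monitors follows the same collection pattern: filters go in as keyword arguments, and retrieve_all=True concatenates paginated results (passing retrieve_all=False would instead yield pages lazily). A short sketch with the same assumed client:

    monitors = neutron.list_health_monitors(type='HTTP')
    for hm in monitors['health_monitors']:
        print(hm['id'], hm['delay'], hm['timeout'], hm['max_retries'])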
Updates a load balancer health monitor.
def update_health_monitor(self, health_monitor, body=None): return self.put(self.health_monitor_path % (health_monitor), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_monitor(request, **kwargs):\n data = request.DATA\n monitor_id = data['monitor']['id']\n hm_type = data['monitor']['type']\n\n conn = get_sdk_connection(request)\n healthmonitor_kwargs = {\n 'delay': data['monitor'].get('delay'),\n 'timeout': data['monitor'].get('timeout'),\n 'max_retries': data['monitor'].get('max_retries'),\n 'max_retries_down': data['monitor'].get('max_retries_down'),\n 'admin_state_up': data['monitor'].get('admin_state_up'),\n 'name': data['monitor'].get('name')\n }\n if hm_type in ('HTTP', 'HTTPS'):\n healthmonitor_kwargs.update({\n 'http_method': data['monitor'].get('http_method'),\n 'url_path': data['monitor'].get('url_path'),\n 'expected_codes': data['monitor'].get('expected_codes')\n })\n\n healthmonitor = conn.load_balancer.update_health_monitor(\n monitor_id,\n **healthmonitor_kwargs\n )\n\n return _get_sdk_object_dict(healthmonitor)", "def update_healthmonitor(self, context, old_healthmonitor, healthmonitor):\n old_val, new_val = self.get_diff_of_dict(\n old_healthmonitor, healthmonitor)\n LOG.info(\"Received request 'Update Pool Health Monitor' for \"\n \"Health monitor:%(hm)s with new Param:%(new_val)s and \"\n \"old Param:%(old_val)s\",\n {'hm': healthmonitor['id'],\n 'old_val': old_val,\n 'new_val': new_val})\n arg_dict = {'context': context,\n lb_const.OLD_HEALTHMONITOR: old_healthmonitor,\n lb_const.HEALTHMONITOR: healthmonitor\n }\n self._send_event(lb_const.EVENT_UPDATE_HEALTH_MONITOR_V2,\n arg_dict, serialize=True,\n binding_key=healthmonitor[lb_const.POOL][\n 'loadbalancer_id'],\n key=healthmonitor['id'])", "def post_loadbalancer_healthmonitor_update(self, resource_id, resource_dict):\n pass", "def put(self, request, health_monitor_id):\n update_monitor(request)", "def pre_loadbalancer_healthmonitor_update(self, resource_id, resource_dict):\n pass", "def test_update_health_monitor(self):\r\n resource = 'health_monitor'\r\n cmd = healthmonitor.UpdateHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--timeout', '5'],\r\n {'timeout': '5', })", "def update_health(self, health):\n session = db_api.get_session()\n\n # We need to see if all of the listeners are reporting in\n expected_listener_count = 0\n lbs_on_amp = self.amphora_repo.get_all_lbs_on_amphora(session,\n health['id'])\n for lb in lbs_on_amp:\n listener_count = self.listener_repo.count(session,\n load_balancer_id=lb.id)\n expected_listener_count += listener_count\n\n listeners = health['listeners']\n\n # Do not update amphora health if the reporting listener count\n # does not match the expected listener count\n if len(listeners) == expected_listener_count:\n\n # if the input amphora is healthy, we update its db info\n self.amphora_health_repo.replace(session, health['id'],\n last_update=(datetime.\n datetime.utcnow()))\n else:\n LOG.warning('Amphora %(id)s health message reports %(found)i '\n 'listeners when %(expected)i expected',\n {'id': health['id'], 'found': len(listeners),\n 'expected': expected_listener_count})\n\n # We got a heartbeat so lb is healthy until proven otherwise\n lb_status = constants.ONLINE\n\n # update listener and nodes db information\n for listener_id, listener in listeners.items():\n\n listener_status = None\n # OPEN = HAProxy listener status nbconn < maxconn\n if listener.get('status') == constants.OPEN:\n listener_status = constants.ONLINE\n # FULL = HAProxy listener status not nbconn < maxconn\n elif listener.get('status') == constants.FULL:\n listener_status = constants.DEGRADED\n if lb_status == constants.ONLINE:\n lb_status = constants.DEGRADED\n else:\n LOG.warning(('Listener %(list)s reported status of '\n '%(status)s'), {'list': listener_id,\n 'status': listener.get('status')})\n\n try:\n if listener_status is not None:\n self._update_status_and_emit_event(\n session, self.listener_repo, constants.LISTENER,\n listener_id, listener_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Listener %s is not in DB\", listener_id)\n\n pools = listener['pools']\n for pool_id, pool in pools.items():\n\n pool_status = None\n # UP = HAProxy backend has working or no servers\n if pool.get('status') == constants.UP:\n pool_status = constants.ONLINE\n # DOWN = HAProxy backend has no working servers\n elif pool.get('status') == constants.DOWN:\n pool_status = constants.ERROR\n lb_status = constants.ERROR\n else:\n LOG.warning(('Pool %(pool)s reported status of '\n '%(status)s'), {'pool': pool_id,\n 'status': pool.get('status')})\n\n members = pool['members']\n for member_id, status in members.items():\n\n member_status = None\n if status == constants.UP:\n member_status = constants.ONLINE\n elif status == constants.DOWN:\n member_status = constants.ERROR\n if pool_status == constants.ONLINE:\n pool_status = constants.DEGRADED\n if lb_status == constants.ONLINE:\n lb_status = constants.DEGRADED\n elif status == constants.NO_CHECK:\n member_status = constants.NO_MONITOR\n else:\n LOG.warning('Member %(mem)s reported status of '\n '%(status)s', {'mem': member_id,\n 'status': status})\n\n try:\n if member_status is not None:\n self._update_status_and_emit_event(\n session, self.member_repo, constants.MEMBER,\n member_id, member_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Member %s is not able to update \"\n \"in DB\", member_id)\n\n try:\n if pool_status is not None:\n self._update_status_and_emit_event(\n session, self.pool_repo, constants.POOL,\n pool_id, pool_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Pool %s is not in DB\", pool_id)\n\n # Update the load balancer status last\n # TODO(sbalukoff): This logic will need to be adjusted if we\n # start supporting multiple load balancers per amphora\n lb_id = self.amphora_repo.get(\n session, id=health['id']).load_balancer_id\n if lb_id is not None:\n try:\n self._update_status_and_emit_event(\n session, self.loadbalancer_repo,\n constants.LOADBALANCER, lb_id, lb_status\n )\n except sqlalchemy.orm.exc.NoResultFound:\n LOG.error(\"Load balancer %s is not in DB\", lb_id)", "def get_health_monitor(self, loadbalancer):\n return loadbalancer.get_health_monitor()", "def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,\n attemptsBeforeDeactivation=3, path=\"/\", statusRegex=None,\n bodyRegex=None, hostHeader=None):\n abd = attemptsBeforeDeactivation\n return loadbalancer.add_health_monitor(type=type, delay=delay,\n timeout=timeout, attemptsBeforeDeactivation=abd, path=path,\n statusRegex=statusRegex, bodyRegex=bodyRegex,\n hostHeader=hostHeader)", "def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id'),\n 'pool_id': request.DATA.get('parentResourceId')}\n return create_health_monitor(request, **kwargs)", "def post_loadbalancer_healthmonitor_create(self, resource_dict):\n pass", "def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,\n attemptsBeforeDeactivation=3, path=\"/\", statusRegex=None,\n bodyRegex=None, hostHeader=None):\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n req_body = {\"healthMonitor\": {\n \"type\": type,\n \"delay\": delay,\n \"timeout\": timeout,\n \"attemptsBeforeDeactivation\": attemptsBeforeDeactivation,\n }}\n uptype = type.upper()\n if uptype.startswith(\"HTTP\"):\n lb = self._get_lb(loadbalancer)\n if uptype != lb.protocol:\n raise exc.ProtocolMismatch(\"Cannot set the Health Monitor type \"\n \"to '%s' when the Load Balancer's protocol is '%s'.\" %\n (type, lb.protocol))\n if not all((path, statusRegex, bodyRegex)):\n raise exc.MissingHealthMonitorSettings(\"When creating an HTTP(S) \"\n \"monitor, you must provide the 'path', 'statusRegex' and \"\n \"'bodyRegex' parameters.\")\n body_hm = req_body[\"healthMonitor\"]\n body_hm[\"path\"] = path\n body_hm[\"statusRegex\"] = statusRegex\n body_hm[\"bodyRegex\"] = bodyRegex\n if hostHeader:\n body_hm[\"hostHeader\"] = hostHeader\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "def post_loadbalancer_healthmonitor_read(self, resource_id, resource_dict):\n pass", "def update_stats(self, health_message):\n session = db_api.get_session()\n\n amphora_id = health_message['id']\n listeners = health_message['listeners']\n for listener_id, listener in listeners.items():\n\n stats = listener.get('stats')\n stats = {'bytes_in': stats['rx'], 'bytes_out': stats['tx'],\n 'active_connections': stats['conns'],\n 'total_connections': stats['totconns'],\n 'request_errors': stats['ereq']}\n LOG.debug(\"Updating listener stats in db and sending event.\")\n LOG.debug(\"Listener %s / Amphora %s stats: %s\",\n listener_id, amphora_id, stats)\n self.listener_stats_repo.replace(\n session, listener_id, amphora_id, **stats)\n\n listener_stats = self.get_listener_stats(session, listener_id)\n self.emit(\n 'listener_stats', listener_id, listener_stats.get_stats())\n\n listener_db = self.repo_listener.get(session, id=listener_id)\n lb_stats = self.get_loadbalancer_stats(\n session, listener_db.load_balancer_id)\n self.emit('loadbalancer_stats',\n listener_db.load_balancer_id, lb_stats.get_stats())", "def health(self, new_health: int) -> None:", "def get_health_monitor(self, loadbalancer):\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_get(uri)\n return body.get(\"healthMonitor\", {})", "def show_health_monitor(self, health_monitor, **_params):\r\n return self.get(self.health_monitor_path % (health_monitor),\r\n params=_params)", "def update_loadbalancer(request, **kwargs):\n data = request.DATA\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.update_load_balancer(\n loadbalancer_id,\n name=data['loadbalancer'].get('name'),\n description=data['loadbalancer'].get('description'),\n admin_state_up=data['loadbalancer'].get('admin_state_up'))\n\n return _get_sdk_object_dict(loadbalancer)", "def pre_loadbalancer_healthmonitor_create(self, resource_dict):\n pass", "def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))", "def health(self, health):\n\n self._health = health", "def health(self, health):\n\n self._health = health", "def health(self, health):\n\n self._health = health", "def health(self, health):\n\n self._health = health", "def create_health_monitor(self, body=None):\r\n return self.post(self.health_monitors_path, body=body)", "def set_health(self, health):\n self.__health = health", "def create_load_balancer_monitor(\n self, profile_name=\"LB_Test_Monitor\", resource_type=\"LBHttpMonitorProfile\",\n monitor_port=\"80\", request_url=\"/\", response_codes=None, interval=5, timeout=5,\n rise_count=3, fall_count=3):\n LB_MONITOR_PROFILE = \"https://{ip}/policy/api/v1/infra/lb-monitor-profiles/{profile_name}\"\n url = LB_MONITOR_PROFILE.format(ip=self.nsxt_ip, profile_name=profile_name)\n print('Starting PUT call to create Monitor Profile : %s' % url)\n put_status = None\n response_codes = [200] if not response_codes else response_codes\n json_payload = {\n \"request_url\": request_url, \"response_status_codes\": response_codes,\n \"resource_type\": resource_type, \"monitor_port\": monitor_port,\n \"interval\": interval, \"timeout\": timeout, \"rise_count\": rise_count,\n \"fall_count\": fall_count}\n monitor_id = None\n monitor_path = None\n try:\n response = self.rest.put(url, json_payload, self.headers, 200, auth=(\n self.nsxt_user, self.nsxt_pwd), is_json=True)\n put_status = response.status_code\n root = json.loads(response.text)\n monitor_id = root[\"id\"]\n monitor_path = root[\"path\"]\n print(\"monitor_id:%s | monitor_path:%s\" % (\n monitor_id, monitor_path))\n except Exception as e:\n print(traceback.format_exc())\n print('Exception in creating monitor profile %s' % e)\n return monitor_id, monitor_path", "def associate_health_monitor(self, pool, body):\r\n return self.post(self.associate_pool_health_monitors_path % (pool),\r\n body=body)", "def get(self, request, health_monitor_id):\n conn = get_sdk_connection(request)\n health_mon = conn.load_balancer.find_health_monitor(health_monitor_id)\n return _get_sdk_object_dict(health_mon)", "def create_health_monitor(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n health_mon = conn.load_balancer.create_health_monitor(\n type=data['monitor']['type'],\n delay=data['monitor']['delay'],\n timeout=data['monitor']['timeout'],\n max_retries=data['monitor']['max_retries'],\n max_retries_down=data['monitor']['max_retries_down'],\n pool_id=kwargs['pool_id'],\n http_method=data['monitor'].get('http_method'),\n url_path=data['monitor'].get('url_path'),\n expected_codes=data['monitor'].get('expected_codes'),\n admin_state_up=data['monitor'].get('admin_state_up'),\n name=data['monitor'].get('name')\n )\n\n return _get_sdk_object_dict(health_mon)" ]
[ "0.7534068", "0.7220566", "0.7034508", "0.6964282", "0.6887532", "0.68862224", "0.6697243", "0.6198273", "0.61803037", "0.614257", "0.6064578", "0.6044781", "0.6024156", "0.5986242", "0.5931802", "0.5792398", "0.5688219", "0.5687414", "0.56839436", "0.5664143", "0.56462824", "0.56462824", "0.56462824", "0.56462824", "0.56163543", "0.56103164", "0.5594963", "0.559053", "0.558907", "0.5568198" ]
0.7613361
0
Associate specified load balancer health monitor and pool.
def associate_health_monitor(self, pool, body):
        return self.post(self.associate_pool_health_monitors_path % (pool), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_loadbalancer_healthmonitor_create(self, resource_dict):\n pass", "def pre_loadbalancer_healthmonitor_create(self, resource_dict):\n pass", "def create_healthmonitor(self, context, healthmonitor):\n LOG.info(\"Received request 'Create Pool Health Monitor' for\"\n \"Health monitor:%(hm)s\",\n {'hm': healthmonitor['id']})\n arg_dict = {'context': context,\n lb_const.HEALTHMONITOR: healthmonitor\n }\n self._send_event(lb_const.EVENT_CREATE_HEALTH_MONITOR_V2,\n arg_dict, serialize=True,\n binding_key=healthmonitor[lb_const.POOL][\n 'loadbalancer_id'],\n key=healthmonitor['id'])", "def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id'),\n 'pool_id': request.DATA.get('parentResourceId')}\n return create_health_monitor(request, **kwargs)", "def create_health_monitor(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n health_mon = conn.load_balancer.create_health_monitor(\n type=data['monitor']['type'],\n delay=data['monitor']['delay'],\n timeout=data['monitor']['timeout'],\n max_retries=data['monitor']['max_retries'],\n max_retries_down=data['monitor']['max_retries_down'],\n pool_id=kwargs['pool_id'],\n http_method=data['monitor'].get('http_method'),\n url_path=data['monitor'].get('url_path'),\n expected_codes=data['monitor'].get('expected_codes'),\n admin_state_up=data['monitor'].get('admin_state_up'),\n name=data['monitor'].get('name')\n )\n\n return _get_sdk_object_dict(health_mon)", "def l7pool_add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n pool_main = {\n 'name': args.get('name'),\n 'loadBalancingAlgorithm': args.get('method'),\n 'protocol': args.get('protocol')\n }\n\n pool_members = list(args.get('server'))\n\n pool_health = {\n 'interval': args.get('healthinterval'),\n 'timeout': args.get('healthtimeout'),\n 'maxRetries': args.get('healthretry'),\n 'urlPath': args.get('healthpath')\n }\n\n pool_sticky = {\n 'type': args.get('sticky')\n }\n\n try:\n mgr.add_lb_l7_pool(uuid, pool_main, pool_members, pool_health, pool_sticky)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def post_loadbalancer_healthmonitor_update(self, resource_id, resource_dict):\n pass", "def pre_loadbalancer_healthmonitor_update(self, resource_id, resource_dict):\n pass", "def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,\n attemptsBeforeDeactivation=3, path=\"/\", statusRegex=None,\n bodyRegex=None, hostHeader=None):\n abd = attemptsBeforeDeactivation\n return loadbalancer.add_health_monitor(type=type, delay=delay,\n timeout=timeout, attemptsBeforeDeactivation=abd, path=path,\n statusRegex=statusRegex, bodyRegex=bodyRegex,\n hostHeader=hostHeader)", "def post_loadbalancer_pool_create(self, resource_dict):\n pass", "def pre_loadbalancer_pool_create(self, resource_dict):\n pass", "def post_loadbalancer_healthmonitor_read(self, resource_id, resource_dict):\n pass", "def add_monitors_to_elb ( cloudwatch_conn, base_name, elb_type, base_topicarn, monitor_params ) :\n for monitor_rule in monitor_params :\n monitor_type = monitor_rule[ 'type' ]\n if monitor_type == 'HEALTHYHOST' :\n topic_arn = get_full_topicarn( base_topicarn, get_topicname( base_name, 'lb', 'healthyhost' ) )\n create_lb_unhealthy_alarm ( cloudwatch_conn,\n base_name,\n get_elb_name( base_name, elb_type ),\n monitor_rule.get( 'min-healthy-hosts', 3 ),\n topic_arn,\n monitor_rule.get( 'threshold', 5 ) )", "def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)", "def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,\n attemptsBeforeDeactivation=3, path=\"/\", statusRegex=None,\n bodyRegex=None, hostHeader=None):\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n req_body = {\"healthMonitor\": {\n \"type\": type,\n \"delay\": delay,\n \"timeout\": timeout,\n \"attemptsBeforeDeactivation\": attemptsBeforeDeactivation,\n }}\n uptype = type.upper()\n if uptype.startswith(\"HTTP\"):\n lb = self._get_lb(loadbalancer)\n if uptype != lb.protocol:\n raise exc.ProtocolMismatch(\"Cannot set the Health Monitor type \"\n \"to '%s' when the Load Balancer's protocol is '%s'.\" %\n (type, lb.protocol))\n if not all((path, statusRegex, bodyRegex)):\n raise exc.MissingHealthMonitorSettings(\"When creating an HTTP(S) \"\n \"monitor, you must provide the 'path', 'statusRegex' and \"\n \"'bodyRegex' parameters.\")\n body_hm = req_body[\"healthMonitor\"]\n body_hm[\"path\"] = path\n body_hm[\"statusRegex\"] = statusRegex\n body_hm[\"bodyRegex\"] = bodyRegex\n if hostHeader:\n body_hm[\"hostHeader\"] = hostHeader\n resp, body = self.api.method_put(uri, body=req_body)\n return body", "def post_loadbalancer_pool_update(self, resource_id, resource_dict):\n pass", "def create_load_balancer_monitor(\n self, profile_name=\"LB_Test_Monitor\", resource_type=\"LBHttpMonitorProfile\",\n monitor_port=\"80\", request_url=\"/\", response_codes=None, interval=5, timeout=5,\n rise_count=3, fall_count=3):\n LB_MONITOR_PROFILE = \"https://{ip}/policy/api/v1/infra/lb-monitor-profiles/{profile_name}\"\n url = LB_MONITOR_PROFILE.format(ip=self.nsxt_ip, profile_name=profile_name)\n print('Starting PUT call to create Monitor Profile : %s' % url)\n put_status = None\n response_codes = [200] if not response_codes else response_codes\n json_payload = {\n \"request_url\": request_url, \"response_status_codes\": response_codes,\n \"resource_type\": resource_type, \"monitor_port\": monitor_port,\n \"interval\": interval, \"timeout\": timeout, \"rise_count\": rise_count,\n \"fall_count\": fall_count}\n monitor_id = None\n monitor_path = None\n try:\n response = self.rest.put(url, json_payload, self.headers, 200, auth=(\n self.nsxt_user, self.nsxt_pwd), is_json=True)\n put_status = response.status_code\n root = json.loads(response.text)\n monitor_id = root[\"id\"]\n monitor_path = root[\"path\"]\n print(\"monitor_id:%s | monitor_path:%s\" % (\n monitor_id, monitor_path))\n except Exception as e:\n print(traceback.format_exc())\n print('Exception in creating monitor profile %s' % e)\n return monitor_id, monitor_path", "def pre_loadbalancer_pool_update(self, resource_id, resource_dict):\n pass", "def attach_elastic_load_balancer(ElasticLoadBalancerName=None, LayerId=None):\n pass", "def post_loadbalancer_member_create(self, resource_dict):\n pass", "def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')", "def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()", "def update_healthmonitor(self, context, old_healthmonitor, healthmonitor):\n old_val, new_val = self.get_diff_of_dict(\n old_healthmonitor, healthmonitor)\n LOG.info(\"Received request 'Update Pool Health Monitor' for \"\n \"Health monitor:%(hm)s with new Param:%(new_val)s and \"\n \"old Param:%(old_val)s\",\n {'hm': healthmonitor['id'],\n 'old_val': old_val,\n 'new_val': new_val})\n arg_dict = {'context': context,\n lb_const.OLD_HEALTHMONITOR: old_healthmonitor,\n lb_const.HEALTHMONITOR: healthmonitor\n }\n self._send_event(lb_const.EVENT_UPDATE_HEALTH_MONITOR_V2,\n arg_dict, serialize=True,\n binding_key=healthmonitor[lb_const.POOL][\n 'loadbalancer_id'],\n key=healthmonitor['id'])", "def post_loadbalancer_pool_read(self, resource_id, resource_dict):\n pass", "def add_pool(ctx, pool_name, global_ip_range, global_port_range):\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n # Verify the ip address range and format\n ip_address = global_ip_range.split(\"-\")\n if len(ip_address) > 2:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n elif len(ip_address) == 2:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n\n if is_valid_ipv4_address(ip_address[1]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[1]))\n\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))\n if ipLowLimit >= ipHighLimit:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n else:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))\n\n # Verify the port address range and format\n if global_port_range is not None: \n port_address = global_port_range.split(\"-\")\n\n if len(port_address) > 2:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n elif len(port_address) == 2:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n\n if is_valid_port_address(port_address[1]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[1]))\n\n portLowLimit = int(port_address[0])\n portHighLimit = int(port_address[1])\n if portLowLimit >= portHighLimit:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n else:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n else:\n global_port_range = \"NULL\"\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n dataKey1 = 'nat_ip'\n dataKey2 = 'nat_port'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:\n click.echo(\"Trying to add pool, which is already present.\")\n entryFound = True\n\n pool_dict = config_db.get_table(table) \n if len(pool_dict) == 16:\n click.echo(\"Failed to add pool, as already reached maximum pool limit 16.\")\n entryFound = True\n\n # Verify the Ip address is overlapping with any Static NAT entry\n if entryFound == False:\n static_dict = config_db.get_table('STATIC_NAT')\n if static_dict:\n for staticKey, staticValues in static_dict.items():\n global_ip = \"---\"\n local_ip = \"---\"\n nat_type = \"dnat\"\n\n if isinstance(staticKey, str) is True:\n global_ip = staticKey\n else:\n continue\n\n local_ip = staticValues[\"local_ip\"]\n\n if \"nat_type\" in staticValues:\n nat_type = staticValues[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n ipAddress = int(ipaddress.IPv4Address(global_ip))\n if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):\n ctx.fail(\"Given Ip address entry is overlapping with existing Static NAT entry !!\")\n\n if entryFound == False:\n config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range})", "def addpool(miner: Miner, pool):\n api = MinerApi(host=miner.ipaddress, port=int(miner.port))\n jaddpool = api.addpool(\"{0},{1},{2}\".format(pool.url, pool.user, \"x\"))\n return jaddpool[\"STATUS\"][0][\"Msg\"]", "def get_lbaas_agent_hosting_pool(self, pool, **_params):\r\n return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool,\r\n params=_params)", "def get(self, request):\n pool_id = request.GET.get('poolId')\n conn = get_sdk_connection(request)\n health_monitor_list = _sdk_object_to_list(\n conn.load_balancer.health_monitors(\n project_id=request.user.project_id\n )\n )\n\n if pool_id:\n health_monitor_list = self._filter_health_monitors(\n health_monitor_list,\n pool_id)\n return {'items': health_monitor_list}", "def pre_loadbalancer_healthmonitor_read(self, resource_id):\n pass" ]
[ "0.6778952", "0.67355716", "0.648745", "0.641679", "0.6409335", "0.63331187", "0.6244122", "0.624059", "0.621399", "0.61304736", "0.5918235", "0.589895", "0.58703053", "0.5826062", "0.58003503", "0.57517254", "0.574669", "0.5717543", "0.5531685", "0.55174804", "0.55116934", "0.55083305", "0.5494325", "0.54505306", "0.5442473", "0.54005176", "0.53824204", "0.533306", "0.53052425", "0.5289417" ]
0.8073447
0
Disassociate specified load balancer health monitor and pool.
def disassociate_health_monitor(self, pool, health_monitor):
        path = (self.disassociate_pool_health_monitors_path %
                {'pool': pool, 'health_monitor': health_monitor})
        return self.delete(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_healthmonitor(self, context, healthmonitor):\n LOG.info(\"Received request 'Delete Pool Health Monitor' for \"\n \"Health monitor:%(hm)s\",\n {'hm': healthmonitor['id']})\n arg_dict = {'context': context,\n lb_const.HEALTHMONITOR: healthmonitor\n }\n self._send_event(lb_const.EVENT_DELETE_HEALTH_MONITOR_V2,\n arg_dict, serialize=True,\n binding_key=healthmonitor[lb_const.POOL][\n 'loadbalancer_id'],\n key=healthmonitor['id'])", "def pre_loadbalancer_healthmonitor_delete(self, resource_id):\n pass", "def post_loadbalancer_healthmonitor_delete(self, resource_id, resource_dict):\n pass", "def l7pool_del(env, identifier):\n mgr = SoftLayer.LoadBalancerManager(env.client)\n try:\n mgr.del_lb_l7_pool(identifier)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def delete_health_monitor(self, loadbalancer):\n return loadbalancer.delete_health_monitor()", "def delete(self, request, pool_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_pool,\n pool_id,\n load_balancer_getter=pool_get_load_balancer_id,\n resource_id=pool_id)", "def detach_elastic_load_balancer(ElasticLoadBalancerName=None, LayerId=None):\n pass", "def pre_loadbalancer_pool_delete(self, resource_id):\n pass", "def delete(self, request, health_monitor_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_health_monitor,\n health_monitor_id,\n ignore_missing=True,\n load_balancer_getter=health_monitor_get_load_balancer_id,\n resource_id=health_monitor_id)", "def associate_health_monitor(self, pool, body):\r\n return self.post(self.associate_pool_health_monitors_path % (pool),\r\n body=body)", "def post_loadbalancer_pool_delete(self, resource_id, resource_dict):\n pass", "def delete_health_monitor(self, loadbalancer):\n uri = \"/loadbalancers/%s/healthmonitor\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_delete(uri)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def delete(self):\n self._lbcall('delete_pool', [self._name])", "def test_dhcp_pool_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|pool_id', dhcp_pool.delete,\n {'pool': {}},\n delete_args=['dhcpPoolID'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'esg_id', 'poolID': 'pool_id'}\n }\n )", "def delete_pool(self, argu):\n\n if not argu:\n LOG.error(\"In delete_pool, it should not pass the None.\")\n\n # delete policy\n self._delete_policy(\n argu['listener_id'],\n argu['session_persistence_type'],\n argu['lb_algorithm']\n )\n\n cmd_apv_no_group = ADCDevice.no_group(argu['pool_id'])\n for base_rest_url in self.base_rest_urls:\n self.run_cli_extend(base_rest_url, cmd_apv_no_group)", "def unregister(self, target, hostname, listener_type):", "def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')", "def consul_deregister(self):\n try:\n if self.svc_name not in self.consul.agent.services():\n return\n self.log.info(\"consul-deregister\")\n self.consul.agent.service.deregister(\"qemu-{}\".format(self.name))\n except requests.exceptions.ConnectionError:\n pass\n except Exception:\n self.log.exception(\"consul-deregister-failed\", exc_info=True)", "def undeploy_instance(self, pool_id):\n LOG.debug('vLB Driver::undeploy_instance')\n vlb_value = vlb_db.get_vlb_from_pool_id(pool_id['pool']['id'])\n nova_client = self._get_nova_client()\n instance = nova_client.servers.find(name=vlb_value['name'])\n instance.delete()\n\n vlb_db.delete_vlb(pool_id['pool']['id'])", "def delete_balancer(ctx):\n if self.balancer_exists():\n self.delete_balancer()\n ctx.info('Successfully deleted load balancer {}:'.format(self.get_balancer_name()))\n else:\n ctx.info('Load balancer {} does not exist, nothing to delete.'.format(\n self.get_balancer_name()\n ))", "def reregister_elb_instances(profile):\n if 'elb_names' in profile:\n conn = elb_conn[profile['region']]\n elbs = conn.get_all_load_balancers(profile['elb_names'])\n for elb in elbs:\n instance_ids = _get_instance_ids(elb.instances)\n print \"Reregistering \" + elb.name + \" instances.\"\n try:\n conn.deregister_instances(elb.name, instance_ids)\n conn.register_instances(elb.name, instance_ids)\n except Exception, e:\n print elb.name + \"has no instances.\"\n # to avoid elb rate limit throttling\n time.sleep(1)", "def delete_device_pool(arn=None):\n pass", "async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)", "def unassign_instance(InstanceId=None):\n pass", "def delete_pool(self, context, pool):\n LOG.info(\"Received request 'Delete Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool,\n }\n self._send_event(lb_const.EVENT_DELETE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])", "def remove_pool(ctx, pool_name):\n \n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if not data:\n click.echo(\"Trying to delete pool, which is not present.\")\n entryFound = True\n\n binding_dict = config_db.get_table('NAT_BINDINGS')\n if binding_dict and entryFound == False: \n for binding_name, binding_values in binding_dict.items():\n if binding_values['nat_pool'] == pool_name:\n click.echo(\"Pool is not removed, as it is mapped to Binding {}, remove the pool binding first !!\".format(binding_name))\n entryFound = True\n break\n\n if entryFound == False:\n config_db.set_entry(table, key, None)", "def delete_health_monitor(self, health_monitor):\r\n return self.delete(self.health_monitor_path % (health_monitor))", "def delete_pool(self, pool):\r\n return self.delete(self.pool_path % (pool))", "def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")" ]
[ "0.6170834", "0.61175746", "0.6076449", "0.59184957", "0.5815427", "0.57858783", "0.57855165", "0.57609636", "0.5755864", "0.57522935", "0.56465745", "0.5572244", "0.54569775", "0.5376652", "0.5369299", "0.5314224", "0.52987087", "0.52365905", "0.5153941", "0.5120014", "0.5117562", "0.50942266", "0.5092382", "0.5091206", "0.5083095", "0.50825", "0.50738144", "0.50624824", "0.50489694", "0.5039105" ]
0.7618305
0
Fetches information about a certain agent.
def show_agent(self, agent, **_params):
        return self.get(self.agent_path % (agent), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_agent(self, account_id, agent_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents/' + str(agent_id), filters)", "def get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"agents\", \"agent_id\", agent_id)", "def agent(self):\n return self.__agent", "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"", "def getAgentID(self):\n\t\treturn self.agentID", "def retrieve(cls: Type[T], agent_id: int, datastore: Datastore) -> T:\n agent = cls.optionally_retrieve(agent_id, datastore)\n if agent is None:\n raise NotFound\n return agent", "def agent(self) -> Entity:\n return self.__agent", "def get_agents(self, state=None, agent_id=None):\n query = \"SELECT * FROM agents\"\n \n if state:\n query += \" WHERE state='\" + state + \"'\"\n elif agent_id:\n query += \" WHERE agent_id='\" + str(agent_id) + \"'\"\n \n response = self.execute(query + \";\")\n \n if response:\n if agent_id:\n return response.fetchall()[0]\n else:\n return response.fetchall()\n else:\n return False", "def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])", "def print_agent(agent):\n agent_string = \"FOUND:\\n\"\n for key in place_detail_keys:\n agent_string += \"\\t%s: %s\\n\" % (key, agent[key])\n log.debug(agent_string)", "def do_GET(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.server.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent_id not found\")\n logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not found.')\n return\n\n if not agent['active']:\n common.echo_json_response(self, 404, \"agent_id not yet active\")\n logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not yet active.')\n return\n\n response = {\n 'aik': agent['aik'],\n 'ek': agent['ek'],\n 'ekcert': agent['ekcert'],\n 'regcount': agent['regcount'],\n }\n\n if agent['virtual']:\n response['provider_keys']= agent['provider_keys']\n\n common.echo_json_response(self, 200, \"Success\", response)\n logger.info('GET returning 200 response for agent_id:' + agent_id)\n else:\n # return the available registered uuids from the DB\n json_response = self.server.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')\n\n return", "def get(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.db.get_agent(agent_id)\n if agent is not None:\n response = cloud_verifier_common.process_get_status(agent)\n common.echo_json_response(self, 200, \"Success\", response)\n #logger.info('GET returning 200 response for agent_id: ' + agent_id)\n\n else:\n #logger.info('GET returning 404 response. agent id: ' + agent_id + ' not found.')\n common.echo_json_response(self, 404, \"agent id not found\")\n else:\n # return the available keys in the DB\n json_response = self.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')", "def observe(self, agent: str) -> ObsType | None:\n raise NotImplementedError", "def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()", "def get(self, name):\n validate_inputs({'name': name})\n return get_storage_manager().get(models.Agent, name)", "def server_agent_show(ctx, args):\n for agent_id in args:\n data = ctx.obj.get_agent_by_agent_id(agent_id)\n output_json_data(data)", "def get_agent_data(self, attribute):\n\t\treturn getattr(self, attribute)", "def get_effective_agent(self):\n raise Unimplemented()", "def agents_status(self):\n return self._get('agents/status')", "def get_reply_target(self, agent, collective):", "def agent_id(self):\n return self._agent_id", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))", "def do_examine(self, args):\n\n name = pick(\"Agent\", self.model.get_agent_names())\n if name is not None:\n agent = self.model.get_agent(name)\n agent.examine()", "def RetrieveAgentWithCapability(**argd):\n flag, ret = CGateway.core.RetrieveAgentWithCapability(argd[\"session\"], argd[\"name\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse({'return': ret})", "def self(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/self')", "def run(self, agent_args=None):\n agent_args = agent_args or {}\n self.neutron.list_agents(**agent_args)", "def get_agent_of_model(self, model):\n if model.id in self.agents:\n return self.agents[model.id]\n elif model.id in self.dead_agent_store:\n return self.dead_agent_store[model.id]\n raise ValueError('agent of given model does not exist')", "def agent_class(self):\r\n return self._agent_class", "def agent_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"agent_id\")", "def get_agent_by_host(agent_host):\n session = db_api.get_session()\n with session.begin(subtransactions=True):\n query = session.query(agents_db.Agent)\n agent = query.filter(\n agents_db.Agent.host == agent_host,\n agents_db.Agent.agent_type == constants.AGENT_TYPE_DVS,\n agents_db.Agent.admin_state_up.is_(True)).first()\n if agent and agent.is_active:\n return agent\n return None" ]
[ "0.6484612", "0.63000304", "0.62550956", "0.61938536", "0.61057025", "0.6010476", "0.59606665", "0.5949737", "0.5931788", "0.59045815", "0.587425", "0.5830453", "0.5784631", "0.5692295", "0.56844026", "0.56782377", "0.56639445", "0.560746", "0.55718106", "0.5533608", "0.55251825", "0.5509251", "0.54513425", "0.5390991", "0.5383485", "0.53626865", "0.5346593", "0.5346058", "0.53354263", "0.5313634" ]
0.74993396
0
Deletes the specified agent.
def delete_agent(self, agent):
        return self.delete(self.agent_path % (agent))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, agent_id):\n self._client.delete('scanners/1/agents/%(agent_id)s', path_params={'agent_id': agent_id})\n return True", "def delete(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE returning 400 response. uri not supported: ' + self.request.path)\n\n agent = self.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('DELETE returning 404 response. agent id: ' + agent_id + ' not found.')\n return\n\n op_state = agent['operational_state']\n if op_state == cloud_verifier_common.CloudAgent_Operational_State.SAVED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TERMINATED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.INVALID_QUOTE:\n self.db.remove_agent(agent_id)\n common.echo_json_response(self, 200, \"Success\")\n logger.info('DELETE returning 200 response for agent id: ' + agent_id)\n else:\n self.db.update_agent(agent_id, 'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TERMINATED)\n common.echo_json_response(self, 202, \"Accepted\")\n logger.info('DELETE returning 202 response for agent id: ' + agent_id)", "def delete(self):\n self.model.remove_agents(self)", "def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return", "def deleteAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def delete(self):\r\n try:\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent_to_delete = Agent.filter(Agent.uuid == agent_uuid).first()\r\n sys_id = (\r\n System.select().where(System.agent_uuid == agent_to_delete).execute()\r\n )\r\n if sys_id:\r\n logger.error(\"Agent not deleted\")\r\n return web.Response(text=\"Agent not deleted.\")\r\n else:\r\n agent_to_delete.delete_instance()\r\n logger.info(\"Agent deleted successfully\")\r\n return web.Response(text=\"Agent deleted successfully.\")\r\n except Exception as ex:\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=error_message, status=500)", "def remove_agent(self):\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)\n\n if self.agent_type == \"zombie\":\n self.model.infected -= 1\n elif self.agent_type == \"human\":\n self.model.susceptible -= 1\n del self", "def delete_agent(self, group_name, id, quite=True):\n self._out.append(('_simulation', 0.5, (group_name, id, quite)))", "def test_delete_network_from_dhcp_agent(self):\n network_id = self._create_and_prepare_network_for_agent(\n self.agent['id'])\n self.agents_client.add_dhcp_agent_to_network(\n self.agent['id'], network_id=network_id)\n # Clean up is not necessary and might result in 409 being raised.\n\n with self.override_role():\n self.agents_client.delete_network_from_dhcp_agent(\n self.agent['id'], network_id=network_id)", "def doRegistrarDelete(\n registrar_ip: str, registrar_port: str, agent_id: str, tls_context: Optional[ssl.SSLContext]\n) -> Dict[str, Any]:\n\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", True, tls_context=tls_context)\n response = client.delete(f\"/v{api_version}/agents/{agent_id}\")\n response_body: Dict[str, Any] = response.json()\n\n if response.status_code == 200:\n logger.debug(\"Registrar deleted.\")\n else:\n logger.warning(\"Status command response: %s Unexpected response from registrar.\", response.status_code)\n keylime_logging.log_http_response(logger, logging.WARNING, response_body)\n\n return response_body", "def remove_agents(self, agents):\n for agent in list(make_list(agents)): # Soft copy as list is changed\n self._agents.remove(agent)\n agent.envs.remove(self)", "def deleteAgentCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self, *args, **kwargs):\n # Delete listener\n if self.db.listener:\n self.db.listener.delete()\n \n # Delete puppets\n puppetlist = [puppet for puppet in\n search.search_tag(self.key+\"-puppet\")]\n for puppet in puppetlist:\n puppet.delete()\n\n # Delete bot\n self.db.ev_location.msg_contents(\"Bot commencing shut-down process.\")\n super(ServerBot, self).delete(*args, **kwargs)", "def test_delete_router_from_l3_agent(self):\n self.agents_client.create_router_on_l3_agent(\n self.agent['id'], router_id=self.router['id'])\n self.addCleanup(\n test_utils.call_and_ignore_notfound_exc,\n self.agents_client.delete_router_from_l3_agent,\n self.agent['id'], router_id=self.router['id'])\n\n with self.override_role():\n self.agents_client.delete_router_from_l3_agent(\n self.agent['id'], router_id=self.router['id'])", "def remove_router_from_l3_agent(self, l3_agent, router_id):\r\n return self.delete((self.agent_path + self.L3_ROUTERS + \"/%s\") % (\r\n l3_agent, router_id))", "def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)", "def remove(directory, host, name, stale, username, password):\n ids = (username, password)\n if name:\n agent = Agent.from_directory(join(directory, name))\n if host and agent.host != host:\n click.echo(f\"Agent host {agent.host} does not match {host}\")\n return\n agent.remove(ids, stale)\n else:\n manager = AgentManager(directory, host)\n for agent in manager.agents:\n agent.remove(ids, stale)", "def exit(self, env=None):\n env = self._find_env(env)\n env.remove_agents(self)", "def delete(self, app, role, privilege):\n \n # check user's privileges\n h.checkAccess('delete')\n\n model = RolesModel()\n model.deletePrivilege( app, role, privilege )\n\n # returns empty reply", "def revoke_agent(request):\n from .models import Agent\n\n request.agent = Agent.untrusted_agent(request.user)", "def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete(self):\r\n self.connection.delete_distribution(self.id, self.etag)", "def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')", "def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def delete(self):\n self.vera.delete_scene(self)", "def delete_robot(self, robot):\n if len(self.__robots) == 0 or robot not in self.__robots:\n return\n\n robot_index = self.__robots.index(robot)\n\n # Clear the robot visuals\n self.__robots[robot_index].set_reference_visibility(False)\n self.__robots[robot_index].set_robot_visibility(False)\n\n # Remove from UI\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n del new_list[robot_index]\n del self.__robots[robot_index]\n del self.__teachpanel[robot_index]\n\n self.__selected_robot = 0\n # Update UI\n self.__reload_caption(new_list)\n # Select the top item\n if len(self.__ui_controls.get('menu_robots').choices) > 0:\n self.__ui_controls.get('menu_robots').index = 0", "def delete(\n address: Optional[str],\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n client.delete_job(job_id)\n cli_logger.print(f\"Job '{job_id}' deleted successfully\")", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete_vm(client, resource_group_name, vm_name):\n return client.delete(resource_group_name, vm_name)" ]
[ "0.7904113", "0.77537805", "0.76021314", "0.74562705", "0.7406257", "0.72061086", "0.6470654", "0.6370957", "0.6018436", "0.5851619", "0.58453286", "0.5785872", "0.5759275", "0.5755108", "0.5737678", "0.5718371", "0.56550926", "0.5606634", "0.5464975", "0.53717154", "0.5369984", "0.53290874", "0.53041476", "0.52881175", "0.526789", "0.52596927", "0.52210605", "0.5198692", "0.51967347", "0.5193443" ]
0.8920055
0
Fetch a network gateway.
def show_network_gateway(self, gateway_id, **_params):
        return self.get(self.network_gateway_path % gateway_id, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_gateway(self):\n return self.gateway", "async def get_gw(test_cli) -> str:\n gw_resp = await test_cli.get('/api/v6/gateway')\n gw_json = await gw_resp.json\n return gw_json['url']", "def get_network_default_gateway(self):\n return self.mycam.devicemgmt.GetNetworkDefaultGateway()", "def get_internet_gateway(internet_gateway_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInternetGatewayResult:\n __args__ = dict()\n __args__['internetGatewayName'] = internet_gateway_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:managednetworkfabric/v20230615:getInternetGateway', __args__, opts=opts, typ=GetInternetGatewayResult).value\n\n return AwaitableGetInternetGatewayResult(\n annotation=pulumi.get(__ret__, 'annotation'),\n id=pulumi.get(__ret__, 'id'),\n internet_gateway_rule_id=pulumi.get(__ret__, 'internet_gateway_rule_id'),\n ipv4_address=pulumi.get(__ret__, 'ipv4_address'),\n location=pulumi.get(__ret__, 'location'),\n name=pulumi.get(__ret__, 'name'),\n network_fabric_controller_id=pulumi.get(__ret__, 'network_fabric_controller_id'),\n port=pulumi.get(__ret__, 'port'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n system_data=pulumi.get(__ret__, 'system_data'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'))", "def nat_gateway(self) -> Optional['outputs.SubResourceResponse']:\n return pulumi.get(self, \"nat_gateway\")", "def get_gateway(self, node):\n router = None\n\n for ip, n in self._nodes_dict.items():\n\n if n is node:\n continue\n\n if n.is_gateway:\n return ip\n\n if not router and n.is_router:\n router = ip\n\n return router", "def get_default_gateway(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetDefaultGateway', self.handle)", "def gateway_config(self) -> 'outputs.GatewayConfigResponse':\n return pulumi.get(self, \"gateway_config\")", "def get_default_gateway(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetDefaultGateway', self.handle)", "def gateway(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gateway\")", "def get_network(self):\n return self._network", "def get(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.describe_internet_gateways()", "def get(self, request, format=None):\n site_owner = SiteOwner.objects.get(user=self.request.user)\n gateways = Gateway.objects.filter(site_owner=site_owner)\n serializer = GatewaySerializer(gateways, many=True)\n return Response(serializer.data)", "def fetchWANIP():\n logging.info(\"Trying to fetch WAN IP\")\n _wanIf = config.get(\"interface\", \"wan\")\n _wanip = None\n try:\n bus = dbus.SystemBus()\n proxy = bus.get_object(\"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager\")\n manager = dbus.Interface(proxy, \"org.freedesktop.NetworkManager\")\n devices = manager.GetDevices()\n for device in devices:\n devProxy = bus.get_object(\"org.freedesktop.NetworkManager\", device)\n devConfIface = dbus.Interface(devProxy, \"org.freedesktop.DBus.Properties\")\n devConf = devConfIface.GetAll(\"org.freedesktop.NetworkManager.Device\")\n if devConf['Interface'] == _wanIf:\n actConProxy = bus.get_object(\"org.freedesktop.NetworkManager\", devConf[\"ActiveConnection\"])\n actConIface = dbus.Interface(actConProxy, \"org.freedesktop.DBus.Properties\")\n actConConf = actConIface.GetAll(\"org.freedesktop.NetworkManager.Connection.Active\")\n actConIP4Proxy = bus.get_object(\"org.freedesktop.NetworkManager\", actConConf['Ip4Config'])\n actConIP4Iface = dbus.Interface(actConIP4Proxy, \"org.freedesktop.DBus.Properties\")\n actConIP4Conf = actConIP4Iface.GetAll(\"org.freedesktop.NetworkManager.IP4Config\")\n _wanip = actConIP4Conf[\"AddressData\"][0][\"address\"]\n for dnsEntry in actConIP4Conf[\"NameserverData\"]:\n wandns.append(dnsEntry[\"address\"])\n logging.info(f\"WAN DNS server fetched for {_wanIf} - {dnsEntry['address']}\")\n logging.info(f\"WAN IP fetched for {_wanIf} - {_wanip}\")\n except Exception as e:\n logging.error(\"Trying to fetch WAN IP error\")\n logging.error(e)\n # return WAN IP\n return _wanip", "def get_internet_gateway_output(internet_gateway_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInternetGatewayResult]:\n ...", "def get_default_gateway(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetDefaultGateway', self.handle)", "def get_network(self):\n return self.get_ip_network()[-1]", "def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['network_id'])", "def gateway_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"gateway_url\")", "def get_internet_gateway(filters=None,internet_gateway_id=None,tags=None,opts=None):\n __args__ = dict()\n\n __args__['filters'] = filters\n __args__['internetGatewayId'] = internet_gateway_id\n __args__['tags'] = tags\n if opts is None:\n opts = pulumi.InvokeOptions()\n if opts.version is None:\n opts.version = utilities.get_version()\n __ret__ = pulumi.runtime.invoke('aws:ec2/getInternetGateway:getInternetGateway', __args__, opts=opts).value\n\n return AwaitableGetInternetGatewayResult(\n attachments=__ret__.get('attachments'),\n filters=__ret__.get('filters'),\n internet_gateway_id=__ret__.get('internetGatewayId'),\n owner_id=__ret__.get('ownerId'),\n tags=__ret__.get('tags'),\n id=__ret__.get('id'))", "def __get_network_routes(self):\n routes = []\n\n gws = netifaces.gateways()\n for k in gws.keys():\n if k == 'default':\n continue\n\n\t for r in gws[k]:\n (ip,interface,is_gateway) = r\n\n gw_name = \"{0}\".format(netifaces.address_families[k])\n\n routes.append({\n gw_name : {\n 'ip_address' : ip,\n 'interface' : interface,\n\t\t\t 'default' : is_gateway\n }\n \n }\n )\n\n return routes", "def get_gateway_ip(timeout=10):\n\n return get_default_route(timeout)[0]", "def get_nat(self, natgw):\n return self._get(_gw.Service, natgw)", "def GetNetwork(self, network, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n \"/%s/networks/%s\" % (GANETI_RAPI_VERSION, network),\n query, None)", "def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)", "def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)", "def get_ifconfig_gateway(dut, interface='eth0'):\n\n cmd = \"sudo route -n\"\n output = st.show(dut, cmd)\n gateway_list = [row['gateway'] for row in output if 'G' in row['flags'] and row['iface'] == interface]\n return None if len(gateway_list) == 0 else gateway_list[0]", "def network(self):\n return self._network", "def network(self):\n return self._network", "def network(self):\n return self._network" ]
[ "0.7077603", "0.65630823", "0.65052164", "0.64133066", "0.63255405", "0.6192412", "0.61779696", "0.61768025", "0.61318785", "0.61205274", "0.60998017", "0.6086011", "0.60561377", "0.6016003", "0.5989221", "0.5970194", "0.5968634", "0.59273136", "0.5889362", "0.58878154", "0.58579874", "0.5853172", "0.5852988", "0.5849826", "0.58346564", "0.5834077", "0.5782571", "0.5739476", "0.5739476", "0.5739476" ]
0.7128359
0
Create a new network gateway.
def create_network_gateway(self, body=None): return self.post(self.network_gateways_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def create_nat_gateway(ec2_client=None, module=None):\n try:\n result = ec2_client.create_nat_gateway(SubnetId=module.params.get('subnet_id'),\n AllocationId=module.params.get('allocation_id'))\n wait = module.params.get('wait')\n start_time = datetime.datetime.utcnow()\n results = dict(changed=True)\n if wait:\n wait_timeout = time.time() + module.params.get('wait_timeout')\n while wait_timeout > time.time():\n if 'nat_gateway' in results:\n break\n for present_status in get_nat_gateway_status_list(ec2_client=ec2_client, module=module):\n create_time = present_status.get('created')\n if present_status.get('state') == 'failed' and create_time.replace(tzinfo=None) >= start_time:\n module.fail_json(msg=\"Failed to create VPC NAT Gateway\")\n elif present_status.get('state') == 'available':\n results['nat_gateway'] = camel_dict_to_snake_dict(result['NatGateway'])\n break\n else:\n time.sleep(5)\n else:\n module.fail_json(msg=\"Waited too long for VPC NAT Gateway to be created.\")\n module.exit_json(**results)\n except botocore.exceptions.ClientError as ce:\n module.fail_json(msg=ce.message, **camel_dict_to_snake_dict(ce.response))", "def CreateNetwork(self, network_name, network, gateway=None, network6=None,\n gateway6=None, mac_prefix=None,\n add_reserved_ips=None, tags=None, dry_run=False,\n reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n if add_reserved_ips:\n add_reserved_ips = add_reserved_ips.split(\",\")\n\n if tags:\n tags = tags.split(\",\")\n\n body = {\n \"network_name\": network_name,\n \"gateway\": gateway,\n \"network\": network,\n \"gateway6\": gateway6,\n \"network6\": network6,\n \"mac_prefix\": mac_prefix,\n \"add_reserved_ips\": add_reserved_ips,\n \"tags\": tags,\n }\n\n return self._SendRequest(HTTP_POST, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, body)", "def create_gateway_device(self, body=None):\r\n return self.post(self.gateway_devices_path, body=body)", "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def create(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.create_internet_gateway()", "def create_nat_gateway(subnet_id, eip):\n response = EC2.create_nat_gateway(\n AllocationId=eip,\n SubnetId=subnet_id\n )\n return 
response", "def create_network(self, context, network):\n\n LOG.debug(_(\"QuantumRestProxyV2: create_network() called\"))\n\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context, network[\"network\"])\n net_name = network[\"network\"][\"name\"]\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\"), net_name)\n\n # create in DB\n new_net = super(QuantumRestProxyV2, self).create_network(context,\n network)\n\n # create on networl ctrl\n try:\n resource = NET_RESOURCE_PATH % tenant_id\n data = {\n \"network\": {\n \"id\": new_net[\"id\"],\n \"name\": new_net[\"name\"],\n }\n }\n ret = self.servers.post(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to create remote \"\n \"network: %s\"), e.message)\n super(QuantumRestProxyV2, self).delete_network(context,\n new_net['id'])\n raise\n\n # return created network\n return new_net", "def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is require if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print \"response: %s\" % result[0].response.status\n print \"response: %s\" % result[0].response.reason\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def create_gateway():\n # Start the gateway to connect with py4j\n if 'gateway' not in os.popen('jps').read():\n os.system('java gateway &')\n delay(0.5)\n LOGGER.debug('Started Java gateway')\n # Connect to gateway getting jvm object\n LOGGER.debug('Connecting to gateway from py4j')\n gate = JavaGateway()\n return gate", "def create_network(self, context, network):\n LOG.debug(_(\"NeutronRestProxyV2: create_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n with context.session.begin(subtransactions=True):\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context,\n network[\"network\"])\n\n # create network in DB\n new_net = super(NeutronRestProxyV2, self).create_network(context,\n network)\n self._process_l3_create(context, new_net, network['network'])\n mapped_network = 
self._get_mapped_network_with_subnets(new_net,\n context)\n\n # create network on the network controller\n self.servers.rest_create_network(tenant_id, mapped_network)\n\n # return created network\n return new_net", "def run(self, enable_snat=True, network_create_args=None,\n router_create_args=None):\n network_create_args = network_create_args or {}\n router_create_args = router_create_args or {}\n\n ext_net = self.neutron.create_network(**network_create_args)\n router = self.neutron.create_router(**router_create_args)\n self.neutron.add_gateway_to_router(router_id=router[\"id\"],\n network_id=ext_net[\"id\"],\n enable_snat=enable_snat)\n self.neutron.remove_gateway_from_router(router[\"id\"])", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def initialize_gateway(self, network_ref):\n raise NotImplementedError()", "def post(self):\n context = request.environ.get('context')\n json = util.copy_project_id_into_json(context, g.json)\n network_obj = dbapi.networks_create(context, json)\n return jsonutils.to_primitive(network_obj), 200, None", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! 
Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def create_ig(ec2):\n ## create internet gateway\n print(\"\\n===Creating Internet Gateway...\")\n ig = ec2.create_internet_gateway(TagSpecifications=[{\n \"ResourceType\":\"internet-gateway\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": IG_NAME},\n ]}])\n print(\"===Internet gateway is reay!!\")\n return ig", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def post(self, request, format=None):\n site_owner = SiteOwner.objects.get(user=self.request.user)\n num_gateways = Gateway.objects.filter(site_owner=site_owner).count()\n if num_gateways == 4:\n return Response(\"Maximum number of gateways\", 204)\n next_index = num_gateways + 1\n\n gateway_id = f\"{site_owner.postal_code}-{site_owner.unit_no}-{next_index}\"\n\n gateway = Gateway(\n gateway_id=gateway_id,\n site_owner=site_owner,\n )\n gateway.save()\n\n serializer = GatewaySerializer(gateway)\n return Response(serializer.data)", "def ex_create_network(self, resource_group, network, extra=None, location=None):\n if location is None:\n if self.default_location:\n location = self.default_location\n else:\n raise ValueError(\"location is required.\")\n target = \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s\" % (\n self.subscription_id, resource_group, network)\n params = {\"api-version\": \"2016-03-30\"}\n data = {\n \"tags\": {},\n \"location\": location.id,\n }\n\n if extra:\n data[\"properties\"] = extra\n\n r = self.connection.request(action=target,\n params=params,\n data=data,\n method=\"PUT\")\n\n while r.object is None:\n time.sleep(1)\n\n return AzureNetwork(r.object[\"id\"], r.object[\"name\"], r.object[\"location\"], r.object[\"properties\"])" ]
[ "0.8282759", "0.735945", "0.71596324", "0.6880562", "0.6804249", "0.6776266", "0.6756551", "0.6709377", "0.6702008", "0.66921186", "0.6571167", "0.6495602", "0.64863545", "0.6467722", "0.64599526", "0.64504224", "0.6411406", "0.63999265", "0.6377891", "0.63314635", "0.6322438", "0.6291527", "0.62841994", "0.6258207", "0.6224585", "0.62211317", "0.6195619", "0.6185958", "0.6177809", "0.61755174" ]
0.86117136
0
Update a network gateway.
def update_network_gateway(self, gateway_id, body=None): return self.put(self.network_gateway_path % gateway_id, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self) -> None:\n self._gateway.update()", "def update_gateway_device(self, gateway_device_id, body=None):\r\n return self.put(self.gateway_device_path % gateway_device_id,\r\n body=body)", "def update(self):\n return self._api.update_payment_gateway(**to_dict(self))", "def update_nat(self, natgw, **attrs):\n return self._update(_gw.Service, natgw, **attrs)", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def setGateway(self, gateway):\n # type: (str)->None\n\n self._validator.validate_one(\n 'gateway', VALID_OPTS['gateway'], gateway)\n self._ifAttributes['gateway'] = gateway", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n 
if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def setGateway(self):\n\t\tself.gatewayip = self.settings.getKeyValue('gatewayip')\n\t\tself.socket.send('setenv gatewayip ' + self.gatewayip+'\\r', 1)\n\t\treturn None", "def update_vnet_gateway(\n self, resource_group_name, name, vnet_name, gateway_name, connection_envelope, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.update_vnet_gateway.metadata['url']\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+[^\\.]$'),\n 'name': self._serialize.url(\"name\", name, 'str'),\n 'vnetName': self._serialize.url(\"vnet_name\", vnet_name, 'str'),\n 'gatewayName': self._serialize.url(\"gateway_name\", gateway_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self.config.subscription_id\", self.config.subscription_id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Accept'] = 'application/json'\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct body\n body_content = self._serialize.body(connection_envelope, 'VnetGateway')\n\n # Construct and send request\n request = self._client.put(url, query_parameters, header_parameters, body_content)\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200]:\n raise models.DefaultErrorResponseException(self._deserialize, response)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('VnetGateway', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def set_router_gateway(self, ext_net_name, router_id):\n _ext_net_id = self.get_net_id(ext_net_name)\n if not isinstance(_ext_net_id, unicode):\n return\n\n LOG_OBJ.debug(\"Setting external gateway of %s router.\" % router_id)\n\n _url = \"http://\" + self.host_ip + \":9696/v2.0/routers/\" + \\\n router_id + \".json\"\n\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n _gwdata = {\"router\": {\"external_gateway_info\":\n {\"network_id\": _ext_net_id}}}\n _body = json.dumps(_gwdata)\n\n response = self.request(\"PUT\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while setting router:\"\n \" %s to gateway: %s\" % (router_id, _ext_net_id))\n return response\n if 
response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Setting router gateway Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Router Gateway set is done for %s router\" % router_id)\n return True", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def _set_default_gateway(self, gateway_ip, ifname):\n version = 4\n if gateway_ip.version == 6:\n version = 6\n current = self._get_default_gateway(version)\n desired = str(gateway_ip)\n ifname = self.generic_to_host(ifname)\n\n if current and current != desired:\n # Remove the current gateway and add the desired one\n self.sudo(\n '-%s' % version, 'route', 'del', 'default', 'via', current,\n 'dev', ifname\n )\n return self.sudo(\n '-%s' % version, 'route', 'add', 'default', 'via', desired,\n 'dev', ifname\n )\n if not current:\n # Add the desired gateway\n return self.sudo(\n '-%s' % version, 'route', 'add', 'default', 'via', desired,\n 'dev', ifname\n )", "def send_update(self, neighbor):\n message = 'ROUTE UPDATE'\n source = ':'.join([self.name[0], str(self.name[1])])\n dv = []\n for others in self.distance_vector:\n others_sep = others.split(':')\n dv.append(','.join([others_sep[0], others_sep[1], str(self.distance_vector[others].cost)]))\n dv = '\\n'.join(dv)\n to_send = '\\n'.join([message, source, dv])\n neighbor.sok.sendto(to_send, (neighbor.addr, neighbor.port))\n neighbor.send_timer = time.time()\n neighbor.update_ready = False", "def put(self, id):\n context = request.environ.get('context')\n net_obj = dbapi.networks_update(context, id, request.json)\n return jsonutils.to_primitive(net_obj), 200, None", "def _update_router_gw_info(self, context, router_id, info, router=None):\n LOG.debug(\"Class OVNL3RouterPlugin:::\")\n router = router or self._get_router(context, router_id)\n gw_port = router.gw_port\n network_id = self._validate_gw_info(context, gw_port, info, None)\n\n self._delete_current_gw_port(context, router_id, router, network_id)\n self._create_gw_port(context, router_id, router, network_id, None)", "def update_default_gateway(self, config):\n # Track whether we have set the default gateways, by IP\n # version.\n gw_set = {\n 4: False,\n 6: False,\n }\n\n ifname = None\n for net in config.networks:\n if not net.is_external_network:\n continue\n ifname = net.interface.ifname\n\n # The default v4 gateway is pulled out as a special case\n # because we only want one but we might have multiple v4\n # subnets on the external network. 
However, sometimes the RUG\n # can't figure out what that value is, because it thinks we\n # don't have any external IP addresses, yet. In that case, it\n # doesn't give us a default.\n if config.default_v4_gateway:\n self._set_default_gateway(config.default_v4_gateway, ifname)\n gw_set[4] = True\n\n # Look through our networks and make sure we have a default\n # gateway set for each IP version, if we have an IP for that\n # version on the external net. If we haven't already set the\n # v4 gateway, this picks the gateway for the first subnet we\n # find, which might be wrong.\n for net in config.networks:\n if not net.is_external_network:\n continue\n\n for subnet in net.subnets:\n if subnet.gateway_ip and not gw_set[subnet.gateway_ip.version]:\n self._set_default_gateway(\n subnet.gateway_ip,\n net.interface.ifname\n )\n gw_set[subnet.gateway_ip.version] = True", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def put(self, request, pk, format=None):\n gateway = self.get_object(pk)\n\n # Get token value\n auth = self.request.headers[\"Authorization\"].split()\n if auth[0].lower() != \"token\":\n return Response(404)\n if len(auth) == 1:\n return Response(404)\n elif len(auth) > 2:\n return Response(404)\n token = auth[1]\n token_hash = hashlib.sha256(token.encode()).hexdigest()\n\n site_owner = SiteOwner.objects.get(user=self.request.user)\n gateways = Gateway.objects.filter(site_owner=site_owner)\n\n # Check gateway to update belongs to site owner\n if gateway not in gateways:\n return Response(\"Gateway does not belong to site owner\", 403)\n\n # Toggle token value\n if gateway.authentication_token == None:\n # Start gateway\n # Check that current token key has not been used\n for check_gateway in gateways:\n if token_hash == check_gateway.authentication_token:\n return Response(\"Token already used\")\n gateway.authentication_token = token_hash\n else:\n # Stop gateway\n gateway.authentication_token = None\n gateway.save()\n\n serializer = GatewaySerializer(gateway)\n return Response(serializer.data)", "def update(self, oid, name=None, network=None, external_ips=None, routes=None):\n data = {\n \"router\": {\n }\n }\n if name is not None:\n data['router']['name'] = name\n if network is not None:\n data['router']['external_gateway_info'] = {u'network_id':network}\n if network is not None and external_ips is not None:\n data['router']['external_gateway_info']['external_fixed_ips'] = external_ips\n if routes is not None:\n data['router']['routes'] = routes\n \n path = '%s/routers/%s' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack router: %s' % truncate(res))\n return res[0]['router']", "def _update_dnsmasq(self, network_id):\n\n # Check whether we should really do the following processing.\n if self.suppress_dnsmasq_updates:\n LOG.debug(\"Don't update dnsmasq yet;\"\n \" must be processing a snapshot\")\n self.dirty_networks.add(network_id)\n return\n\n self.dnsmasq_updater.update_network(network_id)", "def edit_payment_gateway(cls, api, id, **data):\n return 
api.update_payment_gateway(id, **data)", "def connect_network_gateway(self, gateway_id, body=None):\r\n base_uri = self.network_gateway_path % gateway_id\r\n return self.put(\"%s/connect_network\" % base_uri, body=body)", "def update(self, remote_gateway_info):\n\n self.touch()\n # remember, don't flag connection stats as worthy of a change.\n self.msg.conn_stats = remote_gateway_info.conn_stats\n # self.msg.last_connection_timestamp = rospy.Time.now() # do we really need this?\n\n # don't update every client, just the ones that we need information from\n important_state = (self.state == ConcertClient.State.AVAILABLE) or (self.state == ConcertClient.State.MISSING)\n is_changed = False\n return is_changed", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)" ]
[ "0.73716456", "0.661934", "0.6539232", "0.65375525", "0.6341725", "0.62306046", "0.61826205", "0.6149847", "0.60765433", "0.6051", "0.5973867", "0.59261507", "0.59006083", "0.5874266", "0.5866621", "0.5860527", "0.585056", "0.58328515", "0.5832191", "0.5825109", "0.5804609", "0.57717335", "0.57280135", "0.5697768", "0.5696997", "0.56921214", "0.5672983", "0.5667907", "0.56569904", "0.56524223" ]
0.8335931
0
Delete the specified network gateway.
def delete_network_gateway(self, gateway_id): return self.delete(self.network_gateway_path % gateway_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_gateway_device(self, gateway_device_id):\r\n return self.delete(self.gateway_device_path % gateway_device_id)", "def delete_nat_gateway(ec2_client=None, module=None, nat_gateway=None):\n nat_gateway_address = nat_gateway.get('NatGatewayAddresses')[0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n results = dict(changed=True, nat_gateway_id=nat_gateway_id,\n public_ip=nat_gateway_address.get('PublicIp'),\n private_ip=nat_gateway_address.get('PrivateIp'),\n allocation_id=nat_gateway_address.get('AllocationId'))\n ec2_client.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n wait = module.params.get('wait')\n if wait:\n wait_timeout = time.time() + module.params.get('wait_timeout')\n while wait_timeout > time.time():\n nat_gateway_status_list = get_nat_gateway_status_list(ec2_client=ec2_client, module=module)\n if nat_gateway_status_list[0].get('state') in ('deleted', 'absent'):\n module.exit_json(**results)\n else:\n time.sleep(5)\n module.fail_json(msg=\"Waited too long for VPC NAT Gateway to be deleted.\")\n else:\n module.exit_json(**results)", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)", "def delete(self, request, format=None):\n site_owner = SiteOwner.objects.get(user=self.request.user)\n gateway_to_delete = (\n Gateway.objects.filter(site_owner=site_owner).order_by(\"gateway_id\").last()\n )\n if gateway_to_delete is None:\n return Response(\"\", 204)\n\n serializer = GatewaySerializer(gateway_to_delete)\n try:\n gateway_to_delete.delete()\n except ProtectedError:\n return Response(\"\", 204)\n return Response(serializer.data)", "def disconnect_network_gateway(self, gateway_id, body=None):\r\n base_uri = self.network_gateway_path % gateway_id\r\n return self.put(\"%s/disconnect_network\" % base_uri, body=body)", "def delete_nat(self, natgw, ignore_missing=True):\n return self._delete(_gw.Service, natgw, ignore_missing=ignore_missing)", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def delete_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n url = '/gateways/{0}'.format(\n *self.encode_path_vars(id))\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to 
remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def Delete(self):\n\n if self.network_id:\n self.cs.delete_network(self.network_id)\n\n if self.is_vpc and self.vpc_id:\n self.cs.delete_vpc(self.vpc_id)", "def delete_network(self, tenant_id, network_id, network_segments):\n self.delete_network_segments(tenant_id, network_segments)\n self.delete_network_bulk(tenant_id, [network_id])", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def deleteNetwork(self, session: Session, id_: str):\n try:\n return NetworkManager().deleteNetwork(session, id_)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def delete_network_profile(arn=None):\n pass", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def delete_gateways(self, vpc_client, vpc_id):\n # pylint: disable=line-too-long\n gateways = vpc_client.list_public_gateways(\n resource_group_id=self.resource_group_id\n ).get_result()[\"public_gateways\"]\n gateways_ids_of_vpc = [\n gateway[\"id\"] for gateway in gateways if gateway[\"vpc\"][\"id\"] == vpc_id\n ]\n for gateway_id in gateways_ids_of_vpc:\n # get_result() used for synchronization\n logger.debug(f\"Deleting gateway: {gateway_id}\")\n vpc_client.delete_public_gateway(gateway_id).get_result()", "def delete(self, params=None):\n self.client.delete_vpn_connection_route(**params)", "def delete_network(self, network_o):\n 
tenant_mo = self.moDir.lookupByDn(network_o.group)\n\n # Filters the tenant children in memory looking for the ones that belongs to the Ap class with an specific name\n ap_list = filter(lambda x: type(x).__name__ == 'Ap' and x.name == AP_NAME,\n self.query_child_objects(str(tenant_mo.dn)))\n if len(ap_list) > 0:\n network_ap = ap_list[0]\n # Filters the tenant children in memory looking for the ones that belongs to the AEPg\n # class with an specific name\n network_epgs = filter(lambda x: type(x).__name__ == 'AEPg' and x.name == network_o.name + VLAN_SUFIX +\n str(network_o.encapsulation),\n self.query_child_objects(str(network_ap.dn)))\n # Removes EPG\n if len(network_epgs) > 0:\n network_epgs[0].delete()\n self.commit(network_epgs[0])\n\n # Filters the tenant children in memory looking for the ones that belongs to the BD class and with an specific\n # name\n bd_list = filter(lambda x: type(x).__name__ == 'BD' and x.name == VLAN + str(network_o.encapsulation),\n self.query_child_objects(str(tenant_mo.dn)))\n if len(bd_list) > 0:\n # Removes bridge domain\n bd_list[0].delete()\n self.commit(bd_list[0])", "def update_network_gateway(self, gateway_id, body=None):\r\n return self.put(self.network_gateway_path % gateway_id, body=body)", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def delete(self):\n\n uri = \"{0}/{1}\".format(self.base_uri, self.ip_or_ifname_or_group_name)\n\n try:\n response = self.session.request(\"DELETE\", uri)\n\n except Exception as e:\n raise ResponseError(\"DELETE\", e)\n\n if not utils._response_ok(response, \"DELETE\"):\n raise GenericOperationError(response.text, response.status_code)\n\n logging.info(\"SUCCESS: Deleting %s\", self)\n\n # Delete back reference from BGP_Routers\n for neighbor in self.__parent_bgp_router.bgp_neighbors:\n if (\n neighbor.ip_or_ifname_or_group_name\n == self.ip_or_ifname_or_group_name\n ):\n self.__parent_bgp_router.bgp_neighbors.remove(neighbor)\n\n # Delete object attributes\n utils.delete_attrs(self, self.config_attrs)", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n 
self.neutron.delete_network(network[\"id\"])" ]
[ "0.74179", "0.7397583", "0.7236924", "0.7101013", "0.6979045", "0.6832819", "0.66104114", "0.64691854", "0.6339585", "0.63345855", "0.62928927", "0.62730354", "0.62596416", "0.6257655", "0.6179543", "0.6134833", "0.61258703", "0.6033291", "0.60238856", "0.6016785", "0.59936714", "0.598875", "0.5970795", "0.59598446", "0.59526914", "0.5919606", "0.5891707", "0.5865472", "0.58408403", "0.5840184" ]
0.86855984
0
Connect a network gateway to the specified network.
def connect_network_gateway(self, gateway_id, body=None): base_uri = self.network_gateway_path % gateway_id return self.put("%s/connect_network" % base_uri, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ConnectNetwork(self, network_name, group_name, mode, link,\n vlan=\"\", dry_run=False, reason=None):\n body = {\n \"group_name\": group_name,\n \"network_mode\": mode,\n \"network_link\": link,\n \"network_vlan\": vlan,\n }\n\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/connect\" %\n (GANETI_RAPI_VERSION, network_name)), query, body)", "def connect(self, setting: dict, gateway_name: str):\n gateway = self.get_gateway(gateway_name)\n if gateway:\n gateway.connect(setting)", "def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def connect_dc_network(self, dc_network):\n self.manage.net = dc_network\n self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network\n logging.info(\"Connected DCNetwork to API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))", "def connect() -> None:\n wlan = network.WLAN(network.STA_IF)\n wlan.active(True)\n\n if not wlan.isconnected():\n wlan.connect(config.WIFI_SSID, config.WIFI_PASSWORD)\n # Wait for connection.\n for _ in range(20):\n if wlan.isconnected():\n return\n utime.sleep(1)\n\n raise Exception('Could not connect to network')", "def connectToInternet( network, switch='s1', rootip='10.254', subnet='10.0/8'):\n switch = network.get( switch )\n prefixLen = subnet.split( '/' )[ 1 ]\n routes = [ subnet ] # host networks to route to\n \n # Create a node in root namespace\n root = Node( 'root', inNamespace=False )\n \n # Stop network-manager so it won't interfere\n root.cmd( 'service network-manager stop' )\n \n # Create link between root NS and switch\n link = network.addLink( root, switch )\n link.intf1.setIP( rootip, prefixLen )\n \n # Start network that now includes link to root namespace\n network.start()\n \n # Start NAT and establish forwarding\n startNAT( root )\n \n # Establish routes from end hosts\n for host in network.hosts:\n host.cmd( 'ip route flush root 0/0' )\n host.cmd( 'route add -net', subnet, 'dev', host.defaultIntf() )\n host.cmd( 'route add default gw', rootip )\n \n print \"*** Hosts are running and should have internet connectivity\"\n print \"*** Type 'exit' or control-D to shut down network\"\n CLI( network )\n \n stopNAT( root )", "def _connect_to_gateway(self):\n for key in self.ssh_pkeys:\n self.logger.debug('Trying to log in with key: {0}'\n .format(hexlify(key.get_fingerprint())))\n try:\n self._transport = self._get_transport()\n self._transport.connect(hostkey=self.ssh_host_key,\n username=self.ssh_username,\n pkey=key)\n if self._transport.is_alive:\n return\n except paramiko.AuthenticationException:\n self.logger.debug('Authentication error')\n 
self._stop_transport()\n\n if self.ssh_password: # avoid conflict using both pass and pkey\n self.logger.debug('Trying to log in with password: {0}'\n .format('*' * len(self.ssh_password)))\n try:\n self._transport = self._get_transport()\n self._transport.connect(hostkey=self.ssh_host_key,\n username=self.ssh_username,\n password=self.ssh_password)\n if self._transport.is_alive:\n return\n except paramiko.AuthenticationException:\n self.logger.debug('Authentication error')\n self._stop_transport()\n\n self.logger.error('Could not open connection to gateway')", "def connect(self):\n self.net.active(True)\n self.net.config(essid=self.ssid, password=self.pwd, channel=3)\n\n while not self.net.active():\n pass\n\n self.net.ifconfig((\"192.168.4.5\", \"255.255.255.0\", \"192.168.4.1\", \"208.67.222.222\"))", "def connect_node(config, node, gateway):\n try:\n conn = Connection(node, user=config.user, gateway=gateway, connect_kwargs=gateway.connect_kwargs)\n conn.open()\n if conn.is_connected:\n logging.info(f\"Connection to {Fore.CYAN}{node}{Style.RESET_ALL} through {Fore.CYAN}{gateway.host}{Style.RESET_ALL} ESTABLISHED\")\n return conn\n except:\n logging.exception(f\"Connection to {Fore.RED}{node}{Style.RESET_ALL} through {Fore.CYAN}{gateway.host}{Style.RESET_ALL} FAILED\")", "def initialize_gateway(self, network_ref):\n raise NotImplementedError()", "async def connect(self, gateway_url):\n if self.ws is not None:\n if not self.ws.closed:\n await self.ws.close()\n self.ws = None\n self.gateway_url = gateway_url\n try:\n self.ws = await self.client.http.create_ws(gateway_url,\n compression=0)\n except ClientConnectorError:\n await self.client.close()\n raise GatewayUnavailable() from None\n self.loop.create_task(self.read_loop())\n self.connected.set()\n if self.session_id is None:\n async with self.client.connection_lock:\n self.client.remaining_connections -= 1\n if self.client.remaining_connections <= 1:\n self.logger.info(\"Max connections reached!\")\n gateway_url, shard_count, _, connections_reset_after = \\\n await self.client.get_gateway()\n await sleep(connections_reset_after / 1000)\n gateway_url, shard_count, self.client.remaining_connections, connections_reset_after = await self.client.get_gateway()\n await self.identify()\n else:\n await self.resume()", "def CreateNetwork(self, network_name, network, gateway=None, network6=None,\n gateway6=None, mac_prefix=None,\n add_reserved_ips=None, tags=None, dry_run=False,\n reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n if add_reserved_ips:\n add_reserved_ips = add_reserved_ips.split(\",\")\n\n if tags:\n tags = tags.split(\",\")\n\n body = {\n \"network_name\": network_name,\n \"gateway\": gateway,\n \"network\": network,\n \"gateway6\": gateway6,\n \"network6\": network6,\n \"mac_prefix\": mac_prefix,\n \"add_reserved_ips\": add_reserved_ips,\n \"tags\": tags,\n }\n\n return self._SendRequest(HTTP_POST, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, body)", "def connect_to_reference_network(self):\n self.dut.droid.wakeLockAcquireBright()\n self.dut.droid.wakeUpNow()\n try:\n self.dut.droid.wifiConnectByConfig(self.reference_networks[0][\"2g\"])\n connect_result = self.dut.ed.pop_event(\n wifi_constants.CONNECT_BY_CONFIG_SUCCESS, SHORT_TIMEOUT)\n self.log.info(connect_result)\n return wutils.track_connection(self.dut,\n self.reference_networks[0][\"2g\"][\"SSID\"], 1)\n except Exception as error:\n self.log.exception(traceback.format_exc())\n self.log.error(\"Connection to network fail 
because %s\", error)\n return False\n finally:\n self.dut.droid.wifiLockRelease()\n self.dut.droid.goToSleepNow()", "def using_network(ssid, password, antenna=0):\n import network\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n print('connecting to network...')\n sta_if.active(True)\n sta_if.config(antenna=antenna) # select antenna, 0=chip, 1=external\n sta_if.connect(ssid, password)\n while not sta_if.isconnected():\n # Check the status\n status = sta_if.status()\n # Constants aren't implemented for PYBD as of MicroPython v1.13.\n # From: https://github.com/micropython/micropython/issues/4682\n # 'So \"is-connecting\" is defined as s.status() in (1, 2) and \"is-connected\" is defined as s.status() == 3.'\n #\n if status <= 0:\n # Error States?\n return False\n #if ((status == network.WLAN.STAT_IDLE) or (status == network.WLAN.STAT_WRONG_PASSWORD)\n # or (status == network.WLAN.STAT_NO_AP_FOUND) or (status == network.WLAN.STAT_CONNECT_FAIL)):\n # Problems so return\n # return False\n\n print('network config:', sta_if.ifconfig())\n return True", "def set_network(self, network: str) -> None:\n return self.add_value(self._network_attribute, network)", "def connect(self):\n # check if network is connected. If yes: return, finished\n # 2019-0801 changed: if self._wlan.isconnected():\n if self.isconnected:\n if USE_DEBUG:\n print('WLAN already connected')\n return self._wlan.ifconfig()\n\n # activate Wifi interface\n if self._wlan.active() is False:\n self._wlan.active(True)\n # scan available networks for the required one\n nets = self._wlan.scan()\n for net in nets:\n ssid = net[0]\n if ssid == bytearray(self._config['SSID']): # must use bytearray!\n if USE_DEBUG:\n print(\"Startup WiFi ...\" + self._config['SSID'])\n # specify if static or dynamic IP is requested\n # STATIC IP: an IP is given\n # DYNAMIC IP: None\n if self._config['STATIC_IP'] is not '':\n if USE_DEBUG:\n print('WifiManager::Static IP configuration')\n # configure network for static IP\n self._wlan.ifconfig((self._config['STATIC_IP'],\n self._config['MASKER'],\n self._config['GATEWAY_IP'],\n self._config['DNS']))\n\n # connect to SSID... 
either for STATIC or DYNAMIC IP\n self._wlan.connect(self._config['SSID'],\n self._config['PASSWRD'])\n while not self.isconnected:\n idle() # save power while waiting\n time.sleep_ms(100) # give it some time\n if USE_DEBUG:\n print(\"Network '{}' connection succeeded!\".format(ssid))\n break\n\n # check connection, if not succesfull: raise exception\n if not self._wlan.active():\n raise exception('Network {0} not found.'.format(ssid))\n\n # returns network configuration...\n # although 'myPy.local' should work on MacOS X (Bonjour)\n return self._wlan.ifconfig()", "def do_connect(self):\n # Attempting STA connection\n print('connecting to network...')\n\n self.sta_if.active(True)\n self.sta_if.connect(self.ssid, self.password)\n for retry_count in range(self.num_retries):\n if self.sta_if.isconnected():\n break\n print(\"Waiting for connection {}/{}\".format(retry_count, self.num_retries))\n time.sleep(1)\n\n # Success:\n if self.sta_if.isconnected():\n self.mode = STA_MODE\n print('network config:', self.sta_if.ifconfig())\n for _ in range(self.num_retries):\n try:\n ntptime.settime()\n break\n except:\n pass\n time.sleep(1)\n\n # Failure, starting access point\n else:\n print('Could not connect, creating WiFi access point')\n self.sta_if.active(False)\n self.create_ap()\n self.mode = AP_MODE", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def connect(self, netid):\n if netid in _k.networks:\n pagename = \"network%d\" % self._page_serial\n self._page_serial += 1\n net = _k.networks[netid]\n page = self._notebook.add(pagename, label=net.name)\n page._netframe = NetworkFrame(page, pagename=pagename, network=net,\n netid=netid)\n page._netframe.pack(fill=Tix.BOTH, expand=True)", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! 
Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def create_nat_gateway(ec2_client=None, module=None):\n try:\n result = ec2_client.create_nat_gateway(SubnetId=module.params.get('subnet_id'),\n AllocationId=module.params.get('allocation_id'))\n wait = module.params.get('wait')\n start_time = datetime.datetime.utcnow()\n results = dict(changed=True)\n if wait:\n wait_timeout = time.time() + module.params.get('wait_timeout')\n while wait_timeout > time.time():\n if 'nat_gateway' in results:\n break\n for present_status in get_nat_gateway_status_list(ec2_client=ec2_client, module=module):\n create_time = present_status.get('created')\n if present_status.get('state') == 'failed' and create_time.replace(tzinfo=None) >= start_time:\n module.fail_json(msg=\"Failed to create VPC NAT Gateway\")\n elif present_status.get('state') == 'available':\n results['nat_gateway'] = camel_dict_to_snake_dict(result['NatGateway'])\n break\n else:\n time.sleep(5)\n else:\n module.fail_json(msg=\"Waited too long for VPC NAT Gateway to be created.\")\n module.exit_json(**results)\n except botocore.exceptions.ClientError as ce:\n module.fail_json(msg=ce.message, **camel_dict_to_snake_dict(ce.response))", "def Connect(self, ap, interface=None, passkey=None,\n connect_timeout=None, connect_attempt_timeout=None,\n dhcp_timeout=None):\n if not isinstance(ap, AccessPoint):\n raise WiFiError('Expected AccessPoint for ap argument: %s' % ap)\n interface = self._ValidateInterface(interface)\n conn = self._NewConnection(\n dut=self._device, interface=interface,\n ap=ap, passkey=passkey,\n connect_timeout=connect_timeout,\n connect_attempt_timeout=connect_attempt_timeout,\n dhcp_timeout=dhcp_timeout,\n tmp_dir=self.tmp_dir)\n conn.Connect()\n return conn", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def test_make_connection(network_with_devices):\n network = network_with_devices\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, OR1_ID, I1, I2] = names.lookup([\"Sw1\", \"Sw2\", \"Or1\", \"I1\",\n \"I2\"])\n\n or1 = devices.get_device(OR1_ID)\n\n # or1 inputs are initially unconnected\n assert or1.inputs == {I1: None,\n I2: None}\n\n # Make connections\n network.make_connection(SW1_ID, None, OR1_ID, I1)\n network.make_connection(SW2_ID, None, OR1_ID, I2)\n\n # or1 inputs should now be connected\n assert or1.inputs == {I1: (SW1_ID, None),\n I2: (SW2_ID, None)}", "def switch_network(self,type = None):\n network_type = self.appconfig(type,\"Settings\")\n self.logger.debug(\"Switch network to %s:%s.\" % (type,network_type))\n if self.enter_settings(u\"More…\"):\n if self.device(text=\"Mobile networks\").exists:\n self.device(text=\"Mobile networks\").click()\n if self.device(text=\"Preferred network mode\").wait.exists(timeout=self.timeout):\n self.device(text=\"Preferred network mode\").click()\n if self.device(resourceId=\"android:id/buttonPanel\").wait.exists(timeout=self.timeout):\n self.device(text=network_type).click()\n print self._is_connected(type)\n self.back_to_home()", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = 
neutronclient(request).create_network(body=body).get('network')\n return Network(network)" ]
[ "0.71485025", "0.71401685", "0.6828193", "0.64658195", "0.6440116", "0.63980526", "0.63615894", "0.6331955", "0.6327372", "0.6321787", "0.63109803", "0.6249003", "0.6223708", "0.61937", "0.6121953", "0.6100455", "0.60495764", "0.5982616", "0.59816384", "0.5966832", "0.59562147", "0.5882711", "0.5880096", "0.58588797", "0.58418614", "0.5825862", "0.58086395", "0.5790024", "0.5758165", "0.5757983" ]
0.8106213
0
Disconnect a network from the specified gateway.
def disconnect_network_gateway(self, gateway_id, body=None):
        base_uri = self.network_gateway_path % gateway_id
        return self.put("%s/disconnect_network" % base_uri, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_network_gateway(self, gateway_id):\r\n return self.delete(self.network_gateway_path % gateway_id)", "def DisconnectWireless(self):\n self.SetForcedDisconnect(True)\n self.wifi.Disconnect()", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def docker_network_disconnect(args, container_id, network): # type: (EnvironmentConfig, str, str) -> None\n docker_command(args, ['network', 'disconnect', network, container_id], capture=True)", "def delete_nat_gateway(ec2_client=None, module=None, nat_gateway=None):\n nat_gateway_address = nat_gateway.get('NatGatewayAddresses')[0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n results = dict(changed=True, nat_gateway_id=nat_gateway_id,\n public_ip=nat_gateway_address.get('PublicIp'),\n private_ip=nat_gateway_address.get('PrivateIp'),\n allocation_id=nat_gateway_address.get('AllocationId'))\n ec2_client.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n wait = module.params.get('wait')\n if wait:\n wait_timeout = time.time() + module.params.get('wait_timeout')\n while wait_timeout > time.time():\n nat_gateway_status_list = get_nat_gateway_status_list(ec2_client=ec2_client, module=module)\n if nat_gateway_status_list[0].get('state') in ('deleted', 'absent'):\n module.exit_json(**results)\n else:\n time.sleep(5)\n module.fail_json(msg=\"Waited too long for VPC NAT Gateway to be deleted.\")\n else:\n module.exit_json(**results)", "def delete_gateway_device(self, gateway_device_id):\r\n return self.delete(self.gateway_device_path % gateway_device_id)", "def DisconnectNetwork(self, network_name, group_name, dry_run=False,\n reason=None):\n body = {\n \"group_name\": group_name,\n }\n\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/disconnect\" %\n (GANETI_RAPI_VERSION, network_name)), query, body)", "def disconnect(self, message=''):\n self.logger.info(\"Disconnecting from network\")\n self.plugin_manager.unload_all()\n self.factory.disconnect = True\n self.quit(message)", "def DisconnectByEdgeInNetwork(self, edge):\n try:\n self.connections.remove((edge.node1, edge.node2))\n edge.node1.removeNeighbour(edge.node2.index)\n except Exception as exc:\n print(\"Exception {} occured when trying to disconnect the edge\".format(exc))", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def disconnect(self) -> None:\n if ':' in self.device_id:\n self.cmd(f\"disconnect {self.device_id}\", devices=False)", "def disconnect(self,connection_name):\n return self.network.disconnect(connection_name)", "def Disconnect(**argd):\n flag, ret = CController.CController.Disconnect(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n return CGateway._SuccessResponse() if ret is True else CGateway._FailureResponse()", "def _DisconnectAP(self):\n disconnect_command = 'iw dev {interface} disconnect'.format(\n interface=self.interface)\n # This call may fail if we are not connected to any network.\n self._device.Call(disconnect_command)", "def disconnect(self):\n r = requests.post(f'{self.SERVER_ADDR}/api/disconnect', headers={'Authorization': 'Token ' + self.token})\n r.raise_for_status()", "def disconnect(self, timeout=-1):\n if self.switch_socket:\n self.switch_socket.close()\n self.switch_socket = None\n self.switch_addr = None\n with self.packets_cv:\n self.packets = []\n with self.connect_cv:\n self.connect_cv.notifyAll()\n 
if self.bridge_socket:\n self.bridge_socket.close()", "async def disconnect(request: web.Request) -> web.Response:\n try:\n body = await request.json()\n except json.JSONDecodeError as e:\n log.exception(\"Error: JSONDecodeError in \"\n \"/wifi/disconnect: {}\".format(e))\n return web.json_response({'message': e.msg}, status=400)\n\n ssid = body.get('ssid')\n try:\n # SSID must always be present\n if not ssid or not isinstance(ssid, str):\n raise DisconnectArgsError(\"SSID must be specified as a string\")\n except DisconnectArgsError as e:\n return web.json_response({'message': str(e)}, status=400)\n\n try:\n ok, message = await nmcli.wifi_disconnect(ssid)\n except Exception as excep:\n return web.json_response({'message': str(excep)}, status=500)\n\n response = {'message': message}\n if ok:\n stat = 200 if 'successfully deleted' in message else 207\n else:\n stat = 500\n return web.json_response(response, status=stat)", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def disconnect(self, connection, disallow_local_disconnect=True):\n\n sock = connection[0]\n address = connection[1]\n\n terminated = self.read_nodestate(2)\n\n try:\n if disallow_local_disconnect:\n Primitives.log(\"Terminated:\"+str(terminated), in_log_level=\"Debug\")\n\n if address == Primitives.get_local_ip() and not terminated:\n\n Primitives.log(\"(Bug) Refusing to disconnect from localhost;\"\n \"that's a terrible idea...\", in_log_level=\"Warning\")\n return None\n\n else:\n\n Primitives.log(\"\\n\\tSelf.disconnect() called.\\n\", in_log_level=\"Info\")\n\n verbose_connection_msg = str(\"Disconnecting from \" + address\n + \"\\n\\t( \" + str(sock) + \" )\")\n\n Primitives.log(verbose_connection_msg, in_log_level=\"Info\")\n\n conn_remove_msg = str(\"Server -> Removing \" + str(sock) + \" from network tuple\")\n Primitives.log(conn_remove_msg, in_log_level=\"Info\")\n self.remove(connection)\n sock.close()\n\n Primitives.log(\"Successfully Disconnected.\", in_log_level=\"Info\")\n\n # Socket not in network tuple . 
Probably already disconnected, or the socket was [closed]\n except IndexError:\n Primitives.log(\"Already disconnected from that address; passing;\", in_log_level=\"Warning\")", "def disconnect(self):\n \n self.net.active(False)", "def stop_network(self):\n self.net.stop()\n cleanup()", "async def disconnect(self):\n try:\n #print(\"Send disconnect command\")\n await self._writeControlPacket(ControlPacketsGenerator.getDisconnectPacket())\n except Exception as err:\n # TODO: catch this error if it is something like already disconnected\n #print(\"Unknown error\")\n raise err\n\n try:\n # Disconnect from this side as well.\n #print(\"Disconnect from this side as well\")\n self.core.ble.disconnect()\n except Exception as err:\n #print(\"Unknown error\")\n raise err", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def do_disconnect(self, *noargs):\n self.link.close()", "async def disconnect(self):\n if not self._session:\n await self._create_session()\n await self._session.post(self._network.SERVER_ADDR + '/api/disconnect')", "def Disconnect(self):\n if not self._auth_process or not self._dhcp_process:\n raise WiFiError('Must connect before disconnecting')\n\n self.ip = None\n dhcp_process, self._dhcp_process = self._dhcp_process, None\n auth_process, self._auth_process = self._auth_process, None\n next(dhcp_process)\n next(auth_process)\n\n # Remove temporary directory.\n if not self._user_tmp_dir:\n self._tmp_dir_handle.__exit__(None, None, None)\n self._tmp_dir = None", "async def disconnect(self) -> None:\n self.client.loop_stop()\n self.client.disconnect()\n self.connected = False\n self.log.debug(\"Disconnected.\")", "def wlanDisconnect(self, iface):\n log.debug('WlanInterface wlanDisconnect() - iface:%s' % (iface.miniDesc()))\n ret = WlanDisconnect( self._handle, \n byref(iface.InterfaceGuid), \n None)\n if ret != ERROR_SUCCESS:\n raise WinError(ret)", "def disconnect(request, backend, association_id=None):\n backend = get_backend(backend, request, request.path)\n if not backend:\n return HttpResponseServerError('Incorrect authentication service')\n backend.disconnect(request.user, association_id)\n url = request.REQUEST.get(REDIRECT_FIELD_NAME, '') or \\\n DISCONNECT_REDIRECT_URL or \\\n DEFAULT_REDIRECT\n return HttpResponseRedirect(url)", "def disconnect(self):\n self.stop()\n self._send_command('exit')\n self.sock.close()\n self.disconnected = True" ]
[ "0.7099296", "0.6354061", "0.633697", "0.6201671", "0.61283344", "0.6002237", "0.5930887", "0.57622766", "0.56524765", "0.5643636", "0.5637644", "0.55920553", "0.55915636", "0.5495355", "0.5494552", "0.5468569", "0.54578805", "0.54431415", "0.54333377", "0.541832", "0.5407983", "0.5407383", "0.5395785", "0.5376068", "0.53650326", "0.53588736", "0.5355936", "0.5347094", "0.53318644", "0.5312579" ]
0.84754086
0
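Editor's note (illustration only, not a dataset row): a hedged usage sketch of the positive document above. The client construction and the body keys are assumptions about the python-neutronclient v2.0 API, and every credential and ID is a made-up placeholder.

    from neutronclient.v2_0 import client as neutron

    # Placeholder credentials/endpoint; adjust for a real deployment.
    client = neutron.Client(username='admin', password='secret',
                            tenant_name='admin',
                            auth_url='http://controller:5000/v2.0')
    # Detach network 'net-uuid' from gateway 'gw-uuid' (hypothetical IDs);
    # the 'network_id' payload key is an assumption about the gateway API.
    client.disconnect_network_gateway('gw-uuid', body={'network_id': 'net-uuid'})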
Fetch a gateway device.
def show_gateway_device(self, gateway_device_id, **_params):
        return self.get(self.gateway_device_path % gateway_device_id, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device(self):\n return self.broker.device(**{\"DeviceRouteID\": self.DeviceRouteID})", "def get_device(self, dev_id):\n return self.api_request('GET', self.url + '/device/' + str(dev_id), {})", "def get_device(self, device):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevice.1.0+xml'})\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tresp = self.ise.get('{0}/config/networkdevice?filter=name.EQ.{1}'.format(self.url_base, device))\n\t\tfound_device = ERS._to_json(resp.text)\n\n\t\tif found_device['ns3:searchResult']['@total'] == '1':\n\t\t\tresp = self.ise.get('{0}/config/networkdevice/{1}'.format(\n\t\t\t\t\tself.url_base, found_device['ns3:searchResult']['ns3:resources']['ns5:resource']['@id']))\n\t\t\tif resp.status_code == 200:\n\t\t\t\tresult['success'] = True\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns4:networkdevice']\n\t\t\t\treturn result\n\t\t\telif resp.status_code == 404:\n\t\t\t\tresult['response'] = '{0} not found'.format(device)\n\t\t\t\tresult['error'] = resp.status_code\n\t\t\t\treturn result\n\t\t\telse:\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\t\tresult['error'] = resp.status_code\n\t\t\t\treturn result\n\t\telif found_device['ns3:searchResult']['@total'] == '0':\n\t\t\t\tresult['response'] = '{0} not found'.format(device)\n\t\t\t\tresult['error'] = 404\n\t\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def get_device(self, step=None):\n if not self.device_cache:\n if step == 'backup':\n try:\n self.device_cache = StaticDevice.objects.get(\n user=self.user.username, name='backup')\n except StaticDevice.DoesNotExist:\n pass\n if not self.device_cache:\n self.device_cache = default_device(self.user)\n return self.device_cache", "def get_device(arn=None):\n pass", "def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)", "def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)", "def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)", "def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)", "def device(self):\n return self.broker.device(**{\"JobDetailID\": self.JobDetailID})", "async def get_device(self, device_id: str) -> dict:\r\n return await self.get(API_DEVICE.format(device_id=device_id))", "def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device", "def get_device(device_id):\n netAdminToolDB = app.config['DATABASE']\n device = netAdminToolDB.get_device(device_id)\n\n if device == None:\n return jsonify({'error': 'Device_id not found'}), 404\n\n uri = url_for('get_device',device_id=device.id,_external=True)\n return jsonify({'device':{\n 'id': device.id,\n 'uri': uri,\n 'name': device.name,\n 'ip_addr': device.ip_addr,\n 'device_type_id': device.device_type_id,\n 'make': device.make,\n 'model': device.model,\n 'code': device.code,\n 'sw_version': device.sw_version,\n 'serial_number': device.serial_number,\n 'datacenter': device.datacenter,\n 'location': device.location,\n 'console': device.console,\n 'description': device.description,\n 'notes': device.notes\n }\n })", "def device(self):\n return 
self.broker.device(**{\"VirtualNetworkMemberID\": self.VirtualNetworkMemberID})", "def fusion_api_get_power_device(self, uri=None, param='', api=None, headers=None):\n return self.pd.get(uri=uri, api=api, headers=headers, param=param)", "def get_device(self, field):\n return self._devices[field]", "def get_device(token, device_id, sensitive_data=False):\n\n tenant = init_tenant_context(token, db)\n orm_device = assert_device_exists(device_id)\n return serialize_full_device(orm_device, tenant, sensitive_data)", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def get_gateway(self):\n return self.gateway", "def get_device(self):\n raise NotImplementedError()", "def show_network_gateway(self, gateway_id, **_params):\r\n return self.get(self.network_gateway_path % gateway_id, params=_params)", "def _get_device():\n return context.get_context('device_target')", "def get(self, did):\n dev = Device.query.filter(Device.id == did).one_or_none()\n if dev:\n return dev.json()\n else:\n return None, 404", "def discover_device(devCtrl, setupPayload):\n log.info(\"Attempting to find device on network\")\n longDiscriminator = int(setupPayload.attributes['Long discriminator'])\n try:\n res = devCtrl.DiscoverCommissionableNodes(\n discovery.FilterType.LONG_DISCRIMINATOR, longDiscriminator, stopOnFirst=True, timeoutSecond=5)\n except exceptions.ChipStackError as ex:\n log.error(\"DiscoverCommissionableNodes failed {}\".format(str(ex)))\n return None\n if not res:\n log.info(\"Device not found\")\n return None\n return res[0]", "def get_device(link):\n device = Device(\"\",0,0,0,0,0)\n device.link = link\n return device.identify()", "async def async_get_device(\n ip_address: str, *, session: Optional[ClientSession] = None\n) -> Device:\n device = Device(ip_address, session=session)\n await device.async_update_device_info()\n return device", "def retr_device( device_id ) :\n\n\t\t\t_logger.info( '...retr_device...' )\n\t\t\toutput = []\n\n\t\t\tdb = mongo.db.auth_devices\n\t\t\tdev = db.find( { 'device_id' : device_id } )\n\t\t\tif dev.count() == 0 :\n\t\t\t\t_logger.error( '...retr_device %s' % e.message )\n\t\t\t\traise mongo_no_resource_exception( 'no tokenized device found')\n\t\t\tfor device in dev :\n\t\t\t\toutput = {'moniker' : device['device_moniker'] ,\n\t\t\t\t\t\t 'description' : device['description'] ,\n\t\t\t\t\t\t 'active' : device['active'] ,\n\t\t\t\t\t\t 'device_id' : device['device_id'] ,\n\t\t\t\t\t\t 'spawned' : device['spawned'] ,\n\t\t\t\t\t\t 'last_known_remote_ip' : device['last_known_remote_ip'] ,\n\t\t\t\t\t\t 'canonical_user' : device['canonical_user'] ,\n\t\t\t\t\t\t 'segment' : device['segment'] ,\n\t\t\t\t\t\t 'auth_apps' : device['auth_apps'] ,\n\t\t\t\t\t\t 'cloak_origin' : device['cloak_origin'] ,\n\t\t\t\t\t\t 'cloak_monitor_stream' : device['cloak_monitor_stream'] ,\n\t\t\t\t\t\t 'auth_http_id' : device['auth_http_id']\n\t\t\t\t\t\t }\n\n\t\t\treturn jsonify({'result' : output})", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "async def get_device(hass: HomeAssistant, device_id: str) -> Optional[DeviceEntry]:\n device_registry = await hass.helpers.device_registry.async_get_registry()\n return device_registry.async_get(device_id)" ]
[ "0.64669836", "0.63796175", "0.624815", "0.62402916", "0.6227601", "0.6223333", "0.6223333", "0.6223333", "0.6223333", "0.60732704", "0.5993849", "0.5948876", "0.5947603", "0.58807874", "0.58714783", "0.5867177", "0.5859333", "0.5844973", "0.5844973", "0.58263415", "0.5808297", "0.5779478", "0.5763485", "0.57421225", "0.57306176", "0.5698277", "0.56748825", "0.56722367", "0.56656206", "0.5659686" ]
0.689964
0
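Editor's note (illustration only): a hedged sketch for the show_gateway_device record above, reusing the `client` built in the previous sketch; the device ID is a placeholder and the dict-shaped return value is an assumption.

    # Fetch one gateway device; the client is expected to return the parsed
    # JSON body (typically keyed by 'gateway_device', which is assumed here).
    device = client.show_gateway_device('gw-dev-uuid')  # hypothetical ID
    print(device)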
Create a new gateway device.
def create_gateway_device(self, body=None):
        return self.post(self.gateway_devices_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def create_device(self, device_dict):\n devices = {'devices': [device_dict]}\n url = '{}/iot/devices'.format(self.url)\n return self.post(url, data=json.dumps(devices), headers=self.headers)", "def _create_device(self):\n project_page = 'https://garage.maemo.org/projects/brisa'\n self.device = Device('urn:schemas-upnp-org:device:BinaryLight:1',\n self.server_name,\n manufacturer='Brisa Team. 
Embedded Laboratory '\\\n 'and INdT Brazil',\n manufacturer_url=project_page,\n model_name='Binary Light Device',\n model_description='A UPnP Binary Light Device',\n model_number='1.0',\n model_url=project_page)", "def create_gateway():\n # Start the gateway to connect with py4j\n if 'gateway' not in os.popen('jps').read():\n os.system('java gateway &')\n delay(0.5)\n LOGGER.debug('Started Java gateway')\n # Connect to gateway getting jvm object\n LOGGER.debug('Connecting to gateway from py4j')\n gate = JavaGateway()\n return gate", "def create_device(self, datacenter, devname, devtype, devaddress,\n devmanaddress, user, password):\n print \"Creating storage device %s at %s...\" % (devname, devaddress)\n device = StorageDevice.builder(self.__context, datacenter) \\\n .name(devname) \\\n .type(devtype) \\\n .iscsiIp(devaddress) \\\n .managementIp(devmanaddress) \\\n .username(user) \\\n .password(password) \\\n .build()\n device.save()\n return device", "def flask_create_device():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n params = {\n 'count': request.args.get('count', '1'),\n 'verbose': request.args.get('verbose', 'false'),\n 'content_type': request.headers.get('Content-Type'),\n 'data': request.data\n }\n\n result = DeviceHandler.create_device(params, token)\n devices = result.get('devices')\n deviceId = devices[0].get('id')\n LOGGER.info(f' Creating a new device with id {deviceId}.')\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)", "def post(self):\n context = request.environ.get('context')\n json = util.copy_project_id_into_json(context, g.json)\n obj = dbapi.netdevices_create(context, json)\n device = jsonutils.to_primitive(obj)\n return device, 200, None", "def create(cls, device_name):\n connection = cls(device_name=device_name)\n connection.put()\n return connection", "def createDevice(self):\n deviceName = self.options.device\n self.log.info(\"Looking for %s\", deviceName)\n ip = ipunwrap(deviceName)\n if not isip(ip):\n try:\n ip = yield getHostByName(deviceName)\n except socket.error as ex:\n self.log.warn(\n \"Hostname lookup failed for %s: %s\", deviceName, ex\n )\n raise NoIPAddress(\"No IP found for name %s\" % deviceName)\n self.log.info(\"Found IP %s for device %s\", ip, deviceName)\n configs = yield self.config().callRemote(\n \"getDeviceConfig\", [deviceName]\n )\n config = configs[0] if configs else None\n if not config or config.temp_device or self.options.remodel:\n device = yield self.discoverDevice(\n ip,\n devicepath=self.options.deviceclass,\n prodState=self.options.productionState,\n deviceConfig=config,\n )\n if device:\n self.log.info(\"Discovered device %s.\", device.id)\n else:\n self.log.info(\"Device '%s' not found\", deviceName)\n defer.returnValue(device)\n else:\n self.log.info(\"Device '%s' already found\", deviceName)", "def test_create_device1(self):\n pass", "def create_device():\n sonyapilib.device.TIMEOUT = 0.1\n device = SonyDevice(\"test\", \"test\")\n device.api_version = 3\n device.cookies = jsonpickle.decode(read_file(\"data/cookies.json\"))\n return device", "def create(cls, imei, device_id):\n try:\n imei_device = cls(imei, device_id)\n imei_device.save()\n except Exception:\n raise Exception", "def create_device(self, name: str, app_eui: str, app_key: str, dev_eui: str):\n 
return create_device(self.api_key, name, app_eui, app_key, dev_eui)", "def create_device(cls, params, token):\n tenant = init_tenant_context(token, db)\n try:\n count = int(params.get('count'))\n except ValueError as e:\n LOGGER.error(e)\n raise HTTPRequestError(400, \"If provided, count must be integer\")\n\n c_length = len(str(count))\n verbose = params.get('verbose') in ['true', '1', 'True']\n if verbose and count != 1:\n raise HTTPRequestError(\n 400, \"Verbose can only be used for single device creation\")\n\n devices = []\n full_device = None\n orm_devices = []\n\n try:\n for i in range(0, count):\n content_type = params.get('content_type')\n data_request = params.get('data')\n device_data, json_payload = parse_payload(content_type, data_request, device_schema)\n validate_repeated_attrs(json_payload)\n\n if json_payload.get('id', None) is None or count > 1 : \n device_data['id'] = DeviceHandler.generate_device_id()\n else:\n DeviceHandler.validate_device_id(json_payload['id'])\n device_data['id'] = json_payload['id']\n\n device_data['label'] = DeviceHandler.indexed_label(count, c_length, device_data['label'], i)\n device_data.pop('templates', None)\n orm_device = Device(**device_data)\n parse_template_list(json_payload.get('templates', []), orm_device)\n auto_create_template(json_payload, orm_device)\n db.session.add(orm_device)\n orm_devices.append(orm_device)\n db.session.commit()\n except IntegrityError as error:\n handle_consistency_exception(error)\n except ValidationError as error:\n raise HTTPRequestError(400, error.messages)\n\n\n for orm_device in orm_devices:\n devices.append(\n {\n 'id': orm_device.id,\n 'label': orm_device.label\n }\n )\n\n full_device = serialize_full_device(orm_device, tenant)\n\n # Updating handlers\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.create(full_device, meta={\"service\": tenant})\n\n if verbose:\n result = {\n 'message': 'device created',\n 'devices': [full_device]\n }\n else:\n result = {\n 'message': 'devices created',\n 'devices': devices\n }\n return result", "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. 
Strip it when returning.\n return device_id[:-1]", "def test_create_device_template(self):\n pass", "def create_gateway(self,\n gateway_template: 'GatewayTemplate',\n **kwargs\n ) -> DetailedResponse:\n\n if gateway_template is None:\n raise ValueError('gateway_template must be provided')\n if isinstance(gateway_template, GatewayTemplate):\n gateway_template = convert_model(gateway_template)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='create_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version\n }\n\n data = json.dumps(gateway_template)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n url = '/gateways'\n request = self.prepare_request(method='POST',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response", "def test_add_device(self):\n\n pass", "def add_device():\n input = request.get_json()\n\n if input == None:\n return jsonify({'error': 'Invalid POST request, no data'}), 400\n if not 'name' in input:\n return jsonify({'error': 'Invalid POST request, missing name'}), 400\n if not 'ip_addr' in input:\n return jsonify({'error': 'Invalid POST request, missing ip_addr'}), 400\n if not 'device_type_id' in input:\n return jsonify({'error': 'Invalid POST request, missing device_type_id'}), 400\n if not 'sw_version' in input:\n return jsonify({'error': 'Invalid POST request, missing sw_version'}), 400\n if not 'serial_number' in input:\n return jsonify({'error': 'Invalid POST request, missing serial_number'}), 400\n if not 'datacenter' in input:\n return jsonify({'error': 'Invalid POST request, missing datacenter'}), 400\n if not 'location' in input:\n return jsonify({'error': 'Invalid POST request, missing location'}), 400\n\n if not 'console' in input:\n input['console'] = ''\n if not 'description' in input:\n input['description'] = ''\n if not 'notes' in input:\n input['notes'] = ''\n\n netAdminToolDB = app.config['DATABASE']\n id = netAdminToolDB.add_device(input['name'], input['ip_addr'],\n input['device_type_id'], input['sw_version'],\n input['serial_number'], input['datacenter'], input['location'],\n input['console'], input['description'], input['notes'])\n\n device = netAdminToolDB.get_device(id)\n deviceDict = dict(device)\n uri = url_for('get_device',device_id=device.id,_external=True)\n deviceDict['uri'] = uri\n\n return jsonify({'device':deviceDict}), 201", "def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = 
dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def CreateDevices(cfg,\n build_target=None,\n build_id=None,\n branch=None,\n kernel_build_id=None,\n kernel_branch=None,\n kernel_build_target=None,\n system_branch=None,\n system_build_id=None,\n system_build_target=None,\n bootloader_branch=None,\n bootloader_build_id=None,\n bootloader_build_target=None,\n gpu=None,\n num=1,\n serial_log_file=None,\n autoconnect=False,\n report_internal_ip=False,\n boot_timeout_secs=None,\n ins_timeout_secs=None):\n client_adb_port = None\n unlock_screen = False\n wait_for_boot = True\n logger.info(\n \"Creating a cuttlefish device in project %s, \"\n \"build_target: %s, \"\n \"build_id: %s, \"\n \"branch: %s, \"\n \"kernel_build_id: %s, \"\n \"kernel_branch: %s, \"\n \"kernel_build_target: %s, \"\n \"system_branch: %s, \"\n \"system_build_id: %s, \"\n \"system_build_target: %s, \"\n \"bootloader_branch: %s, \"\n \"bootloader_build_id: %s, \"\n \"bootloader_build_target: %s, \"\n \"gpu: %s\"\n \"num: %s, \"\n \"serial_log_file: %s, \"\n \"autoconnect: %s, \"\n \"report_internal_ip: %s\", cfg.project, build_target,\n build_id, branch, kernel_build_id, kernel_branch, kernel_build_target,\n system_branch, system_build_id, system_build_target, bootloader_branch,\n bootloader_build_id, bootloader_build_target, gpu, num, serial_log_file,\n autoconnect, report_internal_ip)\n # If multi_stage enable, launch_cvd don't write serial log to instance. 
So\n # it doesn't go WaitForBoot function.\n if cfg.enable_multi_stage:\n wait_for_boot = False\n device_factory = CuttlefishDeviceFactory(\n cfg, build_target, build_id, branch=branch,\n kernel_build_id=kernel_build_id, kernel_branch=kernel_branch,\n kernel_build_target=kernel_build_target, system_branch=system_branch,\n system_build_id=system_build_id,\n system_build_target=system_build_target,\n bootloader_branch=bootloader_branch,\n bootloader_build_id=bootloader_build_id,\n bootloader_build_target=bootloader_build_target,\n boot_timeout_secs=boot_timeout_secs,\n ins_timeout_secs=ins_timeout_secs,\n report_internal_ip=report_internal_ip,\n gpu=gpu)\n return common_operations.CreateDevices(\"create_cf\", cfg, device_factory,\n num, constants.TYPE_CF,\n report_internal_ip, autoconnect,\n serial_log_file, client_adb_port,\n boot_timeout_secs, unlock_screen,\n wait_for_boot)", "def test_create_device_data(self):\n pass", "def create_device(jwt: str, template_id: str, label: str) -> str:\n LOGGER.debug(\"Creating template...\")\n\n args = {\n \"url\": \"{0}/device\".format(CONFIG['dojot']['url']),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {0}\".format(jwt),\n },\n \"data\": json.dumps({\n \"templates\": [template_id],\n \"attrs\": {},\n \"label\": label,\n }),\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\"... created the template\")\n return res[\"devices\"][0][\"id\"]", "def post(self, request, format=None):\n site_owner = SiteOwner.objects.get(user=self.request.user)\n num_gateways = Gateway.objects.filter(site_owner=site_owner).count()\n if num_gateways == 4:\n return Response(\"Maximum number of gateways\", 204)\n next_index = num_gateways + 1\n\n gateway_id = f\"{site_owner.postal_code}-{site_owner.unit_no}-{next_index}\"\n\n gateway = Gateway(\n gateway_id=gateway_id,\n site_owner=site_owner,\n )\n gateway.save()\n\n serializer = GatewaySerializer(gateway)\n return Response(serializer.data)", "def post(self, *args, **kwargs):\n\n addr = EtherAddress(kwargs['addr'])\n\n if 'desc' in kwargs:\n device = self.service.create(addr, kwargs['desc'])\n else:\n device = self.service.create(addr)\n\n self.set_header(\"Location\", \"/api/v1/vbses/%s\" % device.addr)" ]
[ "0.6836808", "0.67288667", "0.6622431", "0.6516142", "0.6516142", "0.6478883", "0.64679116", "0.6254829", "0.6207394", "0.61746645", "0.61678076", "0.6118375", "0.6097656", "0.60154575", "0.5961961", "0.59553105", "0.58612436", "0.5842456", "0.58021945", "0.57906455", "0.5778429", "0.5758435", "0.57325226", "0.57111555", "0.56938165", "0.5677012", "0.5664323", "0.5651537", "0.5636245", "0.562303" ]
0.8448245
0
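Editor's note (illustration only): a hedged sketch for the create_gateway_device record above. The payload shape is modeled on other neutronclient create_* calls and is an assumption, not something the dataset confirms.

    # Create a gateway device; the 'gateway_device' wrapper and 'name' key
    # are assumed payload structure.
    body = {'gateway_device': {'name': 'gw-dev-1'}}
    created = client.create_gateway_device(body=body)  # reuses 'client' from above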
Updates the specified gateway device.
def update_gateway_device(self, gateway_device_id, body=None):
        return self.put(self.gateway_device_path % gateway_device_id, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self) -> None:\n self._gateway.update()", "def update(self):\n self.device = self._api.device_query(self._hardware_address, {})", "def update_device(self, device: Device) -> None:\n self._devices[device.name] = device", "def update(self):\n try:\n self._device.update()\n except requests.exceptions.HTTPError as ex:\n _LOGGER.warning(\"Fritzhome connection error: %s\", ex)\n self._fritz.login()", "def update_device(cls, device_uuid, values):\n return cls.dbdriver.update_device(device_uuid, values)", "def update_network_gateway(self, gateway_id, body=None):\r\n return self.put(self.network_gateway_path % gateway_id, body=body)", "def create_gateway_device(self, body=None):\r\n return self.post(self.gateway_devices_path, body=body)", "def update_device(self, dev_dict):\n # Note(jprabh1x): added bus,slot,function into fields dict as \n # seperate fields.\n no_changes = ('status', 'instance_uuid', 'id', 'extra_info', 'workload')\n map(lambda x: dev_dict.pop(x, None),\n [key for key in no_changes])\n\n # Note(jprabh1x): populating values for bus,slot,function from address in dev_dict.\n if dev_dict.has_key(\"address\"):\n \t\taddress = pci_utils.parse_address(dev_dict[\"address\"])\n \t\tdev_dict.update({'bus':str(address[1]), 'slot':str(address[2]), 'function':str(address[3])})\n for k, v in dev_dict.items():\n if k in self.fields.keys():\n self[k] = v\n else:\n extra_info = self.extra_info\n extra_info.update({k: str(v)})\n self.extra_info = extra_info", "def update_device(cls, params, device_id, token):\n try:\n content_type = params.get('content_type')\n data_request = params.get('data')\n\n device_data, json_payload = parse_payload(content_type, data_request, device_schema)\n validate_repeated_attrs(json_payload)\n\n tenant = init_tenant_context(token, db)\n old_orm_device = assert_device_exists(device_id)\n db.session.delete(old_orm_device)\n db.session.flush()\n\n # handled separately by parse_template_list\n device_data.pop('templates')\n updated_orm_device = Device(**device_data)\n parse_template_list(json_payload.get('templates', []), updated_orm_device)\n auto_create_template(json_payload, updated_orm_device)\n updated_orm_device.id = device_id\n updated_orm_device.updated = datetime.now()\n updated_orm_device.created = old_orm_device.created\n\n db.session.add(updated_orm_device)\n\n db.session.commit()\n except IntegrityError as error:\n handle_consistency_exception(error)\n except ValidationError as error:\n raise HTTPRequestError(400, error.messages)\n\n full_device = serialize_full_device(updated_orm_device, tenant)\n\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.update(full_device, meta={\"service\": tenant})\n\n result = {\n 'message': 'device updated',\n 'device': serialize_full_device(updated_orm_device, tenant)\n }\n return result", "def update_device(device):\n payload = request.get_json()\n if ('name' in payload) and (payload['name'] != device):\n raise BadRequest(\n 'Device name does not match between URL and JSON payload')\n try:\n properties = devices.show(device)\n for k in payload:\n properties[k] = payload[k]\n except KeyDoesNotExist:\n properties = payload\n return _register_device(properties)", "def update(self):\n self.device.update()", "def update(self):\n self.device.update()", "def update(self, dev_id, update):\n headers = self.get_headers()\n url = self.url + '/device/' + str(dev_id)\n rv = self.api_request('POST', url, update, headers)\n if rv is None:\n raise Exception('Failed to update device 
schema')\n return rv", "def update(self):\n return self._api.update_payment_gateway(**to_dict(self))", "async def async_update_device_info(self) -> None:\n data = await self._async_request(\"get\", \"device\")\n self._device_info = cast(Dict[str, Any], data)", "def _update_device_attributes_on_backend(self):\n if self.is_paired:\n LOG.info('Sending updated device attributes to the backend...')\n try:\n api = DeviceApi()\n api.update_version()\n except Exception:\n self._notify_backend_down()", "def update(self):\n self._device = self._geizhals.parse()", "def update(self):\n self._device.update()", "def async_update_device(self) -> None:", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def put(self):\n dev = self.request.get('device')\n reg = self.request.get('registry')\n uploaded_file = self.request.POST.get('data')\n data = uploaded_file.file.read()\n\n self.response.headers['Content-Type'] = 'text/plain'\n if (not dev) and len(dev)==0:\n self.response.write('parameter device not found')\n elif (not reg) and len(reg)==0:\n self.response.write('parameter registry not found')\n elif (not data) and len(data)==0:\n self.response.write('invalid or no key file found')\n else:\n # Get user account\n ds = Datastore()\n user = ds.get_registry(reg)\n if len(user) == 0:\n self.response.write(\"Registry does not exist\")\n else:\n region = get_region_from_user(user)\n\n # Add Device on IOT Core\n iot = IOT()\n success, message = iot.create_device(dev, reg, data, region)\n if success:\n self.response.write('Device Added')\n else:\n self.response.write(message)", "def put(self, request, pk, format=None):\n gateway = self.get_object(pk)\n\n # Get token value\n auth = self.request.headers[\"Authorization\"].split()\n if auth[0].lower() != \"token\":\n return Response(404)\n if len(auth) == 1:\n return Response(404)\n elif len(auth) > 2:\n return Response(404)\n token = auth[1]\n token_hash = hashlib.sha256(token.encode()).hexdigest()\n\n site_owner = SiteOwner.objects.get(user=self.request.user)\n gateways = Gateway.objects.filter(site_owner=site_owner)\n\n # Check gateway to update belongs to site owner\n if gateway not in gateways:\n return Response(\"Gateway does not belong to site owner\", 403)\n\n # Toggle token value\n if gateway.authentication_token == None:\n # Start gateway\n # Check that current token key has not been used\n for check_gateway in gateways:\n if token_hash == check_gateway.authentication_token:\n return Response(\"Token already used\")\n gateway.authentication_token = token_hash\n else:\n # Stop gateway\n gateway.authentication_token = None\n gateway.save()\n\n serializer = GatewaySerializer(gateway)\n return Response(serializer.data)", "def put(self, request, registration_id):\n Device.objects.update_or_create(user=request.user,\n registration_id=registration_id)\n return Response(status=rest_framework.status.HTTP_201_CREATED)", "def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)", "def update_device(device_id):\n netAdminToolDB = app.config['DATABASE']\n device = netAdminToolDB.get_device(device_id)\n #print(f'update_device request = {request.get_data()}')\n if device == None:\n return jsonify({'error': 'Device_id not found'}), 404\n\n input = request.get_json()\n\n if input == None:\n return jsonify({'error': 'Invalid PUT request'}), 400\n\n # Get update values from device for supported keys with value None\n if 'sw_version' in input and input['sw_version'] == None:\n # If device credentials 
were provided\n if 'device_username' and 'device_password' in input:\n input['sw_version'] = get_version_from_device(device,\n input['device_username'], input['device_password'])\n if input['sw_version'] == None:\n return jsonify({'error': 'Unable to retrieve sw_version from device.'}), 404\n # Device credentials not provided, return error\n else:\n return jsonify({'error': 'Updates from device require credentials.'}), 400\n\n if 'serial_number' in input and input['serial_number'] == None:\n # If device credentials were provided\n if 'device_username' and 'device_password' in input:\n input['serial_number'] = get_serial_from_device(device,\n input['device_username'], input['device_password'])\n if input['serial_number'] == None:\n return jsonify({'error': 'Unable to retrieve serial_number from device.'}), 404\n # Device credentials not provided, return error\n else:\n return jsonify({'error': 'Updates from device require credentials.'}), 400\n\n # Send input directly to update_device function, which checks each key.\n netAdminToolDB.update_device(device_id, **input)\n device = netAdminToolDB.get_device(device_id)\n deviceDict = dict(device)\n uri = url_for('get_device',device_id=device.id,_external=True)\n deviceDict['uri'] = uri\n\n return jsonify({'device': deviceDict}), 200", "def test_gwservice_updatedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n payload = {'serialNumber': 'DEADBEEF0011',\n 'owner': 'pytest'}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"PUT\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n device = json.loads(resp.text)\n print (device)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n @pytest.mark.sdk_restapi\n def test_gwservice_deletedevice(self, setup_controller):\n \"\"\"\n Test the delete device endpoint\n WIFI-3455\n \"\"\"\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = 
json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def update_connected_device(service):\n\n update_obj = service.data.get('value')\n\n connected_devices = hass.states.get('connected_devices.connected_devices').as_dict()\n \n attributes = connected_devices[\"attributes\"]\n\n for obj in update_obj:\n # _LOGGER.info(\"update value: %s\", obj[\"value\"])\n # _LOGGER.info(\"target: %s\", obj[\"target\"])\n # _LOGGER.info(\"previous value: %s\", attributes[\"devices\"][obj[\"device\"]][obj[\"target\"]])\n\n attributes[\"devices\"][obj[\"device\"]][obj[\"target\"]] = obj[\"value\"]\n # _LOGGER.info(\"after update: %s\", attributes[\"devices\"][obj[\"device\"]][obj[\"target\"]])\n \n connected_devices[\"attributes\"] = attributes\n\n hass.states.set('connected_devices.connected_devices', 'On', attributes, True)", "def setGateway(self, gateway):\n # type: (str)->None\n\n self._validator.validate_one(\n 'gateway', VALID_OPTS['gateway'], gateway)\n self._ifAttributes['gateway'] = gateway", "def edit_payment_gateway(cls, api, id, **data):\n return api.update_payment_gateway(id, **data)" ]
[ "0.69497615", "0.65185535", "0.64119345", "0.6394917", "0.639221", "0.6377932", "0.6322422", "0.63127756", "0.62251025", "0.6208143", "0.6164233", "0.6164233", "0.61302495", "0.61273617", "0.61150837", "0.60159", "0.5983123", "0.59802514", "0.5960421", "0.5892624", "0.5892624", "0.58402735", "0.581683", "0.5799642", "0.5772192", "0.5730886", "0.5706429", "0.56807053", "0.5655966", "0.5560225" ]
0.77028626
0
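Editor's note (illustration only): a hedged sketch for the update record above, with the same assumed payload shape as the create sketch and a made-up device ID.

    # Rename an existing gateway device ('gw-dev-uuid' is hypothetical).
    body = {'gateway_device': {'name': 'gw-dev-1-renamed'}}
    client.update_gateway_device('gw-dev-uuid', body=body)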
Delete the specified gateway device.
def delete_gateway_device(self, gateway_device_id):
        return self.delete(self.gateway_device_path % gateway_device_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_network_gateway(self, gateway_id):\r\n return self.delete(self.network_gateway_path % gateway_id)", "def delete_device(device_id):\n netAdminToolDB = app.config['DATABASE']\n device = netAdminToolDB.get_device(device_id)\n\n if device == None:\n return jsonify({'error': 'Device_id not found'}), 404\n\n netAdminToolDB.delete_device(device_id)\n return jsonify({'result': True})", "def delete_device(device):\n if device in devices.list():\n devices.delete(device)\n return '', 204\n else:\n raise BadRequest('The given device name does not exist')", "def delete_device(cls, device_id, token):\n\n tenant = init_tenant_context(token, db)\n orm_device = assert_device_exists(device_id)\n data = serialize_full_device(orm_device, tenant)\n\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.remove(data, meta={\"service\": tenant})\n\n db.session.delete(orm_device)\n db.session.commit()\n\n results = {'result': 'ok', 'removed_device': data}\n return results", "def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)", "def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def delete(self, request, format=None):\n site_owner = SiteOwner.objects.get(user=self.request.user)\n gateway_to_delete = (\n Gateway.objects.filter(site_owner=site_owner).order_by(\"gateway_id\").last()\n )\n if gateway_to_delete is None:\n return Response(\"\", 204)\n\n serializer = GatewaySerializer(gateway_to_delete)\n try:\n gateway_to_delete.delete()\n except ProtectedError:\n return Response(\"\", 204)\n return Response(serializer.data)", "def delete_device(self, device: Device) -> None:\n self._devices.pop(device.name, None)", "def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device", "def delete_device(self, device_id: str):\n return delete_device(self.api_key, device_id)", "def delete_device(self, loggly_device):\n\n path = 'devices/%s/' % loggly_device.id\n\n response = self._loggly_delete(path)\n\n return \"%s:%s\" % (response.status_code, response.text)", "def delete_device(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.delete_device) ENABLED START #\n # PROTECTED REGION END # // SdpMasterLeafNode.delete_device", "def delete_gateway(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_gateway')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version\n }\n\n if 'headers' in kwargs:\n 
headers.update(kwargs.get('headers'))\n\n url = '/gateways/{0}'.format(\n *self.encode_path_vars(id))\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def delete_nat_gateway(ec2_client=None, module=None, nat_gateway=None):\n nat_gateway_address = nat_gateway.get('NatGatewayAddresses')[0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n results = dict(changed=True, nat_gateway_id=nat_gateway_id,\n public_ip=nat_gateway_address.get('PublicIp'),\n private_ip=nat_gateway_address.get('PrivateIp'),\n allocation_id=nat_gateway_address.get('AllocationId'))\n ec2_client.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n\n wait = module.params.get('wait')\n if wait:\n wait_timeout = time.time() + module.params.get('wait_timeout')\n while wait_timeout > time.time():\n nat_gateway_status_list = get_nat_gateway_status_list(ec2_client=ec2_client, module=module)\n if nat_gateway_status_list[0].get('state') in ('deleted', 'absent'):\n module.exit_json(**results)\n else:\n time.sleep(5)\n module.fail_json(msg=\"Waited too long for VPC NAT Gateway to be deleted.\")\n else:\n module.exit_json(**results)", "def delete(self, request, registration_id):\n Device.objects.filter(registration_id=registration_id).delete()\n return Response(status=rest_framework.status.HTTP_200_OK)", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass", "def delete_device(self, device_id, give_json=False):\n\n url = Constants.BASE_URL + 'users/devices'\n response = requests.delete(url=url, params={'key': self.user_access_token, 'device_id': device_id})\n\n if give_json:\n return response.json()\n else:\n return response.text", "def test_delete_device_by_id(self):\n pass", "def deleteDevice(serial):\n swDB = switchdb.DB()\n swDB.deleteBySerial(serial)\n swDB.close()", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete_entity(self, device_id):\n url = '{}/iot/devices/{}'.format(self.url, device_id)\n r = requests.delete(url, headers=self.headers)\n return r", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def show_gateway_device(self, gateway_device_id, **_params):\r\n return self.get(self.gateway_device_path % gateway_device_id,\r\n params=_params)", "def delete_device(self):\n # PROTECTED REGION ID(CspSubElementSubarray.delete_device) ENABLED START #\n # PROTECTED REGION END # // CspSubElementSubarray.delete_device", "def destroy(self, context, device_id):\n\n # get deployable_id by name, get only one value.\n dep_obj = Deployable.get_by_name_deviceid(context, self.name,\n device_id)\n # delete attach_handle\n if hasattr(self, 'attach_handle_list'):\n for driver_ah_obj in self.attach_handle_list:\n # get attach_handle_obj, exist and only one.\n driver_ah_obj.destroy(context, dep_obj.id)\n # delete attribute_list\n if hasattr(self, 'attribute_list'):\n DriverAttribute.destroy(context, dep_obj.id)\n # delete dep_obj\n if dep_obj is not None:\n dep_obj.destroy(context)", "def garage_controller_delete(self, garage_controller_id=None, path=None):\n if path == None:\n path = []\n if garage_controller_id:\n path.insert(0, garage_controller_id)\n path.insert(0, \"garagecontroller\")\n location = '/'.join(path)\n return self.send_delete(location, params={})", "def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)", "def 
delete(device_id):\n api_response = requests.delete(\n \"https://api.serverdensity.io/inventory/devices/\" + device_id,\n params={\"token\": get_sd_auth(\"api_token\")},\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\"Could not parse API Response content: %s\", api_response.content)\n raise CommandExecutionError(\n \"Failed to create, API Response: {}\".format(api_response)\n )\n else:\n return None", "def test_delete_device_by_id1(self):\n pass" ]
[ "0.73596257", "0.70437884", "0.70126605", "0.69349265", "0.6878977", "0.68403476", "0.67639136", "0.6751011", "0.67302734", "0.6588201", "0.64607847", "0.62781656", "0.6269225", "0.6235544", "0.6220198", "0.6181021", "0.6181021", "0.60968906", "0.5993642", "0.59863865", "0.59747624", "0.5912938", "0.5880005", "0.586579", "0.5794653", "0.57847625", "0.5777951", "0.57464075", "0.57418513", "0.5739248" ]
0.8634534
0
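Editor's note (illustration only): a hedged sketch for the delete record above; the ID is a placeholder, and the assumption is that a successful DELETE returns no useful body.

    # Remove the gateway device created earlier in these sketches.
    client.delete_gateway_device('gw-dev-uuid')  # hypothetical ID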
Fetches a list of dhcp agents hosting a network.
def list_dhcp_agent_hosting_networks(self, network, **_params):
        return self.get((self.network_path + self.DHCP_AGENTS) % network, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_dhcp_agent_hosting_network(self):\n self.admin_networks_client.list_dhcp_agents_on_hosting_network(\n self.network['id'])", "def test_list_networks_hosted_by_one_dhcp(self):\n body = self.admin_networks_client.list_dhcp_agents_on_hosting_network(\n self.network['id'])\n agents = body['agents']\n self.assertNotEmpty(agents, \"no dhcp agent\")\n agent = agents[0]\n self.assertTrue(self._check_network_in_dhcp_agent(\n self.network['id'], agent))", "def list_networks_on_dhcp_agent(self, dhcp_agent, **_params):\r\n return self.get((self.agent_path + self.DHCP_NETS) % dhcp_agent,\r\n params=_params)", "def test_list_networks_hosted_by_one_dhcp_agent(self):\n with self.override_role():\n self.agents_client.list_networks_hosted_by_one_dhcp_agent(\n self.agent['id'])", "def net_list_on_dhcp_agent(mgr_or_client, *args, **kwargs):\n return net_list(mgr_or_client, *args, **kwargs)", "def auto_schedule_networks(self, plugin, context, host):\n agents_per_network = cfg.CONF.dhcp_agents_per_network\n # a list of (agent, net_ids) tuples\n bindings_to_add = []\n with context.session.begin(subtransactions=True):\n fields = ['network_id', 'enable_dhcp']\n subnets = plugin.get_subnets(context, fields=fields)\n net_ids = set(s['network_id'] for s in subnets\n if s['enable_dhcp'])\n if not net_ids:\n LOG.debug('No non-hosted networks')\n return False\n query = context.session.query(agents_db.Agent)\n query = query.filter(agents_db.Agent.agent_type ==\n constants.AGENT_TYPE_DHCP,\n agents_db.Agent.host == host,\n agents_db.Agent.admin_state_up == sql.true())\n dhcp_agents = query.all()\n for dhcp_agent in dhcp_agents:\n if agents_db.AgentDbMixin.is_agent_down(\n dhcp_agent.heartbeat_timestamp):\n LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)\n continue\n for net_id in net_ids:\n agents = plugin.get_dhcp_agents_hosting_networks(\n context, [net_id], active=True)\n if len(agents) >= agents_per_network:\n continue\n if any(dhcp_agent.id == agent.id for agent in agents):\n continue\n bindings_to_add.append((dhcp_agent, net_id))\n # do it outside transaction so particular scheduling results don't\n # make other to fail\n for agent, net_id in bindings_to_add:\n self._schedule_bind_network(context, [agent], net_id)\n return True", "def schedule(self, plugin, context, network):\n agents_per_network = cfg.CONF.dhcp_agents_per_network\n\n #TODO(gongysh) don't schedule the networks with only\n # subnets whose enable_dhcp is false\n with context.session.begin(subtransactions=True):\n dhcp_agents = plugin.get_dhcp_agents_hosting_networks(\n context, [network['id']], active=True)\n if len(dhcp_agents) >= agents_per_network:\n LOG.debug('Network %s is hosted already',\n network['id'])\n return\n n_agents = agents_per_network - len(dhcp_agents)\n enabled_dhcp_agents = plugin.get_agents_db(\n context, filters={\n 'agent_type': [constants.AGENT_TYPE_DHCP],\n 'admin_state_up': [True]})\n if not enabled_dhcp_agents:\n LOG.warn(_LW('No more DHCP agents'))\n return\n active_dhcp_agents = [\n agent for agent in set(enabled_dhcp_agents)\n if not agents_db.AgentDbMixin.is_agent_down(\n agent['heartbeat_timestamp'])\n and agent not in dhcp_agents\n ]\n if not active_dhcp_agents:\n LOG.warn(_LW('No more DHCP agents'))\n return\n n_agents = min(len(active_dhcp_agents), n_agents)\n chosen_agents = random.sample(active_dhcp_agents, n_agents)\n self._schedule_bind_network(context, chosen_agents, network['id'])\n return chosen_agents", "def _get_guests():\n _guests = list()\n try:\n conn = libvirt.open(None)\n if 
conn:\n _domains = conn.listAllDomains(0)\n else:\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n finally:\n conn.close()\n return _domains", "def getHosts(self):\n raise \"not implemented\"", "def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass", "def getHosts(**options):\n return search.HostSearch.byOptions(**options)", "def list_l3_agent_hosting_routers(self, router, **_params):\r\n return self.get((self.router_path + self.L3_AGENTS) % router,\r\n params=_params)", "def get_hosts(self):\n\n raise NotImplementedError", "def getAllHosts(cluster):\n nics = []\n hosts = rhevGet(\"/api/hosts\")\n doc = libxml2.parseDoc(hosts)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/hosts/host[cluster[@id='\" + getClusterData(cluster ,\"id\") + \"']]\")\n for i in res:\n #hrefs.append(i.prop(\"href\"))\n nic = rhevGet(i.prop(\"href\")+\"/nics\")\n nicdoc = libxml2.parseDoc(nic)\n ctxt = nicdoc.xpathNewContext()\n res = ctxt.xpathEval(\"/host_nics/host_nic/name[text() = '%s']/parent::*\" %rhev_settings.NIC)\n for i in res:\n nics.append(i.prop(\"href\"))\n return nics", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def get_hosts(network):\n\n nm = nmap.PortScanner()\n nm.scan(network, arguments=\"-sn\")\n\n for host in nm.all_hosts():\n details = nm[host]\n\n if \"ipv4\" not in details[\"addresses\"]:\n continue\n\n vendor = details.get(\"vendor\")\n vendor_name = None\n\n if vendor and len(vendor) > 0:\n vendor_name = list(vendor.values())[0]\n\n yield {\n \"hostname\": details.get(\"hostname\"),\n \"ip\": details[\"addresses\"][\"ipv4\"],\n \"vendor\": vendor_name\n }", "def all_hosts(self):\n ...", "def get_all_hosts_puppetdb():\n\n puppetdb_api_url = config['puppetdb_api_url']\n puppetdb_certfile = config.get('puppetdb_certfile', None)\n puppetdb_keyfile = config.get('puppetdb_keyfile', None)\n puppetdb_cafile = config.get('puppetdb_cafile', None)\n\n # query to match only puppet hosts with Check_mk::Agent class\n query = {\n 'query': ['=', 'type', 'Check_mk::Agent'],\n }\n\n r = requests.post(puppetdb_api_url, json=query,\n cert=(puppetdb_certfile, puppetdb_keyfile), verify=puppetdb_cafile)\n\n hosts = {}\n for res in r.json():\n tags = res['tags']\n hostname = res['certname']\n host_environment = res['environment']\n for tag in res['tags']:\n if tag.startswith('roles::') or tag.startswith('role::'):\n host_role = tag.split('::')[1]\n hosts[hostname] = { 'puppet_environment': host_environment,\n 'puppet_role': host_role }\n\n logging.info('got %s hosts from puppetdb', len(hosts))\n\n return hosts", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def get_hosts(self):\n\n hosts = self.client.service.getHosts()\n return hosts", "def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)", "def add_network_to_dhcp_agent(self, dhcp_agent, body=None):\r\n return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent,\r\n body=body)", "def test_add_remove_network_from_dhcp_agent(self):\n # The agent is now bound to the network, we can free the port\n self.ports_client.delete_port(self.port['id'])\n agent = dict()\n agent['agent_type'] = 
None\n body = self.admin_agents_client.list_agents()\n agents = body['agents']\n for a in agents:\n if a['agent_type'] == 'DHCP agent':\n agent = a\n break\n self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '\n 'DHCP agent in agent list though dhcp_agent_scheduler'\n ' is enabled.')\n network = self.create_network()\n network_id = network['id']\n if self._check_network_in_dhcp_agent(network_id, agent):\n self._remove_network_from_dhcp_agent(network_id, agent)\n self._add_dhcp_agent_to_network(network_id, agent)\n else:\n self._add_dhcp_agent_to_network(network_id, agent)\n self._remove_network_from_dhcp_agent(network_id, agent)", "def dhcp_cmd(args):\n if VERSION_LIVEBOX == 'lb28':\n dhcpv4_object = 'NMC'\n else:\n dhcpv4_object = 'DHCPv4.Server.Pool.default'\n requete_print(dhcpv4_object + \":getStaticLeases\")", "def hosts_cmd(args):\n r = requete(\"Hosts.Host:get\")\n if not r:\n return\n if len(args) > 0:\n for i in range(0, len(args)):\n for _, host in r['status'].items():\n if (host['MACAddress'].lower() == args[i].lower()\n or host['HostName'].lower() == args[i].lower()\n or host['IPAddress'] == args[i]):\n # pprint.pprint(host)\n json.dump(host, sys.stdout, indent=4)\n else:\n #pprint.pprint(r['status'])\n for _, host in r['status'].items():\n actif = \" \" if host['Active'] else \"*\"\n if mac_parser is None:\n s = \"%-18s %-15s %c %-35s %s\" % (host['MACAddress'], host['InterfaceType'], actif, host['HostName'], host['IPAddress'])\n else:\n s = \"%-18s %-12s %-15s %c %-35s %s\" % (host['MACAddress'], mac_parser.get_manuf(host['MACAddress']), host.get('InterfaceType', \"\"), actif, host['HostName'], host['IPAddress'])\n print(s)", "def all_hosts(self):\n if not 'scan' in list(self._scan_result.keys()):\n return []\n listh = list(self._scan_result['scan'].keys())\n listh.sort()\n return listh", "def get_hosts(self, target, listener_type):", "def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']", "def get_host_list():\n gparr = GpArray.initFromCatalog(dbconn.DbURL(port = MASTER_PORT), utility = True)\n segs = gparr.getDbList()\n\n master = None\n standby_host = None\n segment_host_list = []\n\n for seg in segs:\n if seg.isSegmentStandby(current_role=True):\n standby_host = seg.getSegmentHostName()\n elif not seg.isSegmentMaster(current_role=True):\n segment_host_list.append(seg.getSegmentHostName())\n elif seg.isSegmentMaster(current_role=True):\n master = seg.getSegmentHostName()\n\n #Deduplicate the hosts so that we\n #dont install multiple times on the same host\n segment_host_list = list(set(segment_host_list))\n if master in segment_host_list:\n segment_host_list.remove(master)\n\n return (standby_host, segment_host_list)", "def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))" ]
[ "0.7808694", "0.7578811", "0.71674603", "0.69997406", "0.67217845", "0.65720654", "0.64308", "0.6159891", "0.58248585", "0.5748065", "0.57080424", "0.5690139", "0.56564534", "0.56515914", "0.56087554", "0.5607984", "0.55518776", "0.55417496", "0.55108505", "0.549798", "0.5483908", "0.54792905", "0.54785955", "0.5397229", "0.53909606", "0.5376879", "0.53655285", "0.53392327", "0.5317389", "0.53160655" ]
0.82947797
0
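A hypothetical usage sketch exercising the `list_networks_on_dhcp_agent` client method quoted among the negatives above, assuming the legacy python-neutronclient v2.0 `Client`; the endpoint, credentials, and UUID are placeholders, and the `networks` reply key assumes the standard Neutron agent-scheduler response shape:

from neutronclient.v2_0 import client

# Placeholder credentials/endpoint -- substitute real Keystone values.
neutron = client.Client(username='admin', password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:5000/v2.0')
# GET on the agent's dhcp-networks sub-resource; reply is keyed by 'networks'.
nets = neutron.list_networks_on_dhcp_agent('dhcp-agent-uuid')
for net in nets.get('networks', []):
    print(net['id'], net.get('name'))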
Adds a network to dhcp agent.
def add_network_to_dhcp_agent(self, dhcp_agent, body=None): return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dhcp_agent_network_add(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def add_network(self, network, distance):\n self.networks[network] = distance", "def nic_add(args):\n name = args.name\n network = args.network\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if network is None:\n common.pprint(\"Missing network. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding Nic to %s...\" % name)\n k.add_nic(name=name, network=network)", "def test_add_dhcp_agent_to_network(self):\n network_id = self._create_and_prepare_network_for_agent(\n self.agent['id'])\n\n with self.override_role():\n self.agents_client.add_dhcp_agent_to_network(\n self.agent['id'], network_id=network_id)\n # Clean up is not necessary and might result in 409 being raised.", "def test_add_remove_network_from_dhcp_agent(self):\n # The agent is now bound to the network, we can free the port\n self.ports_client.delete_port(self.port['id'])\n agent = dict()\n agent['agent_type'] = None\n body = self.admin_agents_client.list_agents()\n agents = body['agents']\n for a in agents:\n if a['agent_type'] == 'DHCP agent':\n agent = a\n break\n self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '\n 'DHCP agent in agent list though dhcp_agent_scheduler'\n ' is enabled.')\n network = self.create_network()\n network_id = network['id']\n if self._check_network_in_dhcp_agent(network_id, agent):\n self._remove_network_from_dhcp_agent(network_id, agent)\n self._add_dhcp_agent_to_network(network_id, agent)\n else:\n self._add_dhcp_agent_to_network(network_id, agent)\n self._remove_network_from_dhcp_agent(network_id, agent)", "def add_network(self, router, network, distance):\n self.routers[router].add_network(network, distance)", "def _add_netif(self, instance, netif_number=0,\n host_if=False,\n bridge=FLAGS.ovz_bridge_device):\n # TODO(imsplitbit): fix this to be nova-ish i.e. 
async\n try:\n # Command necessary to create a bridge networking setup.\n # right now this is the only supported networking model\n # in the openvz connector.\n if not host_if:\n host_if = 'veth%s.%s' % (instance['id'], netif_number)\n\n out, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--netif_add',\n 'eth%s,,%s,,%s' % (netif_number,\n host_if, bridge))\n\n LOG.debug(out)\n\n if err:\n LOG.error(err)\n\n except ProcessExecutionError:\n raise exception.Error(\n 'Error adding network device to container %s' %\n instance['id'])", "def add_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n nic_spec.device = vim.vm.device.VirtualVmxnet3()\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = \"assigned\"\n nic_spec.device.deviceInfo = vim.Description()\n nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n nic_spec.device.backing.network = network_obj\n nic_spec.device.backing.deviceName = network_obj.name\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def AddNetwork(parser):\n parser.add_argument(\n '--network',\n help=(\n 'Network in the current project that the instance will be part '\n 'of. To specify using a network with a shared VPC, use the full '\n \"URL of the network. For an example host project, 'testproject', \"\n \"and shared network, 'testsharednetwork', this would use the \"\n 'form: '\n '`--network`=`projects/testproject/global/networks/'\n 'testsharednetwork`'\n ),\n )", "def addNetwork(self, session: Session, network: Network) -> int:\n try:\n return NetworkManager().addNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def add_network(self, name_of_vm, port_group):\n adapter_type = 'e1000'\n vds = \"yes\"\n try:\n # import sys,pdb;pdb.Pdb(stdout=sys.__stdout__).set_trace()\n vmachine = self.vcenter.get_dc_object([vim.VirtualMachine], name_of_vm)\n\n if vds == 'yes':\n network = self.vcenter.get_dc_object([vim.dvs.DistributedVirtualPortgroup], port_group)\n else:\n network = self.get_network(port_group)\n\n new_nic = vim.vm.ConfigSpec()\n nic_changes = []\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n if adapter_type == 'e1000':\n nic_spec.device = vim.vm.device.VirtualE1000()\n elif adapter_type == 'vmxnet2':\n nic_spec.device = vim.vm.device.VirtualVmxnet2()\n else:\n nic_spec.device = vim.vm.device.VirtualVmxnet3()\n nic_spec.device.deviceInfo = vim.Description()\n if vds == 'yes':\n vir_port = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()\n nic_spec.device.backing = vir_port\n dvs_port_connection = vim.dvs.PortConnection()\n dvs_port_connection.portgroupKey = network.key\n dvs_port_connection.switchUuid = network.config.distributedVirtualSwitch.uuid\n nic_spec.device.backing.port = dvs_port_connection\n else:\n nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n 
nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.backing.network = network\n nic_spec.device.backing.deviceName = port_group\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n nic_spec.device.connectable.status = 'untried'\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = 'assigned'\n nic_changes.append(nic_spec)\n new_nic.deviceChange = nic_changes\n add_nic = vmachine.ReconfigVM_Task(spec=new_nic)\n log.info('Adding Network adapter to the VM...')\n while add_nic.info.state not in ['success', 'error']:\n time.sleep(1)\n status = add_nic.info.state\n if status == 'success':\n log.info('Nic added successfully: {}'.format(name_of_vm))\n if status == 'error':\n log.info('Could not add Network adapter {}'.format(name_of_vm))\n return status\n\n except Exception as error:\n log.info(\"Caught exception: {} \\n {}\".format(error, error.message))", "def AddNetworkFlag(parser):\n help_text = \"\"\"\\\n The VPC network from which the AlloyDB instance is accessible via private\n IP. For example, projects/myProject/global/networks/default. This setting\n cannot be updated after it is set.\n \"\"\"\n parser.add_argument('--network', help=help_text)", "def add_network(self, addr, netmask):\n\n if len(addr) == 4:\n return ipset.ipset_ipv4_add_network(self.set, addr, netmask)\n\n elif len(addr) == 16:\n return ipset.ipset_ipv6_add_network(self.set, addr, netmask)\n\n else:\n raise ValueError(\"Invalid address\")", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def attach_to_network(self, network, ip=None):\n # type: (Union[Network,BoundNetwork],Optional[str]) -> BoundAction\n return self._client.attach_to_network(self, network, ip)", "def test_add_network(self):\n pass", "def do_network_attach(cs, args):\n opts = {}\n opts['container'] = args.container\n opts['network'] = args.network\n opts['port'] = args.port\n opts['fixed_ip'] = args.fixed_ip\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.network_attach(**opts)\n print(\"Request to attach network to container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Attach network to container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def set_network(self, network: str) -> None:\n return self.add_value(self._network_attribute, network)", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def add_provider_network(network_id, network_type, segmentation_id):\n session = db.get_session()\n if session.query(network_models_v2.ProviderNetwork).filter_by(\n network_id=network_id).first():\n raise c_exc.ProviderNetworkExists(network_id)\n pnet = network_models_v2.ProviderNetwork(network_id=network_id,\n network_type=network_type,\n segmentation_id=segmentation_id)\n session.add(pnet)\n session.flush()", "def add_network_interface(self, iface: 'NetworkInterface',\n is_gateway: bool = False):\n self.ifaces.append(iface)\n if is_gateway:\n self._gateway = iface", "def elAddNetworkConfigurationWithDhcp(self, device):\n commandSection = self.sectionByName(\"command\")\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n deviceMatch = re.match(r\"([^0-9]+)([0-9])\", 
device)\n if deviceMatch:\n # e.g. \"eth0\"\n devicePrefix = deviceMatch.group(1)\n deviceNumber = deviceMatch.group(2)\n deviceNumber = int(deviceNumber)\n for i in range(8, deviceNumber - 1, -1):\n deviceI = devicePrefix + str(i)\n deviceIPlus1 = devicePrefix + str(i + 1)\n # move up by one device each network configuration\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--device[ \\t]*(?:=|[ \\t])[ \\t]*)\" + re.escape(deviceI) + r\"(.*)$\",\n r\"\\g<1>\" + deviceIPlus1 + r\"\\g<2>\",\n commandSection.string)\n # not --noipv6\n networkConfiguration = \"network --device=\" + device + \" --bootproto=dhcp --onboot=yes --activate\"\n if deviceMatch and deviceNumber == 0:\n # having configuration of eth0 first appears to be more conducive to overall success,\n # and also, per http://fedoraproject.org/wiki/Anaconda/Kickstart#network, supposedly\n # \"... in installer environment. Device of the first network command is activated if network is required,\n # e.g. in case of network installation ...\",\n commandSection.string = networkConfiguration + \"\\n\" \\\n + \"#\\n\" \\\n + commandSection.string\n else:\n commandSection.string = commandSection.string \\\n + \"#\\n\" \\\n + networkConfiguration + \"\\n\"", "def addNetworkInterface(self, name, link=None, ip=None, up=None, down=None):\r\n if up:\r\n if not os.path.isabs(up):\r\n raise ValueError('Path to up script has to be absolute.')\r\n\r\n if not os.path.isfile(up):\r\n raise ValueError('Path to up script is not a file.')\r\n\r\n if not os.access(up, os.X_OK):\r\n raise ValueError('Up script is not executable.')\r\n\r\n if down:\r\n if not os.path.isabs(down):\r\n raise ValueError('Path to down script has to be absolute.')\r\n\r\n if not os.path.isfile(down):\r\n raise ValueError('Path to down script is not a file.')\r\n\r\n if not os.access(down, os.X_OK):\r\n raise ValueError('Down script is not executable.')\r\n\r\n self._ifs.append((name, link, ip, up, down))", "def add_networks(networks=None,\n skip_generating_certificates=False,\n verbose=False,\n config_file=None):\n setup_console_logger(verbose)\n config.load_config(config_file)\n logger.info('Trying to add new networks to Manager...')\n\n networks = json.loads(networks)\n _validate_networks(networks)\n metadata = load_cert_metadata()\n\n _update_metadata_file(metadata, networks)\n if not skip_generating_certificates:\n create_internal_certs()\n\n script_path = join(SCRIPT_DIR, 'update-manager-networks.py')\n hostname = metadata.get('hostname') or _get_hostname()\n args = [\n '--hostname', hostname,\n '--networks', json.dumps(networks),\n ]\n if bool(metadata.get('broker_addresses')):\n # if we store broker addresses in the metadata file, that means we\n # have a local broker and must update that too\n args.append('--broker')\n\n run_script_on_manager_venv(script_path, script_args=args)\n\n logger.notice('New networks were added successfully. 
Please restart the'\n ' following services: `nginx`, `cloudify-mgmtworker`,'\n '`cloudify-rabbitmq`')", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def add_virtual_network(self, hVirtNet, nFlags = 0):\n\t\treturn Job(SDK.PrlSrv_AddVirtualNetwork(self.handle, conv_handle_arg(hVirtNet), nFlags)[0])", "def set_network(self, path, ip=\"\", netmask=\"255.255.255.0\", gateway=\"\"):\n\n with open(os.path.join(path, 'etc', 'network', 'interfaces'), 'w') \\\n as f:\n f.write(\"auto lo\\niface lo inet loopback\\n\\n\")\n\n if len(ip) <= 0:\n f.write(\"auto eth0\\niface eth0 inet dhcp\\n\")\n else:\n f.write(\"auto eth0\\niface eth0 inet static\\n\")\n f.write(\"\\taddress {0}\\n\\tnetmask {1}\\n\\tgateway {2}\\n\".\\\n format(ip, netmask, gateway))", "def CreateNetwork(self, network_name, network, gateway=None, network6=None,\n gateway6=None, mac_prefix=None,\n add_reserved_ips=None, tags=None, dry_run=False,\n reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n if add_reserved_ips:\n add_reserved_ips = add_reserved_ips.split(\",\")\n\n if tags:\n tags = tags.split(\",\")\n\n body = {\n \"network_name\": network_name,\n \"gateway\": gateway,\n \"network\": network,\n \"gateway6\": gateway6,\n \"network6\": network6,\n \"mac_prefix\": mac_prefix,\n \"add_reserved_ips\": add_reserved_ips,\n \"tags\": tags,\n }\n\n return self._SendRequest(HTTP_POST, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, body)", "def attach_to_network(self,\n load_balancer, # type: Union[LoadBalancer, BoundLoadBalancer]\n network, # type: Union[Network, BoundNetwork]\n ip=None # type: Optional[str]\n ):\n data = {\"network\": network.id}\n if ip is not None:\n data.update({\"ip\": ip})\n\n response = self._client.request(\n url=\"/load_balancers/{load_balancer_id}/actions/attach_to_network\".format(\n load_balancer_id=load_balancer.id),\n method=\"POST\", json=data)\n return BoundAction(self._client.actions, response['action'])" ]
[ "0.7993567", "0.69905144", "0.695304", "0.6948659", "0.69300616", "0.6838031", "0.6834063", "0.656378", "0.65624505", "0.65344405", "0.63545275", "0.6264123", "0.6201176", "0.61982304", "0.6183074", "0.6177877", "0.6031145", "0.5987884", "0.5983296", "0.596687", "0.59634006", "0.5950143", "0.59261763", "0.5923481", "0.59144175", "0.58983195", "0.58193356", "0.5814669", "0.5800218", "0.576982" ]
0.79406667
1
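A minimal usage sketch for the `add_network_to_dhcp_agent` method shown in the record above, again assuming the legacy python-neutronclient v2.0 `Client` with placeholder endpoint, credentials, and UUIDs:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:5000/v2.0')  # placeholder endpoint
# Bind a network to a specific DHCP agent: the agent UUID goes in the URL,
# the network UUID in the request body, matching the method's signature above.
neutron.add_network_to_dhcp_agent('dhcp-agent-uuid',
                                  body={'network_id': 'network-uuid'})

This mirrors the `neutron dhcp-agent-network-add` CLI command, which posts the same body to the agent's dhcp-networks sub-resource.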
Remove a network from dhcp agent.
def remove_network_from_dhcp_agent(self, dhcp_agent, network_id): return self.delete((self.agent_path + self.DHCP_NETS + "/%s") % ( dhcp_agent, network_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dhcp_agent_network_remove(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def test_delete_network_from_dhcp_agent(self):\n network_id = self._create_and_prepare_network_for_agent(\n self.agent['id'])\n self.agents_client.add_dhcp_agent_to_network(\n self.agent['id'], network_id=network_id)\n # Clean up is not necessary and might result in 409 being raised.\n\n with self.override_role():\n self.agents_client.delete_network_from_dhcp_agent(\n self.agent['id'], network_id=network_id)", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def test_add_remove_network_from_dhcp_agent(self):\n # The agent is now bound to the network, we can free the port\n self.ports_client.delete_port(self.port['id'])\n agent = dict()\n agent['agent_type'] = None\n body = self.admin_agents_client.list_agents()\n agents = body['agents']\n for a in agents:\n if a['agent_type'] == 'DHCP agent':\n agent = a\n break\n self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '\n 'DHCP agent in agent list though dhcp_agent_scheduler'\n ' is enabled.')\n network = self.create_network()\n network_id = network['id']\n if self._check_network_in_dhcp_agent(network_id, agent):\n self._remove_network_from_dhcp_agent(network_id, agent)\n self._add_dhcp_agent_to_network(network_id, agent)\n else:\n self._add_dhcp_agent_to_network(network_id, agent)\n self._remove_network_from_dhcp_agent(network_id, agent)", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into 
the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def remove_network(self, name_of_vm):\n try:\n # vmachine = self.get_vm_by_name(name_of_vm)\n vmachine = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n network = None\n devices = vmachine.config.hardware.device\n networks = []\n for device in devices:\n if isinstance(device, vim.vm.device.VirtualEthernetCard):\n networks.append(device)\n status = 'error'\n if not networks:\n log.info(\"INFO: No network adapters connected to the VM to remove\")\n status = 'success'\n else:\n for network in networks:\n name = network.deviceInfo.label\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network\n remove_nic = vim.vm.ConfigSpec()\n remove_nic.deviceChange = [nic_spec]\n task = WaitForTask(vmachine.ReconfigVM_Task(spec=remove_nic))\n\n if task == 'success':\n log.info(\"removed '{}' network adapter : {}\".format(name, name_of_vm))\n else:\n log.info(\"Could not '{}' Remove Network adapter: {}\".format(name, name_of_vm))\n status = 'success'\n return status\n except Exception as error:\n log.info(\"Error in 'remove_nic' keyword... {} \\n {}\".format(error, error.message))", "def remove_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network_obj\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. 
Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def delete_network(self, tenant_id, network_id, network_segments):\n self.delete_network_segments(tenant_id, network_segments)\n self.delete_network_bulk(tenant_id, [network_id])", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def delete_network(self, network_o):\n tenant_mo = self.moDir.lookupByDn(network_o.group)\n\n # Filters the tenant children in memory looking for the ones that belongs to the Ap class with an specific name\n ap_list = filter(lambda x: type(x).__name__ == 'Ap' and x.name == AP_NAME,\n self.query_child_objects(str(tenant_mo.dn)))\n if len(ap_list) > 0:\n network_ap = ap_list[0]\n # Filters the tenant children in memory looking for the ones that belongs to the AEPg\n # class with an specific name\n network_epgs = filter(lambda x: type(x).__name__ == 'AEPg' and x.name == network_o.name + VLAN_SUFIX +\n str(network_o.encapsulation),\n self.query_child_objects(str(network_ap.dn)))\n # Removes EPG\n if len(network_epgs) > 0:\n network_epgs[0].delete()\n self.commit(network_epgs[0])\n\n # Filters the tenant children in memory looking for the ones that belongs to the BD class and with an specific\n # name\n bd_list = filter(lambda x: type(x).__name__ == 'BD' and x.name == VLAN + str(network_o.encapsulation),\n self.query_child_objects(str(tenant_mo.dn)))\n if len(bd_list) > 0:\n # Removes bridge domain\n bd_list[0].delete()\n self.commit(bd_list[0])", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = 
res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def deleteNetwork(self, session: Session, id_: str):\n try:\n return NetworkManager().deleteNetwork(session, id_)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def test_delete_network(self):\n pass", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def delete_overlay_network(self, name=NETWORK_NAME):\n try:\n # An overlay network is usually created in host belonging to a swarm\n self.leave_swarm()\n network = self.docker_client.networks.get(name)\n network.remove()\n except docker.errors.NotFound as nf:\n print(\"Network \"+name+\" not found\")\n except docker.errors.APIError as de:\n print(\"Error deleting overlay network\")\n print de\n exit(1)\n return", "def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return" ]
[ "0.81059325", "0.7416146", "0.7311771", "0.71699446", "0.70607364", "0.69897085", "0.67913854", "0.6775231", "0.67347074", "0.6649926", "0.6647897", "0.65776294", "0.6455701", "0.6360901", "0.6351416", "0.63387024", "0.6319579", "0.63093585", "0.62985367", "0.62240994", "0.61991996", "0.6181161", "0.6152535", "0.61210954", "0.60139453", "0.6010361", "0.5988234", "0.59537375", "0.59388703", "0.5912605" ]
0.81989366
0
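A matching sketch for `remove_network_from_dhcp_agent` from the record above, same placeholder client setup; note that, unlike the add call, the network id is passed positionally rather than in a body dict:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:5000/v2.0')  # placeholder endpoint
# Issues DELETE on /agents/<dhcp-agent-id>/dhcp-networks/<network-id>,
# as built by the method body shown in the record above.
neutron.remove_network_from_dhcp_agent('dhcp-agent-uuid', 'network-uuid')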
Fetches a list of L3 agents hosting a router.
def list_l3_agent_hosting_routers(self, router, **_params): return self.get((self.router_path + self.L3_AGENTS) % router, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_l3_agents_on_router(self):\n with self.override_role():\n # NOTE: It is not empty list since it's a special case where\n # policy.enforce is called from the controller.\n self.ntp_client.list_l3_agents_hosting_router(self.router['id'])", "def router_list_on_l3_agent(mgr_or_client, *args, **kwargs):\n raise (\"Not implemented yet!\")", "def list_routers_on_l3_agent(self, l3_agent, **_params):\r\n return self.get((self.agent_path + self.L3_ROUTERS) % l3_agent,\r\n params=_params)", "def get_all_l3_agents(self, plugin, context):\n with context.session.begin(subtransactions=True):\n query = context.session.query(agents_db.Agent)\n query = query.filter(\n agents_db.Agent.topic == 'l3_agent')\n query = (query.filter_by(admin_state_up=True))\n\n return [l3_agent\n for l3_agent in query\n if (agentschedulers_db.AgentSchedulerDbMixin.\n is_eligible_agent(True, l3_agent))]", "def list_router(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/routers.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing routers.\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List router Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Router List : %s \" % output)\n\n return output[\"routers\"]", "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def get_candidates(self, plugin, context, sync_router, subnet_id):\n with context.session.begin(subtransactions=True):\n # allow one router is hosted by just\n # one enabled l3 agent hosting since active is just a\n # timing problem. 
Non-active l3 agent can return to\n # active any time\n l3_agents = plugin.get_l3_agents_hosting_routers(\n context, [sync_router['id']], admin_state_up=True)\n if l3_agents and not sync_router.get('distributed', False):\n LOG.debug(_('Router %(router_id)s has already been hosted'\n ' by L3 agent %(agent_id)s'),\n {'router_id': sync_router['id'],\n 'agent_id': l3_agents[0]['id']})\n return\n\n active_l3_agents = plugin.get_l3_agents(context, active=True)\n if not active_l3_agents:\n LOG.warn(_('No active L3 agents'))\n return\n new_l3agents = plugin.get_l3_agent_candidates(context,\n sync_router,\n active_l3_agents,\n subnet_id)\n old_l3agentset = set(l3_agents)\n if sync_router.get('distributed', False):\n new_l3agentset = set(new_l3agents)\n candidates = list(new_l3agentset - old_l3agentset)\n else:\n candidates = new_l3agents\n if not candidates:\n LOG.warn(_('No L3 agents can host the router %s'),\n sync_router['id'])\n return\n\n return candidates", "def test_list_routers_on_l3_agent(self):\n with self.override_role():\n self.agents_client.list_routers_on_l3_agent(self.agent['id'])", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def get_agents(self):\n ret = []\n for i in self.all_instances:\n if i.instance_type == InstanceType.AGENT:\n ret.append(i)\n return ret", "def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)", "def agents(self):\n return AgentManager(session=self._session)", "def all_hosts(self):\n ...", "def getHosts(self):\n raise \"not implemented\"", "def do_GET(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.server.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent_id not found\")\n logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not found.')\n return\n\n if not agent['active']:\n common.echo_json_response(self, 404, \"agent_id not yet active\")\n logger.warning('GET returning 404 response. 
agent_id ' + agent_id + ' not yet active.')\n return\n\n response = {\n 'aik': agent['aik'],\n 'ek': agent['ek'],\n 'ekcert': agent['ekcert'],\n 'regcount': agent['regcount'],\n }\n\n if agent['virtual']:\n response['provider_keys']= agent['provider_keys']\n\n common.echo_json_response(self, 200, \"Success\", response)\n logger.info('GET returning 200 response for agent_id:' + agent_id)\n else:\n # return the available registered uuids from the DB\n json_response = self.server.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')\n\n return", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def fetch_router_list(args):\n nd = NetDevices(production_only=opts.nonprod)\n ret = []\n blocked_groups = []\n if args:\n for arg in args:\n # Try to find the device, but fail gracefully if it can't be found\n device = device_match(arg)\n if not pass_filters(device) or device is None:\n continue\n ret.append(device)\n\n else:\n for entry in nd.itervalues():\n if entry.owningTeam in blocked_groups:\n continue\n if not pass_filters(entry):\n continue\n ret.append(entry)\n\n return sorted(ret, reverse=True)", "def list_agents(self, platform_uuid):\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')", "def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 'vcplatform' in a['name']:\n uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents", "def get_hosts(self):\n\n raise NotImplementedError", "def get_list_hosts(self, path, params):\n eth_src = 
params.get('eth_src')\n host = self._extract_url_base(path)\n reply = self._faucet_collector.get_list_hosts(host, eth_src)\n self._augment_state_reply(reply, path)\n return reply", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def show(directory: str, host: Option[str]):\n\n host_regex = re.compile(host) if host else None\n manager = AgentManager(directory)\n agents = []\n hosts = []\n\n for agent in manager.agents:\n if host is None or host_regex.match(agent.host):\n agents.append(agent)\n if agent.host not in hosts:\n hosts.append(agent.host)\n click.echo(f\"{agent.name}@{agent.host}: speed={agent.speed}\")\n\n click.echo(f\"{len(agents)} agents on {len(hosts)} host(s).\")", "def list_hosts():\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n res = hosts.get_all(db)\n res = {'list': res}\n return jsonify(res)", "def hosts(self):\n return self._hosts", "def hosts(self):\n return self._hosts", "def transport_agents(self):\n return self.get(\"transport_agents\")", "def hosts(self):\n return tuple(self.hosts_)", "def list(self, **kwargs):\n\n return self.getResourceManager() \\\n .getSdk() \\\n .hosts \\\n .list(**kwargs)", "def run(self, agent_args=None):\n agent_args = agent_args or {}\n self.neutron.list_agents(**agent_args)" ]
[ "0.7668786", "0.7257907", "0.71452254", "0.6604342", "0.6339175", "0.62582594", "0.62342864", "0.6228603", "0.62164915", "0.6151718", "0.61158353", "0.59450513", "0.5939779", "0.5933268", "0.5877356", "0.5837007", "0.57619286", "0.5721032", "0.56838113", "0.5661482", "0.56472015", "0.56431395", "0.56411433", "0.5621433", "0.5621159", "0.5621159", "0.55831665", "0.55686814", "0.55418926", "0.5482842" ]
0.82901573
0
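A sketch for `list_l3_agent_hosting_routers` from the record above; the field names in the loop assume the standard Neutron agent schema (`id`, `host`, `alive`), and all concrete values are placeholders:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:5000/v2.0')  # placeholder endpoint
# GET /routers/<router-id>/l3-agents returns a dict keyed by 'agents'.
reply = neutron.list_l3_agent_hosting_routers('router-uuid')
for agent in reply.get('agents', []):
    print(agent['id'], agent.get('host'), agent.get('alive'))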
Adds a router to L3 agent.
def add_router_to_l3_agent(self, l3_agent, body): return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_gateway_router(self, router, body=None):\r\n return self.put((self.router_path % router),\r\n body={'router': {'external_gateway_info': body}})", "def add_router(self):\n router = OrderedDict({self.router_name: {\n 'type': 'OS::Neutron::Router',\n 'properties': {\n 'name': self.router_name,\n 'external_gateway_info': {\n 'network': { 'get_param': 'public_net' }\n }\n }\n }})\n self.template['resources'].update(router)", "def test_create_router_on_l3_agent(self):\n with self.override_role():\n self.agents_client.create_router_on_l3_agent(\n self.agent['id'], router_id=self.router['id'])\n self.addCleanup(\n test_utils.call_and_ignore_notfound_exc,\n self.agents_client.delete_router_from_l3_agent,\n self.agent['id'], router_id=self.router['id'])", "def add_interface_router(self, router, body=None):\r\n return self.put((self.router_path % router) + \"/add_router_interface\",\r\n body=body)", "def add_network(self, router, network, distance):\n self.routers[router].add_network(network, distance)", "def bind_router(self, context, router_id, chosen_agent):\n try:\n with context.session.begin(subtransactions=True):\n binding = l3_agentschedulers_db.RouterL3AgentBinding()\n binding.l3_agent = chosen_agent\n binding.router_id = router_id\n context.session.add(binding)\n except db_exc.DBDuplicateEntry:\n LOG.debug('Router %(router_id)s has already been scheduled '\n 'to L3 agent %(agent_id)s.',\n {'agent_id': chosen_agent.id,\n 'router_id': router_id})\n return\n\n LOG.debug('Router %(router_id)s is scheduled to L3 agent '\n '%(agent_id)s', {'router_id': router_id,\n 'agent_id': chosen_agent.id})", "def create_lrouter_in_ovn(self, router):\n\n external_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:\n router.get('name', 'no_router_name')}\n enabled = router.get('admin_state_up')\n lrouter_name = utils.ovn_name(router['id'])\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.create_lrouter(lrouter_name,\n external_ids=external_ids,\n enabled=enabled,\n options={}))", "def add_route(enode, route, via, shell=None):\n via = ip_address(via)\n\n version = '-4'\n if (via.version == 6) or \\\n (route != 'default' and ip_network(route).version == 6):\n version = '-6'\n\n cmd = 'ip {version} route add {route} via {via}'.format(\n version=version, route=route, via=via\n )\n\n response = enode(cmd, shell=shell)\n assert not response", "def create_router(self, body=None):\r\n return self.post(self.routers_path, body=body)", "def add_route(app, *args):\n for route in args:\n app.router.add_route(route[0], route[1], route[2])", "def add_transport(self, agent):\n with self.simulation_mutex:\n self.get(\"transport_agents\")[agent.name] = agent", "def create_lrouter_in_ovn(self, router):\n\n router_name = utils.ovn_name(router['id'])\n external_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:\n router.get('name', 'no_router_name')}\n enabled = router.get('admin_state_up')\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.create_lrouter(router_name,\n external_ids=external_ids,\n enabled=enabled\n ))\n # todo(zhoucx): create gw resource.\n selected_chassis = self.scheduler.select(self._ovn, self._sb_ovn, None)\n # NOTE(zhoucx) here we can ignore selected_chassis == ovn_const.OVN_GATEWAY_INVALID_CHASSIS,\n # schedule_unhosted_routers() will update it .\n gw_router_name = utils.ovn_gateway_name(router['id'])\n router_options = {'chassis': selected_chassis}\n txn.add(self._ovn.create_lrouter(gw_router_name,\n external_ids={ovn_const.OVN_GATEWAY_EXT_ID_KEY: 
router['name']},\n options=router_options,\n enabled=True\n ))\n # create transit switch\n transit_switch_name = 'transit-'+router['id']\n txn.add(self._ovn.create_lswitch(lswitch_name=transit_switch_name,\n external_ids={ovn_const.OVN_TRANSIT_NETWORK_EXT_ID_KEY: router['name']}\n ))\n # create\n with self._ovn.transaction(check_error=True) as txn:\n base_mac = n_cfg.CONF.base_mac.split(':')\n dvr_to_transit_port = {'mac_address': n_utils.get_random_mac(base_mac),\n 'networks': ovn_const.OVN_LROUTER_TRANSIT_PORT_NETWORK,\n 'ip_address': ovn_const.OVN_LROUTER_TRANSIT_PORT_IP}\n txn.add(self._ovn.add_lrouter_port(\n name='dvr-to-transit-%s' % router['id'],\n lrouter=router_name,\n mac=dvr_to_transit_port['mac_address'],\n networks=dvr_to_transit_port['networks']\n ))\n\n txn.add(self._ovn.create_lswitch_port(\n lport_name='transit-to-dvr-%s' % router['id'],\n lswitch_name=transit_switch_name,\n addresses=[dvr_to_transit_port['mac_address']+' ' +\n dvr_to_transit_port['ip_address']],\n external_ids=None,\n type='router'))\n gw_to_transit_port = {'mac_address': n_utils.get_random_mac(base_mac),\n 'networks': ovn_const.OVN_GATEWAY_TRANSIT_PORT_NETWORK,\n 'ip_address': ovn_const.OVN_GATEWAY_TRANSIT_PORT_IP}\n txn.add(self._ovn.add_lrouter_port(\n name='gw-to-transit-%s' % router['id'],\n lrouter=gw_router_name,\n mac=gw_to_transit_port['mac_address'],\n networks=gw_to_transit_port['networks']\n ))\n txn.add(self._ovn.create_lswitch_port(\n lport_name='transit-to-gw-%s' % router['id'],\n lswitch_name=transit_switch_name,\n addresses=[gw_to_transit_port['mac_address']+' ' +\n gw_to_transit_port['ip_address']],\n external_ids=None,\n type='router'))\n # connect them.\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.set_lrouter_port_in_lswitch_port(\n lswitch_port='transit-to-dvr-%s' % router['id'],\n lrouter_port='dvr-to-transit-%s' % router['id']))\n txn.add(self._ovn.set_lrouter_port_in_lswitch_port(\n lswitch_port='transit-to-gw-%s' % router['id'],\n lrouter_port='gw-to-transit-%s' % router['id']))", "def add_neighbours(self, router1, router2):\n router1 = self.routers[router1]\n router2 = self.routers[router2]\n\n router1.add_neighbour(router2)\n router2.add_neighbour(router1)", "def add_new(self, name):\n if name not in self.routers:\n self.routers[name] = Router(name)\n return True\n return False", "def do_add_route(self, line):\n items = line.split(' ')\n if len(items) < 3:\n log.error('route only takes at least 3 arguments: '\n 'network via_address metric')\n else:\n points = []\n i = 2\n while i < len(items):\n points.append((items[i-1], items[i]))\n i += 2\n log.critical('Add route request at %s',\n datetime.datetime.now().strftime('%H.%M.%S.%f'))\n self.fibbing.install_route(items[0], points, True)", "def extend(self, router):\n self.registry.extend(router.registry)", "def add_returned_route_on_gw(self, context, router_id, port):\n LOG.debug('OVNL3RouterPlugin::')\n ovn_router_name = utils.ovn_gateway_name(router_id)\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip['subnet_id']\n subnet = self._plugin.get_subnet(context, subnet_id)\n route = {'destination': subnet['cidr'], 'nexthop': ovn_const.OVN_LROUTER_TRANSIT_PORT_IP}\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.add_static_route(ovn_router_name,\n ip_prefix=route['destination'],\n nexthop=route['nexthop']))", "def __init__(self, router):\n\n self.router = router", "def mountRouter(self, predicate, router):\n pass", "def create_router(self, environment, *, 
router=None):\n\n if router is None:\n router = self.router\n\n return utils.objects.ensure_instance(router, environment=environment)", "def append(self, agent):\n self.agents.append(agent)", "def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)", "def __init__(self, router):\n self._router = router", "def addTunnel (self, sourcemachineguid, sourceport, destinationmachineguid, destinationport, jobguid = \"\", executionparams = {}):\n params =dict()\n params['sourceport'] = sourceport\n params['destinationmachineguid'] = destinationmachineguid\n params['sourcemachineguid'] = sourcemachineguid\n params['destinationport'] = destinationport\n return q.workflowengine.actionmanager.startActorAction('ras', 'addTunnel', params, jobguid=jobguid, executionparams=executionparams)", "def add_route(self, item):\n self._routes[item.route] = item\n self.httpd.route(item.route, method=\"GET\", callback=item.get)\n self.httpd.route(item.route, method=\"POST\", callback=item.post)\n self.httpd.route(item.route, method=\"PUT\", callback=item.put)\n self.httpd.route(item.route, method=\"DELETE\", callback=item.delete)", "def add_routes(self):\n pass", "def mountRouterPath(self, path, router):\n pass", "def register_router(self, hostname, expire=-1):", "def router_interface_add(mgr_or_client, router_id, subnet_id,\n *args, **kwargs):\n net_client = _g_router_client(mgr_or_client)\n router = router_show(mgr_or_client, router_id)\n router_id = router['id']\n try:\n return net_client.add_router_interface(\n router_id,\n subnet_id=subnet_id)\n except Exception:\n pass\n try:\n return net_client.add_router_interface_with_subnbet_id(\n router_id,\n subnet_id=subnet_id)\n except Exception:\n pass\n try:\n return netclient_do(net_client,\n 'add_router_interface',\n router_id,\n subnet_id=subnet_id)\n except Exception:\n return netclient_do(net_client,\n 'add_router_interface_with_subnet_id',\n router_id,\n subnet_id=subnet_id)", "def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))" ]
[ "0.734532", "0.7196997", "0.68207264", "0.6697992", "0.65954566", "0.65714985", "0.6471647", "0.6417413", "0.627794", "0.61565644", "0.61321044", "0.6088554", "0.59852374", "0.59830207", "0.5965617", "0.5859502", "0.5854467", "0.5829519", "0.58202237", "0.5806709", "0.579449", "0.5788507", "0.57855105", "0.5758812", "0.5725809", "0.57127625", "0.56991565", "0.567056", "0.56315327", "0.56269544" ]
0.86837584
0
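A sketch for `add_router_to_l3_agent` from the record above, with placeholder UUIDs; unlike the DHCP variant, the body argument is required here and carries the router to schedule:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='admin',
                        auth_url='http://controller:5000/v2.0')  # placeholder endpoint
# POST /agents/<l3-agent-id>/l3-routers with the router id in the body.
neutron.add_router_to_l3_agent('l3-agent-uuid', body={'router_id': 'router-uuid'})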
Fetches a list of all firewall rules for a tenant.
def list_firewall_rules(self, retrieve_all=True, **_params): # Pass filters in "params" argument to do_request return self.list('firewall_rules', self.firewall_rules_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_firewall_rules(self, server):\n server_uuid, server_instance = uuid_and_instance(server)\n\n url = f'/server/{server_uuid}/firewall_rule'\n res = self.api.get_request(url)\n\n return [\n FirewallRule(server=server_instance, **firewall_rule)\n for firewall_rule in res['firewall_rules']['firewall_rule']\n ]", "def list_firewalls(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n\r\n return self.list('firewalls', self.firewalls_path, retrieve_all,\r\n **_params)", "def get_dedicated_fwl_rules(self, firewall_id):\r\n svc = self.client['Network_Vlan_Firewall']\r\n return svc.getRules(id=firewall_id, mask=RULE_MASK)", "def list_resolver_rules(MaxResults=None, NextToken=None, Filters=None):\n pass", "def get_standard_fwl_rules(self, firewall_id):\r\n svc = self.client['Network_Component_Firewall']\r\n return svc.getRules(id=firewall_id, mask=RULE_MASK)", "def cloudflare_waf_firewall_rule_list_request(self, args: dict, page: int = None, page_size: int = None) -> Dict[str, Any]:\n\n params = remove_empty_elements({\n 'id': args.get('rule_id'),\n 'description': args.get('description'),\n 'action': args.get('action'),\n 'paused': args.get('paused'),\n 'page': page,\n 'per_page': page_size\n })\n zone_id = args.get('zone_id')\n return self._http_request(\n method='GET',\n url_suffix=f'zones/{zone_id}/firewall/rules',\n params=params)", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def get_firewalls(self):\r\n mask = ('firewallNetworkComponents,'\r\n 'networkVlanFirewall,'\r\n 'dedicatedFirewallFlag,'\r\n 'firewallGuestNetworkComponents,'\r\n 'firewallInterfaces,'\r\n 'firewallRules,'\r\n 'highAvailabilityFirewallFlag')\r\n\r\n return [firewall\r\n for firewall in self.account.getNetworkVlans(mask=mask)\r\n if has_firewall(firewall)]", "def firewalls(self) -> Sequence['outputs.SubResourceResponse']:\n return pulumi.get(self, \"firewalls\")", "def get_rules(self, **params):\n return self._make_request(\n \"GET\", f\"/2/tweets/search/stream/rules\", params=params,\n endpoint_parameters=(\"ids\",), data_type=StreamRule\n )", "def get_customer_rules(connection, customer_id):\n connection.command_path = \"customer/{0}/rules\".format(customer_id)\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n if res.status_code > 210:\n raise CustomerNotFoundException(res.content)\n body = res.content\n return customers.parse_customer_rules(body)", "def getListOfRules(self):\n return self.model.getListOfRules()", "def _get_firewall_rules(firewall_rules):\n ret = []\n for key, value in firewall_rules.items():\n # Verify the required 'protocol' property is present in the cloud\n # profile config\n if \"protocol\" not in firewall_rules[key].keys():\n raise SaltCloudConfigError(\n \"The firewall rule '{}' is missing 'protocol'\".format(key)\n )\n ret.append(\n FirewallRule(\n name=key,\n protocol=firewall_rules[key].get(\"protocol\", None),\n source_mac=firewall_rules[key].get(\"source_mac\", None),\n source_ip=firewall_rules[key].get(\"source_ip\", None),\n target_ip=firewall_rules[key].get(\"target_ip\", None),\n port_range_start=firewall_rules[key].get(\"port_range_start\", None),\n port_range_end=firewall_rules[key].get(\"port_range_end\", None),\n icmp_type=firewall_rules[key].get(\"icmp_type\", None),\n icmp_code=firewall_rules[key].get(\"icmp_code\", None),\n )\n )\n\n 
return ret", "def test_list_firewall_rules_sort(self):\r\n resources = \"firewall_rules\"\r\n cmd = firewallrule.ListFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def get_list_of_rules(app_stack_name):\n\n cloudformation = boto3.client('cloudformation')\n response = cloudformation.describe_stack_resources(\n StackName=app_stack_name,\n LogicalResourceId='ALBListenerSSL'\n )\n alb_listener = response['StackResources'][0]['PhysicalResourceId']\n\n client = boto3.client('elbv2')\n response = client.describe_rules(ListenerArn=alb_listener)\n return response['Rules']", "def get_all_rules_for_playbook(self, playbook_uuid, sort_dir=SortDirection.ASC, client_id=None):\n\n if client_id is None:\n client_id = self._use_default_client_id()[0]\n\n url = self.api_base_url.format(str(client_id)) + \"/{}/rules\".format(playbook_uuid)\n\n page_size = 1000\n\n try:\n num_pages = self._get_playbook_page_info(url, page_size)\n except RequestFailed:\n raise\n\n page_range = range(0, num_pages)\n\n search_func_params = {\n \"playbook_uuid\": playbook_uuid,\n \"page_size\": page_size,\n \"sort_dir\": sort_dir,\n \"client_id\": client_id\n }\n\n try:\n all_rules = self._fetch_in_bulk(self.get_single_page_playbook_rules, page_range, **search_func_params)\n except RequestFailed:\n raise\n\n return all_rules", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def get_rules(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetRulesV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetRulesV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def cloudflare_waf_firewall_rule_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n zone_id = args.get('zone_id', client.zone_id)\n rule_id = args.get('id')\n description = args.get('description')\n action = args.get('action')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n page = arg_to_number(args.get('page'))\n page_size = arg_to_number(args.get('page_size'))\n limit = arg_to_number(args.get('limit'))\n\n validate_pagination_arguments(page, page_size, limit)\n\n firewall_rules = []\n\n command_args = {'zone_id': zone_id, 'rule_id': rule_id, 'description': description, 'action': action, 'paused': paused}\n pagination_args = {'limit': limit, 'page': page, 'page_size': page_size}\n response, output, pagination_message = pagination(\n client.cloudflare_waf_firewall_rule_list_request, command_args, pagination_args)\n\n for fr in output:\n firewall_rules.append({'id': fr['id'], 'action': fr['action'], 'paused': fr['paused'],\n 'description': dict_safe_get(fr, ['description']), 'filter_id': fr['filter']['id'],\n 'filter_expression': fr['filter']['expression']\n })\n fr['zone_id'] = zone_id\n\n readable_output = tableToMarkdown(\n name='Firewall rule list',\n metadata=pagination_message,\n t=firewall_rules,\n headers=['id', 
'action', 'paused', 'description', 'filter_id', 'filter_expression'],\n headerTransform=string_to_table_header\n )\n\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='CloudflareWAF.FirewallRule',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", "def get_of_rules(self, translate_of_ports=True):\n\n try:\n # get rules\n if len(self.ofi2pp) == 0:\n self.obtain_port_correspondence()\n\n of_response = requests.get(self.url + \"restconf/config/opendaylight-inventory:nodes/node/\" + self.id +\n \"/table/0\", headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n\n # The configured page does not exist if there are no rules installed. In that case we return an empty dict\n if of_response.status_code == 404:\n return []\n\n elif of_response.status_code != 200:\n self.logger.warning(\"get_of_rules \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)\n\n self.logger.debug(\"get_of_rules \" + error_text)\n\n info = of_response.json()\n\n if not isinstance(info, dict):\n self.logger.error(\"get_of_rules. Unexpected response not a dict: %s\", str(info))\n raise OpenflowConnUnexpectedResponse(\"Unexpected openflow response, not a dict. Wrong version?\")\n\n table = info.get('flow-node-inventory:table')\n if not isinstance(table, list):\n self.logger.error(\"get_of_rules. Unexpected response at 'flow-node-inventory:table', \"\n \"not a list: %s\", str(type(table)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'flow-node-inventory:table', not a list. \"\n \"Wrong version?\")\n\n flow_list = table[0].get('flow')\n if flow_list is None:\n return []\n\n if not isinstance(flow_list, list):\n self.logger.error(\"get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a \"\n \"list: %s\", str(type(flow_list)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'flow-node-inventory:table'[0]:'flow', \"\n \"not a list. Wrong version?\")\n\n # TODO translate ports according to translate_of_ports parameter\n\n rules = [] # Response list\n for flow in flow_list:\n if not ('id' in flow and 'match' in flow and 'instructions' in flow and\n 'instruction' in flow['instructions'] and\n 'apply-actions' in flow['instructions']['instruction'][0] and\n 'action' in flow['instructions']['instruction'][0]['apply-actions']):\n raise OpenflowConnUnexpectedResponse(\"unexpected openflow response, one or more elements are \"\n \"missing. 
Wrong version?\")\n\n flow['instructions']['instruction'][0]['apply-actions']['action']\n\n rule = dict()\n rule['switch'] = self.dpid\n rule['priority'] = flow.get('priority')\n # rule['name'] = flow['id']\n # rule['cookie'] = flow['cookie']\n if 'in-port' in flow['match']:\n in_port = flow['match']['in-port']\n if in_port not in self.ofi2pp:\n raise OpenflowConnUnexpectedResponse(\"Error: Ingress port {} is not in switch port list\".\n format(in_port))\n\n if translate_of_ports:\n in_port = self.ofi2pp[in_port]\n\n rule['ingress_port'] = in_port\n\n if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \\\n 'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \\\n 'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \\\n flow['match']['vlan-match']['vlan-id']['vlan-id-present'] is True:\n rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id']\n\n if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] \\\n and 'address' in flow['match']['ethernet-match']['ethernet-destination']:\n rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address']\n\n instructions = flow['instructions']['instruction'][0]['apply-actions']['action']\n\n max_index = 0\n for instruction in instructions:\n if instruction['order'] > max_index:\n max_index = instruction['order']\n\n actions = [None]*(max_index+1)\n for instruction in instructions:\n if 'output-action' in instruction:\n if 'output-node-connector' not in instruction['output-action']:\n raise OpenflowConnUnexpectedResponse(\"unexpected openflow response, one or more elementa \"\n \"are missing. Wrong version?\")\n\n out_port = instruction['output-action']['output-node-connector']\n if out_port not in self.ofi2pp:\n raise OpenflowConnUnexpectedResponse(\"Error: Output port {} is not in switch port list\".\n format(out_port))\n\n if translate_of_ports:\n out_port = self.ofi2pp[out_port]\n\n actions[instruction['order']] = ('out', out_port)\n\n elif 'strip-vlan-action' in instruction:\n actions[instruction['order']] = ('vlan', None)\n\n elif 'set-field' in instruction:\n if not ('vlan-match' in instruction['set-field'] and\n 'vlan-id' in instruction['set-field']['vlan-match'] and\n 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']):\n raise OpenflowConnUnexpectedResponse(\"unexpected openflow response, one or more elements \"\n \"are missing. 
Wrong version?\")\n\n actions[instruction['order']] = ('vlan',\n instruction['set-field']['vlan-match']['vlan-id']['vlan-id'])\n\n actions = [x for x in actions if x is not None]\n\n rule['actions'] = list(actions)\n rules.append(rule)\n\n return rules\n except requests.exceptions.RequestException as e:\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_rules \" + error_text)\n raise OpenflowConnConnectionException(error_text)\n except ValueError as e:\n # ValueError in the case that JSON can not be decoded\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_rules \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)", "def list_firewall_policies(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n\r\n return self.list('firewall_policies', self.firewall_policies_path,\r\n retrieve_all, **_params)", "def list_security_group_rules(self, retrieve_all=True, **_params):\r\n return self.list('security_group_rules',\r\n self.security_group_rules_path,\r\n retrieve_all, **_params)", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def refresh_firewall(self, device_ids=None):\n if not device_ids:\n device_ids = self.firewall.ports.keys()\n if not device_ids:\n LOG.info(_LI(\"No ports here to refresh firewall.\"))\n return\n LOG.info(_LI(\"Refresh firewall rules for %s ports.\"), len(device_ids))\n self._process_port_set(set(device_ids), True)", "def get_chain_rules(data_base):\n \n business_rule_db = DB(data_base, host='business_rules_db')\n df = business_rule_db.execute(f\"SELECT `id`, `rule_id`,`rule_string`, `next_if_sucess`, `next_if_failure`, `group`, `description`, `data_source` from `sequence_rule_data`\")\n chained_rules = [[e['rule_id'], e['rule_string'], e['next_if_sucess'], e['next_if_failure'], e['group'],e['description'], e['data_source']] for e in df.to_dict(orient='records') ]\n \n return chained_rules", "def configure_firewall(self, server, firewall_rule_bodies):\n server_uuid, server_instance = uuid_and_instance(server)\n\n return [self.create_firewall_rule(server_uuid, rule) for rule in firewall_rule_bodies]", "def get_rules_for_category(category):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE category_id = ?', (category,)).fetchall()\n\n return rules", "def lists_rule(self, id: str, filter: str = None) -> dict:\n r = requests.get(self.url + '/{}/rules'.format(id), headers=self.headers)\n\n if filter:\n data = r.json()\n return filter_list(data=data, filter_by=filter)\n\n return r.json()" ]
[ "0.6232312", "0.6231069", "0.61562014", "0.58896816", "0.58752656", "0.5728168", "0.5700419", "0.5683381", "0.56574285", "0.55847776", "0.55532306", "0.5510661", "0.5491232", "0.54786634", "0.53851366", "0.53546613", "0.5225483", "0.5210484", "0.5197971", "0.51920617", "0.5183596", "0.5179728", "0.51637226", "0.5138854", "0.5090271", "0.5036062", "0.49519193", "0.4944132", "0.48817495", "0.48742974" ]
0.702476
0
Fetches information about a certain firewall rule.
def show_firewall_rule(self, firewall_rule, **_params):
    return self.get(self.firewall_rule_path % (firewall_rule),
                    params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule", "def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule", "def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule", "def get_firewall_rule(self, server_uuid, firewall_rule_position, server_instance=None):\n url = f'/server/{server_uuid}/firewall_rule/{firewall_rule_position}'\n res = self.api.get_request(url)\n return FirewallRule(**res['firewall_rule'])", "def get_firewall_rule(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_firewall_rule(\n name_or_id, ignore_missing=True, **filters\n )", "def get_rule(self):\n\n return self.__rule_", "def get_rule(rule_id):\n\n rule = get_db().execute('SELECT i.*, c.name as category_name FROM ruleset i JOIN categories c ON i.category_id = c.id WHERE i.id = ?', (rule_id, )).fetchone()\n\n return rule", "def get_firewall(client, firewall_name):\n response = client.describe_firewall(\n FirewallName=firewall_name,\n )\n return response", "def get_rule(self, rule_name):\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n return self.rules[rule_name]", "def find(self, rule_name):\n return self.rules[rule_name]", "def show_firewall(self, firewall, **_params):\r\n return self.get(self.firewall_path % (firewall), params=_params)", "def get_rule(self, ruleset_id):\n #access undocumented rule API\n url = self.API_URL % ('public_rulesets/', ruleset_id, '') \n return requests.get(url=url, headers=self.headers, proxies=self.proxies, verify=self.verify_ssl)", "def get_snat_rule(self, snatrule):\n return self._get(_snat.Rule, snatrule)", "def get_resolver_rule(ResolverRuleId=None):\n pass", "def get(self, request, l7_rule_id, l7_policy_id):\n conn = get_sdk_connection(request)\n l7_rule = conn.load_balancer.find_l7_rule(l7_rule_id, l7_policy_id)\n return _get_sdk_object_dict(l7_rule)", "def get_rule(self):\n return self.rule.state_dict()", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"end_ip\"] = None\n __props__[\"name\"] = None\n __props__[\"start_ip\"] = None\n __props__[\"type\"] = None\n return FirewallRule(resource_name, opts=opts, __props__=__props__)", "def show_rule(bot, trigger):\n if not trigger.group(2):\n bot.say('.rule <id> - The Internet has no rules. 
No Exceptions.')\n return\n\n rule = trigger.group(2).strip()\n\n if not trigger.sender.is_nick() and bot.privileges[trigger.sender][bot.nick] >= HALFOP:\n if rule == \"WAMM\" or rule == \"asie\":\n bot.write(['KICK', trigger.sender, trigger.nick], text='The kick is the rule')\n return\n\n if rule.strip() in rules:\n bot.say('Rule {0}: \"{1}\"'.format(rule, rules[rule]))\n else:\n bot.say('Rule {0} is a lie.'.format(rule))", "def get_dedicated_fwl_rules(self, firewall_id):\r\n svc = self.client['Network_Vlan_Firewall']\r\n return svc.getRules(id=firewall_id, mask=RULE_MASK)", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def get_rule(self, name):\n\n return self._control_manager.get_rule(name)" ]
[ "0.66064155", "0.66064155", "0.66064155", "0.6383028", "0.63234866", "0.62817323", "0.6250253", "0.6178156", "0.6120374", "0.61120397", "0.607358", "0.60636246", "0.60278934", "0.5957132", "0.5920766", "0.5900916", "0.58918655", "0.58585995", "0.58002335", "0.5760116", "0.5760116", "0.5760116", "0.5760116", "0.5760116", "0.5760116", "0.5760116", "0.57479984", "0.57479984", "0.57479984", "0.57306165" ]
0.75909406
0
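
The record above pairs the docstring query with show_firewall_rule, a thin GET wrapper in the python-neutronclient style. A minimal usage sketch follows; the client construction, credentials, and rule UUID are illustrative assumptions, not values drawn from the record.

# Minimal usage sketch for the show_firewall_rule wrapper documented above.
# Credentials, endpoint, and the rule UUID are placeholders (assumptions).
from neutronclient.v2_0 import client

neutron = client.Client(username='admin',
                        password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

rule_id = '11111111-2222-3333-4444-555555555555'  # placeholder UUID
# Extra keyword arguments pass through **_params and become GET query filters.
result = neutron.show_firewall_rule(rule_id, fields=['name', 'action'])
print(result['firewall_rule'])
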
Creates a new firewall rule.
def create_firewall_rule(self, body=None):
    return self.post(self.firewall_rules_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_firewall_rule(self, server, firewall_rule_body):\n server_uuid, server_instance = uuid_and_instance(server)\n\n url = f'/server/{server_uuid}/firewall_rule'\n body = {'firewall_rule': firewall_rule_body}\n res = self.api.post_request(url, body)\n\n return FirewallRule(server=server_instance, **res['firewall_rule'])", "def create_firewall_rule(self, ipaddressid, protocol, cidrlist, ftype, \n startport=None, endport=None,\n icmpcode=None, icmptype=None): \n params = {'command':'createFirewallRule',\n 'ipaddressid':ipaddressid,\n 'protocol':protocol,\n 'cidrlist':cidrlist,\n 'type':ftype}\n\n if startport:\n params['startport'] = startport\n if endport:\n params['endport'] = endport\n if icmpcode:\n params['icmpcode'] = icmpcode\n if icmptype:\n params['icmptype'] = icmptype \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['createfirewallruleresponse']['jobid']\n self.logger.debug('Start job - createfirewallruleresponse: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def create_firewall_rule(project):\n listed_rules = subprocess.check_output(\n ['gcloud', 'compute', 'firewall-rules', 'list',\n '--format', 'value(name)',\n '--filter', 'name=%s' % LEO_FIREWALL_RULE,\n '--project', project])\n if LEO_FIREWALL_RULE in listed_rules:\n return\n Print.GN('Creating firewall rule for Leonardo VM.')\n subprocess.check_call(\n ['gcloud', 'compute', 'firewall-rules', 'create',\n LEO_FIREWALL_RULE,\n '--allow', 'tcp:80,tcp:443',\n '--priority', '900',\n '--target-tags', LEO_FIREWALL_RULE,\n '--project', project])", "def create_firewall(self, body=None):\r\n return self.post(self.firewalls_path, body=body)", "def _setup_create_firewall_rule_with_all_params(self, protocol='tcp'):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.CreateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n source_ip = '192.168.1.0/24'\r\n destination_ip = '192.168.2.0/24'\r\n source_port = '0:65535'\r\n destination_port = '0:65535'\r\n action = 'allow'\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--protocol', protocol,\r\n '--source-ip-address', source_ip,\r\n '--destination-ip-address', destination_ip,\r\n '--source-port', source_port,\r\n '--destination-port', destination_port,\r\n '--action', action,\r\n '--enabled',\r\n '--admin-state-up',\r\n '--tenant-id', tenant_id]\r\n position_names = []\r\n position_values = []\r\n if protocol == 'any':\r\n protocol = None\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n protocol=protocol,\r\n source_ip_address=source_ip,\r\n destination_ip_address=destination_ip,\r\n source_port=source_port,\r\n destination_port=destination_port,\r\n action=action, enabled=True,\r\n tenant_id=tenant_id)", "def Create(self,\n firewall_policy=None,\n firewall_policy_rule=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.AddRule(\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n 
firewall_policy_rule=firewall_policy_rule)[2])\n return self.WaitOperation(\n op_res, message='Adding a rule to the organization firewall policy.')", "def create_rule(self, id: str, start_port: str, protocol: str = 'tcp', end_port: str = None,\n cidr: str = '0.0.0.0/0',\n direction: str = 'inbound', label: str = None) -> dict:\n payload = {'protocol': protocol, 'start_port': start_port, 'cidr': cidr, 'direction': direction}\n\n if end_port:\n payload['end_port'] = end_port\n\n if label:\n payload['label'] = label\n\n r = requests.post(self.url + '/{}/rules'.format(id), headers=self.headers, params=payload)\n\n return r.json()", "def create_rule(self: object,\n body: dict,\n cs_username: str = None # pylint: disable=W0613 # cs_username is deprecated\n ) -> dict:\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"create_rule\",\n body=body\n )", "def create_snat_rule(self, **attrs):\n return self._create(_snat.Rule, **attrs)", "def create_firewall_policy(self, body=None):\r\n return self.post(self.firewall_policies_path, body=body)", "def add_rule(self, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tself._log.info('Adding new rule to the blacklist rules set: %s' % rule)\n\t\t\tself._blacklist_rules.append(rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tself._log.info('Adding new rule to the whitelist rules set: %s' % rule)\n\t\t\tself._whitelist_rules.append(rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()", "def post(self):\n args = self.reqparse.parse_args()\n # check for the rule things\n rule = Rule(port=args['port'], protocol=args['protocol'], \\\n action=args['action'])\n ip = args.get('ip', None)\n mac = args.get('mac', None)\n if not (ip == None):\n rule.ip = IP(ip=ip.ip, ipv4=ip.ipv4)\n if not (mac == None):\n rule.mac = MAC(mac=mac.mac)\n\n session.add(rule)\n session.commit()\n return [], 200", "def add_rule(self, ip_protocol, from_port, to_port,\r\n src_group_name, src_group_owner_id, cidr_ip):\r\n rule = IPPermissions(self)\r\n rule.ip_protocol = ip_protocol\r\n rule.from_port = from_port\r\n rule.to_port = to_port\r\n self.rules.append(rule)\r\n rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)", "def create_port_forward_rule(self, ipaddressid, protocol, virtualmachineid,\n privateport, privateendport,\n publicport, publicendport): \n params = {'command':'createPortForwardingRule',\n 'ipaddressid':ipaddressid,\n 'protocol':protocol,\n 'privateport':privateport,\n 'privateendport':privateendport,\n 'publicport':publicport,\n 'publicendport':publicendport,\n 'virtualmachineid':virtualmachineid,\n 'openfirewall':False} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['createportforwardingruleresponse']['jobid']\n self.logger.debug('Start job - createPortForwardingRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def cloudflare_waf_firewall_rule_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n action = args['action']\n zone_id = args.get('zone_id', client.zone_id)\n filter_id = args.get('filter_id')\n filter_expression = args.get('filter_expression')\n products = 
argToList(args.get('products'))\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n priority = arg_to_number(args.get('priority'))\n ref = args.get('ref')\n\n response = client.cloudflare_waf_firewall_rule_create_request(\n action, zone_id,\n description=description, products=products, paused=paused, priority=priority, ref=ref,\n filter_id=filter_id, filter_expression=filter_expression)\n\n output = response['result']\n firewall_rule_output = output[0]\n\n firewall_rule = [{'id': dict_safe_get(firewall_rule_output, ['id']),\n 'action': dict_safe_get(firewall_rule_output, ['action']),\n 'paused': dict_safe_get(firewall_rule_output, ['paused']),\n 'description': dict_safe_get(firewall_rule_output, ['description']),\n 'filter_id': dict_safe_get(firewall_rule_output, ['filter', 'id']),\n 'filter_expression': dict_safe_get(firewall_rule_output, ['filter', 'expression']),\n 'products': dict_safe_get(firewall_rule_output, ['products']),\n 'ref': dict_safe_get(firewall_rule_output, ['ref']),\n 'priority': dict_safe_get(firewall_rule_output, ['priority']),\n 'zone_id': zone_id}]\n\n readable_output = tableToMarkdown(\n name='Firewall rule was successfully created.',\n t=firewall_rule,\n headers=['id', 'action', 'filter_id', 'filter_expression', 'products', 'priority', 'paused', 'description', 'ref'],\n headerTransform=string_to_table_header\n )\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='CloudflareWAF.FirewallRule',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", "def create_security_group_rule(self, body=None):\r\n return self.post(self.security_group_rules_path, body=body)", "def cloudflare_waf_firewall_rule_create_request(self, action: str, zone_id: str, description: str = None,\n products: List[str] = None, paused: bool = None, priority: int = None,\n ref: str = None, filter_id: int = None,\n filter_expression: str = None) -> Dict[str, Any]:\n params = remove_empty_elements({\n 'description': description,\n 'products': products,\n 'action': action,\n 'paused': paused,\n 'priority': priority,\n 'ref': ref,\n 'filter': {'id': filter_id, 'expression': filter_expression}\n })\n return self._http_request(\n method='POST',\n url_suffix=f'zones/{zone_id}/firewall/rules',\n json_data=[params])", "def create_firewall(context):\n return [{\n 'type': 'templates/firewall.py',\n 'name': 'fc-firewall',\n 'properties': {\n 'projectId':\n '$(ref.fc-project.projectId)',\n 'network':\n '$(ref.fc-network.selfLink)',\n 'dependsOn':\n '$(ref.fc-network.resourceNames)',\n 'rules': [\n {\n 'name': 'allow-internal',\n 'description': 'Allow internal traffic on the network.',\n 'allowed': [{\n 'IPProtocol': 'icmp',\n }, {\n 'IPProtocol': 'tcp',\n 'ports': ['0-65535'],\n }, {\n 'IPProtocol': 'udp',\n 'ports': ['0-65535'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['10.128.0.0/9'],\n 'priority': 65534,\n },\n {\n 'name': 'leonardo-ssl',\n 'description': 'Allow SSL traffic from Leonardo-managed VMs.',\n 'allowed': [{\n 'IPProtocol': 'tcp',\n 'ports': ['443'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['0.0.0.0/0'],\n 'targetTags': ['leonardo'],\n },\n ],\n },\n }]", "def _test_create_firewall_rule_with_mandatory_params(self, enabled):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.CreateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n tenant_id = 'my-tenant'\r\n name = ''\r\n my_id = 'myid'\r\n protocol = 'tcp'\r\n action = 'allow'\r\n enabled_flag = '--enabled' if enabled else 
'--disabled'\r\n args = ['--tenant-id', tenant_id,\r\n '--admin-state-up',\r\n '--protocol', protocol,\r\n '--action', action,\r\n enabled_flag]\r\n position_names = []\r\n position_values = []\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n protocol=protocol, action=action,\r\n enabled=enabled, tenant_id=tenant_id)", "def test_create_update_delete_firewall_rule(self):\n ctx = context.get_admin_context()\n clnt = self.plugin._client\n with self.firewall_rule(do_delete=False) as fwr:\n fwr_id = fwr['firewall_rule']['id']\n # Create Firewall Rule\n crd_rule = {'firewall_rule': fwr}\n clnt.create_firewall_rule.assert_called_once_with(fwr)\n # Update Firewall Rule\n data = {'firewall_rule': {'name': 'new_rule_name',\n 'source_port': '10:20',\n 'destination_port': '30:40'}}\n fw_rule = self.plugin.update_firewall_rule(ctx, fwr_id, data)\n crd_rule = {'firewall_rule': fw_rule}\n clnt.update_firewall_rule.assert_called_once_with(fwr_id, crd_rule)\n # Delete Firewall Rule\n self.plugin.delete_firewall_rule(ctx, fwr_id)\n clnt.delete_firewall_rule.assert_called_once_with(fwr_id)", "def CreateRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateRule\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def test_creation(client_rule_factory, configured_flask_client):\n preparator = RuleFactoryPreparator(client_rule_factory)\n preparator.prepare_method_rule(method_rule_class=MethodRule)\n\n rule_spec = MethodRule(method=\"PUT\")\n rule = configured_flask_client.create_rule(rule=rule_spec)\n\n assert rule.rule_id is not None, \"Rule was not created\"\n assert rule.method == rule_spec.method, \"Wrong method\"", "def test_create_firewall_with_mandatory_params(self):\r\n resource = 'firewall'\r\n cmd = firewall.CreateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n name = ''\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n policy_id = 'my-policy-id'\r\n args = ['--tenant-id', tenant_id, policy_id, ]\r\n position_names = ['firewall_policy_id', ]\r\n position_values = [policy_id, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True, tenant_id=tenant_id)", "def test_create_firewall_with_all_params(self):\r\n resource = 'firewall'\r\n cmd = firewall.CreateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n policy_id = 'my-policy-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--description', description,\r\n '--shared',\r\n '--admin-state-down',\r\n '--tenant-id', tenant_id,\r\n policy_id]\r\n position_names = ['firewall_policy_id', ]\r\n position_values = [policy_id, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description,\r\n shared=True, admin_state_up=False,\r\n tenant_id=tenant_id)", "def create_acl_rule(self, context, sgr):\n self.security_group_driver.create_acl_rule(context, sgr)", "def rule_create(self, parent_group_id,\n direction=None, ethertype=None,\n ip_protocol=None, from_port=None, to_port=None,\n cidr=None, group_id=None, description=None):\n if not cidr:\n cidr = None\n if isinstance(from_port, int) and from_port < 
0:\n from_port = None\n if isinstance(to_port, int) and to_port < 0:\n to_port = None\n if isinstance(ip_protocol, int) and ip_protocol < 0:\n ip_protocol = None\n\n params = {'security_group_id': parent_group_id,\n 'direction': direction,\n 'ethertype': ethertype,\n 'protocol': ip_protocol,\n 'port_range_min': from_port,\n 'port_range_max': to_port,\n 'remote_ip_prefix': cidr,\n 'remote_group_id': group_id}\n if description is not None:\n params['description'] = description\n body = {'security_group_rule': params}\n try:\n rule = self.client.create_security_group_rule(body)\n except neutron_exc.OverQuotaClient:\n raise exceptions.Conflict(\n _('Security group rule quota exceeded.'))\n except neutron_exc.Conflict:\n raise exceptions.Conflict(\n _('Security group rule already exists.'))\n rule = rule.get('security_group_rule')\n sg_dict = self._sg_name_dict(parent_group_id, [rule])\n return SecurityGroupRule(rule, sg_dict)", "def make_rule(name, seq_id, action, protocol, src_ip, src_mask, dst_ip,\n dst_mask, sport_operator, sport_low, sport_high,\n dport_operator, dport_low, dport_high, count, log, dscp):\n xml_tring = template.IP_ACL_RULE.format()\n the_config = etree.fromstring(xml_tring)\n remove_unused_tags(the_config, name, action, protocol, src_ip, dst_ip,\n sport_operator, (sport_low, sport_high), dport_operator,\n (dport_low, dport_high), count, log, dscp)\n\n for elt in the_config.iterdescendants():\n if elt.tag == ('seq-id'):\n add_text_to_ele(elt, seq_id)\n elif elt.tag == ('action'):\n add_text_to_ele(elt, action)\n elif elt.tag == ('protocol-type'):\n add_text_to_ele(elt, protocol)\n elif elt.tag == ('src-host-any-sip'):\n add_text_to_ele(elt, src_ip)\n elif elt.tag == ('src-mask'):\n add_text_to_ele(elt, src_mask)\n elif elt.tag == ('dst-host-any-dip'):\n add_text_to_ele(elt, dst_ip)\n elif elt.tag == ('dst-mask'):\n add_text_to_ele(elt, dst_mask)\n elif elt.tag == ('sport'):\n add_text_to_ele(elt, sport_operator)\n elif \"sport-number-eq-neq\" in elt.tag:\n add_text_to_ele(elt, sport_low)\n elif \"sport-number-range-lower\" in elt.tag:\n add_text_to_ele(elt, sport_low)\n elif \"sport-number-range-higher\" in elt.tag:\n add_text_to_ele(elt, sport_high)\n elif elt.tag == ('dport'):\n add_text_to_ele(elt, dport_operator)\n elif \"dport-number-eq-neq\" in elt.tag:\n add_text_to_ele(elt, dport_low)\n elif \"dport-number-range-lower\" in elt.tag:\n add_text_to_ele(elt, dport_low)\n elif \"dport-number-range-higher\" in elt.tag:\n add_text_to_ele(elt, dport_high)\n elif \"dscp\" in elt.tag:\n add_text_to_ele(elt, dscp)\n\n xml_request = etree.tostring(the_config, pretty_print=True)\n return xml_request", "def create_egress_rule(self, ipaddressid, protocol, cidrlist, ftype, \n startport=None, endport=None,\n icmpcode=None, icmptype=None): \n params = {'command':'createEgressFirewallRule',\n 'networkid':self.id,\n 'protocol':protocol,\n 'cidrlist':cidrlist,\n 'type':ftype}\n\n if startport:\n params['startport'] = startport\n if endport:\n params['endport'] = endport\n if icmpcode:\n params['icmpcode'] = icmpcode\n if icmptype:\n params['icmptype'] = icmptype \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['createegressfirewallruleresponse']['jobid']\n self.logger.debug('Start job - createEgressFirewallRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def add_rule(self, rule):\n \n self.rules.append(rule)", "def 
create_rule(self, security_group, direction, ethertype=None,\n port_range_min=None, port_range_max=None, protocol=None,\n remote_group_id=None, remote_ip_prefix=None):\n data = {\n \"security_group_rule\": {\n \"direction\": direction,\n \"protocol\": protocol,\n \"security_group_id\": security_group\n }\n }\n if remote_ip_prefix is not None:\n data['security_group_rule'].update({\"port_range_min\":port_range_min,\n \"port_range_max\":port_range_max,\n \"ethertype\":ethertype,\n \"remote_ip_prefix\":remote_ip_prefix})\n elif remote_group_id is not None:\n data['security_group_rule'].update({\"port_range_min\":port_range_min,\n \"port_range_max\":port_range_max,\n \"ethertype\":ethertype,\n \"remote_group_id\":remote_group_id})\n\n path = '%s/security-group-rules' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack security group %s rule: %s' % \n (security_group, truncate(res)))\n return res[0]['security_group_rule']" ]
[ "0.75746644", "0.73016423", "0.72972524", "0.7051592", "0.67895025", "0.6624186", "0.6598076", "0.65394807", "0.6520667", "0.6457384", "0.6382312", "0.63332236", "0.6333125", "0.6326056", "0.62172323", "0.6191648", "0.6160085", "0.615451", "0.61526835", "0.6136764", "0.60725296", "0.6044395", "0.6036726", "0.60312456", "0.602165", "0.6007166", "0.5973892", "0.59662306", "0.5948106", "0.5947921" ]
0.8462622
0
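
create_firewall_rule above POSTs the body unchanged, so the caller shapes the request. The sketch below assumes the FWaaS v1 body layout that this client family uses and reuses the hypothetical neutron client from the previous sketch.

# Sketch: shaping a FWaaS v1 request body for create_firewall_rule.
# Field names follow the neutron FWaaS v1 convention; values are illustrative.
body = {
    'firewall_rule': {
        'name': 'allow-http',
        'protocol': 'tcp',
        'destination_port': '80',
        'action': 'allow',
        'enabled': True,
    }
}
created = neutron.create_firewall_rule(body=body)
rule_id = created['firewall_rule']['id']  # keep the id for later calls
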
Updates a firewall rule.
def update_firewall_rule(self, firewall_rule, body=None):
    return self.put(self.firewall_rule_path % (firewall_rule),
                    body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_firewall_rule(self):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.UpdateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def test_update_firewall_rule_protocol(self):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.UpdateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--protocol', 'any'],\r\n {'protocol': None, })", "def cloudflare_waf_firewall_rule_update_request(self, rule_id: str, filter_id: str, zone_id: str, action: str,\n description: str = None, products: List[str] = None, paused: bool = None,\n priority: int = None, ref: str = None) -> Dict[str, Any]:\n params = remove_empty_elements({\n 'id': rule_id,\n 'description': description,\n 'products': products,\n 'action': action,\n 'paused': paused,\n 'priority': priority,\n 'ref': ref,\n 'filter': {'id': filter_id}\n })\n\n return self._http_request(\n method='PUT',\n url_suffix=f'zones/{zone_id}/firewall/rules',\n json_data=[params])", "def put(self, request, *args, **kwargs):\n try:\n new_rule = json.loads(request.body)\n except Exception as e:\n return error('unable to marshal json', str(e))\n try:\n validate_rule_json(new_rule)\n except RuleValidationException as e:\n return error('error validating json', str(e))\n rule = Rule()\n rule.populate(new_rule)\n rule.save()\n return success(rule.summary())", "def Update(self,\n priority=None,\n firewall_policy=None,\n firewall_policy_rule=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeUpdateRuleRequestTuple(\n priority=priority,\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.PatchRule(\n self._MakeUpdateRuleRequestTuple(\n priority=priority,\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)[2])\n return self.WaitOperation(\n op_res, message='Updating a rule in the organization firewall policy.')", "def put(self, request, l7_rule_id, l7_policy_id):\n kwargs = {'l7_rule_id': l7_rule_id, 'l7_policy_id': l7_policy_id}\n update_l7_rule(request, **kwargs)", "def _UpdateAclRule(self, entry):\n\n print 'Update Acl rule: %s' % (entry.GetEditLink().href)\n roleValue = \"http://schemas.google.com/gCal/2005#%s\" % (\"read\")\n entry.role = gdata.acl.data.AclRole(value=roleValue)\n returned_rule = self.cal_client.Update(entry)", "def cloudflare_waf_firewall_rule_update_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n rule_id = args['id']\n zone_id = args.get('zone_id', client.zone_id)\n action = args.get('action')\n filter_id = args.get('filter_id')\n products = args.get('products')\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n priority = arg_to_number(args.get('priority'))\n ref = args.get('ref')\n\n response = client.cloudflare_waf_firewall_rule_update_request(\n rule_id, filter_id, zone_id, action, description=description, # type: ignore\n products=products, paused=paused, priority=priority, ref=ref)\n\n output = response['result']\n\n return CommandResults(\n readable_output=f'Firewall rule {rule_id} was successfully updated.',\n outputs_prefix='CloudflareWAF.FirewallRule',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", 
"def bandwidth_limit_rule_update(request, policy_id, rule_id, **kwargs):\n body = {'bandwidth_limit_rule': kwargs}\n ruleType = 'bandwidth_limit_rule'\n bandwidthlimit_update = neutronclient(request)\\\n .update_bandwidth_limit_rule(rule_id, policy_id, body)\\\n .get(ruleType)\n return BandwidthLimitRule(bandwidthlimit_update)", "def edit_dedicated_fwl_rules(self, firewall_id, rules):\r\n mask = ('mask[networkVlan[firewallInterfaces'\r\n '[firewallContextAccessControlLists]]]')\r\n svc = self.client['Network_Vlan_Firewall']\r\n fwl = svc.getObject(id=firewall_id, mask=mask)\r\n network_vlan = fwl['networkVlan']\r\n\r\n for fwl1 in network_vlan['firewallInterfaces']:\r\n if fwl1['name'] == 'inside':\r\n continue\r\n for control_list in fwl1['firewallContextAccessControlLists']:\r\n if control_list['direction'] == 'out':\r\n continue\r\n fwl_ctx_acl_id = control_list['id']\r\n\r\n template = {\r\n 'firewallContextAccessControlListId': fwl_ctx_acl_id,\r\n 'rules': rules\r\n }\r\n\r\n svc = self.client['Network_Firewall_Update_Request']\r\n return svc.createObject(template)", "def edit_standard_fwl_rules(self, firewall_id, rules):\r\n rule_svc = self.client['Network_Firewall_Update_Request']\r\n template = {\r\n \"networkComponentFirewallId\": firewall_id,\r\n \"rules\": rules}\r\n\r\n return rule_svc.createObject(template)", "def update_resolver_rule(ResolverRuleId=None, Config=None):\n pass", "def edit_rule(self, rule_number, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tif len(self._blacklist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\told_rule = self._blacklist_rules.pop(rule_number)\n\t\t\tself._blacklist_rules.append(rule)\n\t\t\tself._log.info('Replaced rule from the blacklist rules set: \\n old: %s\\n new: %s' % (old_rule, rule))\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tif len(self._whitelist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\told_rule = self._whitelist_rules.pop(rule_number)\n\t\t\tself._whitelist_rules.append(rule)\n\t\t\tself._log.info('Replaced rule from the whitelist rules set: \\n old: %s\\n new: %s' % (old_rule, rule))\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()\n\t\treturn old_rule", "def add_rule(self, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tself._log.info('Adding new rule to the blacklist rules set: %s' % rule)\n\t\t\tself._blacklist_rules.append(rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tself._log.info('Adding new rule to the whitelist rules set: %s' % rule)\n\t\t\tself._whitelist_rules.append(rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()", "def test_create_update_delete_firewall_rule(self):\n ctx = context.get_admin_context()\n clnt = self.plugin._client\n with self.firewall_rule(do_delete=False) as fwr:\n fwr_id = fwr['firewall_rule']['id']\n # Create Firewall Rule\n crd_rule = {'firewall_rule': fwr}\n clnt.create_firewall_rule.assert_called_once_with(fwr)\n # Update Firewall Rule\n data = {'firewall_rule': {'name': 'new_rule_name',\n 'source_port': '10:20',\n 'destination_port': '30:40'}}\n fw_rule = self.plugin.update_firewall_rule(ctx, fwr_id, data)\n crd_rule = {'firewall_rule': fw_rule}\n clnt.update_firewall_rule.assert_called_once_with(fwr_id, 
crd_rule)\n # Delete Firewall Rule\n self.plugin.delete_firewall_rule(ctx, fwr_id)\n clnt.delete_firewall_rule.assert_called_once_with(fwr_id)", "def test_update_firewall(self):\r\n resource = 'firewall'\r\n cmd = firewall.UpdateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def minimum_bandwidth_rule_update(request, policy_id, rule_id, **kwargs):\n body = {'minimum_bandwidth_rule': kwargs}\n ruleType = 'minimum_bandwidth_rule'\n minbandwidth_update = neutronclient(request)\\\n .update_minimum_bandwidth_rule(rule_id, policy_id, body)\\\n .get(ruleType)\n return MinimumBandwidthRule(minbandwidth_update)", "def update_rules():\n update_all_rules()\n return \"OK\"", "def create_firewall_rule(self, body=None):\r\n return self.post(self.firewall_rules_path, body=body)", "def update_l7_rule(request, **kwargs):\n data = request.DATA\n l7_rule_id = data['l7rule'].get('id')\n\n conn = get_sdk_connection(request)\n l7_rule = conn.load_balancer.update_l7_rule(\n admin_state_up=data['l7rule'].get('admin_state_up'),\n compare_type=data['l7rule']['compare_type'],\n invert=data['l7rule'].get('invert'),\n key=data['l7rule'].get('key'),\n l7_policy=kwargs['l7_policy_id'],\n l7rule=l7_rule_id,\n type=data['l7rule']['type'],\n rule_value=data['l7rule']['rule_value'],\n )\n\n return _get_sdk_object_dict(l7_rule)", "def update_firewall_policy(self, firewall_policy, body=None):\r\n return self.put(self.firewall_policy_path % (firewall_policy),\r\n body=body)", "def test_update_firewall_policy(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def ModifyRuleAttribute(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyRuleAttribute\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyRuleAttributeResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def Update(self,\n fp_id=None,\n only_generate_request=False,\n firewall_policy=None,\n batch_mode=False):\n\n if batch_mode:\n requests = [\n self._MakeUpdateRequestTuple(\n fp_id=fp_id, firewall_policy=firewall_policy)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Patch(\n self._MakeUpdateRequestTuple(\n fp_id=fp_id, firewall_policy=firewall_policy)[2])\n return self.WaitOperation(\n op_res, message='Updating the organization firewall policy.')", "def update_rules(self: object,\n body: dict,\n cs_username: str = None # pylint: disable=W0613 # cs_username is deprecated\n ) -> dict:\n # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rules\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"update_rules\",\n body=body\n )", "def edit_rules():\n my_rules = rules.get_all_rules()\n my_rules.append(DEFAULT_RULE)\n\n selected_rule_id = select(\n label=\"Existing rules\",\n options=[{\"label\": rule[\"name\"], \"value\": rule[\"id\"]} for rule in my_rules],\n )\n # Rules have unique IDs from the database:\n logging.info(f\"selected_rule: 
{selected_rule_id}\")\n use_rule = [r for r in my_rules if r[\"id\"] == int(selected_rule_id)][0]\n updated_rule = input_group(\n \"Rule editing\",\n [\n input(\n \"name\", type=TEXT, name=\"name\", value=use_rule[\"name\"], required=True\n ), # Need ttextarea(\n textarea(\n \"Rule names\",\n name=\"rule\",\n rows=10,\n code={\n \"mode\": \"python\", # code language\n \"theme\": \"darcula\", # Codemirror theme. Visit https://codemirror.net/demo/theme.html#cobalt to get more themes\n },\n value=f\"\"\"{use_rule['rule']}\\n\"\"\",\n ),\n actions(\n \"actions\",\n [\n # {\"label\": \"test\", \"value\": \"test\"},\n {\"label\": \"save\", \"value\": \"save\"},\n ],\n name=\"action\",\n help_text=\"Save\",\n ),\n ],\n )\n if updated_rule is not None:\n rl = dict(updated_rule)\n if rl[\"action\"] == \"save\":\n rule_info = rules.save_rule(\n rl[\"name\"], rl[\"rule\"], selected_rule_id\n )\n put_row(put_text(\"Rule\"))\n put_row(put_code(pprint.pformat(rule_info, indent=1)))\n # Use webhook_info's ID to add/update the extractor\n\n put_text(f\"The rule added is: {updated_rule}\")", "def dscp_marking_rule_update(request, policy_id, rule_id, **kwargs):\n\n body = {'dscp_marking_rule': kwargs}\n ruleType = 'dscp_marking_rule'\n dscpmarking_update = neutronclient(request)\\\n .update_dscp_marking_rule(rule_id, policy_id, body).get(ruleType)\n return DSCPMarkingRule(dscpmarking_update)", "def update(self,\n section_id,\n rule_id,\n service_insertion_rule,\n ):\n return self._invoke('update',\n {\n 'section_id': section_id,\n 'rule_id': rule_id,\n 'service_insertion_rule': service_insertion_rule,\n })", "def post(self, request, *args, **kwargs):\n rule = self.get_object()\n try:\n updates = json.loads(request.body.decode('utf-8'))\n except Exception as e:\n return error('unable to marshal json', str(e))\n try:\n validate_rule_json(updates)\n except RuleValidationException as e:\n return error('error validating json', str(e))\n\n # TODO this can take place in the save method on Rule, which would also\n # cover creation and deletion.\n change = RuleChange(\n rule=rule,\n change_user=updates['user'],\n change_comment=updates['comment'])\n change.populate(rule.full_values())\n if rule.enabled and not updates['enabled']:\n change.change_type = 'd'\n else:\n change.change_type = 'u'\n change.save()\n rule.populate(updates)\n rule.save()\n return success({\n 'rule': rule.summary(),\n 'change': change.summary(),\n })", "def set_rule(self, rule):\n self.rule.load_state_dict(rule, strict=True)" ]
[ "0.7676399", "0.7126098", "0.69864595", "0.6753691", "0.67512226", "0.66830754", "0.6529023", "0.65162325", "0.64459217", "0.6367014", "0.63591367", "0.63147587", "0.6300518", "0.62627393", "0.62527394", "0.6206311", "0.6127455", "0.6119819", "0.61131346", "0.6089892", "0.6029194", "0.60226774", "0.5964637", "0.5913811", "0.5895666", "0.5815251", "0.57465494", "0.57229733", "0.5722782", "0.56798124" ]
0.8285445
0
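
update_firewall_rule above PUTs a partial body against one rule, so only the fields present in the body change on the server. A short sketch under the same assumptions as the earlier ones:

# Sketch: partial update of a single rule; fields omitted from the body
# keep their current server-side values.
update_body = {'firewall_rule': {'destination_port': '8080',
                                 'description': 'moved to an alternate port'}}
updated = neutron.update_firewall_rule(rule_id, body=update_body)
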
Deletes the specified firewall rule.
def delete_firewall_rule(self, firewall_rule):
    return self.delete(self.firewall_rule_path % (firewall_rule))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_firewall_rule(self, server_uuid, firewall_rule_position):\n url = f'/server/{server_uuid}/firewall_rule/{firewall_rule_position}'\n return self.api.delete_request(url)", "def cloudflare_waf_firewall_rule_delete_request(self, rule_id: str, zone_id: str) -> Dict[str, Any]:\n return self._http_request(\n method='DELETE',\n url_suffix=f'zones/{zone_id}/firewall/rules',\n params={'id': rule_id})", "def delete_firewall_rule(self, firewall_rule_id): \n params = {'command':'deleteFirewallRule',\n 'id':firewall_rule_id} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletefirewallruleresponse']['jobid']\n self.logger.debug('Start job - deleteFirewallRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def delete_firewall(self, firewall):\r\n return self.delete(self.firewall_path % (firewall))", "def cloudflare_waf_firewall_rule_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n rule_id = args['id']\n zone_id = args.get('zone_id', client.zone_id)\n\n response = client.cloudflare_waf_firewall_rule_delete_request(rule_id, zone_id)\n\n return CommandResults(\n readable_output=f'Firewall rule {rule_id} was successfully deleted.',\n raw_response=response\n )", "def delete_rule(self, ruleid):\n path = '%s/security-group-rules/%s' % (self.ver, ruleid)\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group rule %s: %s' % \n (ruleid, truncate(res)))\n return res[0]", "def _delete_rule(cls, rule_suffix: str) -> None:\n delete_rule = cls._build_rule_string(IpTableCommandOption.DELETE, rule_suffix)\n log.info('Delete rule \"%s\"', delete_rule)\n utils.run_command(delete_rule, shell=True)", "def delete_resolver_rule(ResolverRuleId=None):\n pass", "def Delete(self,\n priority=None,\n firewall_policy_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.RemoveRule(\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)[2])\n return self.WaitOperation(\n op_res,\n message='Deleting a rule from the organization firewall policy.')", "def delete_rule(uuid):\n with session_for_write() as session:\n stmt = (\n delete(\n model.RuleAction\n ).where(\n model.RuleAction.rule == uuid\n ).execution_options(synchronize_session=False)\n )\n session.execute(stmt)\n\n stmt = (\n delete(\n model.RuleCondition\n ).where(\n model.RuleCondition.rule == uuid\n ).execution_options(synchronize_session=False)\n )\n session.execute(stmt)\n\n stmt = (\n delete(\n model.Rule\n ).where(\n model.Rule.uuid == uuid\n ).execution_options(synchronize_session=False)\n )\n res = session.execute(stmt)\n if res.rowcount == 0:\n raise utils.RuleNotFoundError(uuid)", "def delete_rule(self, id: str, rule_id: str) -> dict:\n r = requests.delete(self.url + '/{}/rules/{}'.format(id, rule_id), headers=self.headers)\n\n return r.json()", "def delete_firewall_rule(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_rule = self.network.find_firewall_rule(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_rule(\n 
firewall_rule, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall rule %s not found for deleting', name_or_id\n )\n return False\n return True", "def test_esg_firewall_rule_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|id', esg_firewall.delete,\n {'rule': {\n 'esg_id': 'esg_id'\n }},\n ['firewallRule'], {\n 'uri_parameters': {'edgeId': 'esg_id', 'ruleId': 'id'}\n },\n additional_params=['rule_id']\n )", "def delete_rule(rule, table=None):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-D\"] + rule\n return call(cmdline)", "def delete_rule(self, index):\n del self.rules[index]", "def dscp_marking_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_dscp_marking_rule(rule_id, policy_id)", "def bandwidth_limit_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_bandwidth_limit_rule(rule_id, policy_id)", "def DeleteRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def firewall_policy_remove_rule(self, firewall_policy, body=None):\r\n return self.put(self.firewall_policy_remove_path % (firewall_policy),\r\n body=body)", "def delete_snat_rule(self, rule, ignore_missing=True):\n return self._delete(_snat.Rule, rule, ignore_missing=ignore_missing)", "def handle_delete(self, request, user, *args, **kwargs):\n try:\n\n self.log.info('Delete rule from an environment')\n\n # User permission\n if not has_perm(user, AdminPermission.VIP_VALIDATION, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n raise UserNotAuthorizedError(None)\n\n id_rule = kwargs.get('id_rule')\n\n if not is_valid_int_greater_zero_param(id_rule):\n self.log.error(\n u'The id_rule parameter is not a valid value: %s.', id_rule)\n raise InvalidValueError(None, 'id_rule', id_rule)\n\n rule = Rule.objects.get(pk=id_rule)\n rule.delete()\n\n return self.response(dumps_networkapi({}))\n\n except InvalidValueError, e:\n return self.response_error(269, e.param, e.value)\n except Rule.DoesNotExist:\n return self.response_error(358)\n except UserNotAuthorizedError:\n return self.not_authorized()\n except Exception, e:\n return self.response_error(1)", "def delete_security_group_rule(self, security_group_rule):\r\n return self.delete(self.security_group_rule_path %\r\n (security_group_rule))", "def delete_firewall_policy(self, firewall_policy):\r\n return self.delete(self.firewall_policy_path % (firewall_policy))", "def minimum_bandwidth_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_minimum_bandwidth_rule(rule_id, policy_id)", "def rule_delete(self, sgr_id):\n self.client.delete_security_group_rule(sgr_id)", "def delete(self, request, l7_rule_id, l7_policy_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_l7_rule,\n l7_rule_id, l7_policy_id,\n load_balancer_getter=l7_policy_get_load_balancer_id,\n resource_id=l7_policy_id)", "def test_create_update_delete_firewall_rule(self):\n ctx = context.get_admin_context()\n clnt = self.plugin._client\n with self.firewall_rule(do_delete=False) as fwr:\n fwr_id 
= fwr['firewall_rule']['id']\n # Create Firewall Rule\n crd_rule = {'firewall_rule': fwr}\n clnt.create_firewall_rule.assert_called_once_with(fwr)\n # Update Firewall Rule\n data = {'firewall_rule': {'name': 'new_rule_name',\n 'source_port': '10:20',\n 'destination_port': '30:40'}}\n fw_rule = self.plugin.update_firewall_rule(ctx, fwr_id, data)\n crd_rule = {'firewall_rule': fw_rule}\n clnt.update_firewall_rule.assert_called_once_with(fwr_id, crd_rule)\n # Delete Firewall Rule\n self.plugin.delete_firewall_rule(ctx, fwr_id)\n clnt.delete_firewall_rule.assert_called_once_with(fwr_id)", "def delete_rule(self, rule_name):\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n\n del self.rules[rule_name]\n del self.rule_source[rule_name]\n del self.rule_str[rule_name]\n del self.rule_ft[rule_name]\n\n return True", "def delete(self, package=\"\", uid=\"\", params={}):\n return self.__post('delete-nat-rule', package, uid, params)", "def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)" ]
[ "0.7865063", "0.75619334", "0.7403397", "0.7344586", "0.7260129", "0.70308244", "0.7010474", "0.68722457", "0.6847275", "0.68452483", "0.6841127", "0.6821352", "0.67873496", "0.6773156", "0.6763249", "0.6729523", "0.6712253", "0.6708854", "0.66834664", "0.6621239", "0.6598687", "0.65961057", "0.65659034", "0.65456486", "0.6543387", "0.6524158", "0.64640284", "0.64063567", "0.6400361", "0.6313823" ]
0.8690892
0
Fetches a list of all firewall policies for a tenant.
def list_firewall_policies(self, retrieve_all=True, **_params):\r\n # Pass filters in "params" argument to do_request\r\n return self.list('firewall_policies', self.firewall_policies_path,\r\n retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_policies(self):\n client = self.connect(VAULT_TOKEN)\n return client.list_policies()", "def list_policies(self):\n return self.con.list_policies(\n Scope='Local'\n )", "def ListPolicies(self, request, global_params=None):\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(\n config, request, global_params=global_params)", "def list_policies(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"escalation_policies\",\n \"id\",\n __salt__[\"config.option\"](profile),\n api_key,\n opts=__opts__,\n )", "def policy_list(request, **kwargs):\n policies = neutronclient(request).list_qos_policies(\n **kwargs).get('policies')\n return [QoSPolicy(p) for p in policies]", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def list_auth_policies(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.AUTH_POLICIES_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )", "def all_schedule_policies(self):\n return self._policies", "def storage_policies(self, **kwargs):\n self.logger.debug(f\"Get storage policies data\")\n url_path = 'storage/policies'\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def list_policy_profiles(self, **params):\r\n return self.get(self.policy_profiles_path, params=params)", "def rbac_policy_list(request, **kwargs):\n policies = neutronclient(request).list_rbac_policies(\n **kwargs).get('rbac_policies')\n return [RBACPolicy(p) for p in policies]", "def policies(self, request):\n policies = OtterPolicies(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return policies.app.resource()", "def list_ikepolicies(self, retrieve_all=True, **_params):\r\n return self.list('ikepolicies', self.ikepolicies_path, retrieve_all,\r\n **_params)", "def list_policies(policystore_url, verbose):\n\n if verbose:\n logging.info('Listing policies')\n\n list_url = policystore_url + POLICYSTORE_PREFIX + 'ListEntitlementPolicies'\n\n r = requests.post(list_url, headers=headers(), json={})\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n sys.exit('Failed to list policies')\n\n logging.info('SUCCESS: Listed policies')\n\n resp = r.json()\n\n if verbose:\n logging.info('Policies retrieved')\n pprint.pprint(resp)\n\n return resp", "def list_domain_policy(self, _):\n FILETIME_TIMESTAMP_FIELDS = {\n \"lockOutObservationWindow\": (60, \"mins\"),\n \"lockoutDuration\": (60, \"mins\"),\n \"maxPwdAge\": (86400, \"days\"),\n \"minPwdAge\": (86400, \"days\"),\n \"forceLogoff\": (60, \"mins\")\n }\n\n FOREST_LEVELS = {\n 7: \"Windows Server 2016\",\n 6: \"Windows Server 2012 R2\",\n 5: \"Windows Server 2012\",\n 4: \"Windows Server 2008 R2\",\n 3: \"Windows Server 2008\",\n 2: \"Windows Server 2003\",\n 1: \"Windows Server 2003 operating system through Windows Server 2016\",\n 0: \"Windows 2000 Server operating system through Windows Server 2008 operating system\"\n }\n\n FIELDS_TO_PRINT = [\n \"dc\",\n \"distinguishedName\",\n \"lockOutObservationWindow\",\n \"lockoutDuration\",\n \"lockoutThreshold\",\n \"maxPwdAge\",\n \"minPwdAge\",\n \"minPwdLength\",\n \"pwdHistoryLength\",\n \"pwdProperties\",\n \"ms-DS-MachineAccountQuota\",\n \"msDS-Behavior-Version\"]\n\n policy = 
list(self.engine.query(self.engine.DOMAIN_INFO_FILTER()))\n if policy:\n policy = policy[0]\n for field in FIELDS_TO_PRINT:\n val = policy.get(field, None)\n if val is None:\n continue\n\n if field == \"lockOutObservationWindow\" and isinstance(val, timedelta):\n val = int(val.total_seconds()) / 60\n elif field in FILETIME_TIMESTAMP_FIELDS.keys() and type(val) == int:\n val = int((fabs(float(val)) / 10**7) / FILETIME_TIMESTAMP_FIELDS[field][0])\n if field in FILETIME_TIMESTAMP_FIELDS.keys():\n val = \"%d %s\" % (val, FILETIME_TIMESTAMP_FIELDS[field][1])\n if field == \"msDS-Behavior-Version\" and isinstance(val, int):\n val = \"%s\" % (FOREST_LEVELS[policy[field]])\n\n print(\"%s: %s\" % (field, val))", "def _get_policies(self):\n flag, response = self._commcell_object._cvpysdk_object.make_request('GET', self._POLICY)\n\n if flag:\n if response.json() and 'taskDetail' in response.json():\n policies = response.json()['taskDetail']\n policies_dict = {}\n\n for policy in policies:\n temp_name = policy['task']['taskName'].lower()\n temp_id = str(policy['task']['taskId']).lower()\n policies_dict[temp_name] = temp_id\n\n return policies_dict\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)", "def policies(self):\n return self._data.get('policies')", "def list_policy(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/policies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1PolicyList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def dns_policies(self, **kwargs):\n url_path = 'dns/policies'\n self.logger.debug(f\"Get RealTime DNS Policies\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def policies(self):\n return self._policies", "def list_policy_profile_bindings(self, **params):\r\n return 
self.get(self.policy_profile_bindings_path, params=params)", "def list_policies(self, compartment_id, **kwargs):\n resource_path = \"/policies\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_policies got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Policy]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Policy]\")", "def firewalls(self) -> Sequence['outputs.SubResourceResponse']:\n return pulumi.get(self, \"firewalls\")", "def list_ipsecpolicies(self, retrieve_all=True, **_params):\r\n return self.list('ipsecpolicies',\r\n self.ipsecpolicies_path,\r\n retrieve_all,\r\n **_params)", "def get_policies(self, query_params=dict(), **kwargs):\n policy_types = PolicyTypes(prefix=self.prefix, **kwargs)\n response = policy_types._get(query_params, **kwargs)\n policy_types.set_values(response)\n return policy_types", "def list_firewalls(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n\r\n return self.list('firewalls', self.firewalls_path, retrieve_all,\r\n **_params)", "def getMergePolicies(self, limit: int = 100) -> dict:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getMergePolicies\")\n path = \"/config/mergePolicies\"\n params = {\"limit\": limit}\n res = self.connector.getData(\n self.endpoint + path, params=params, headers=self.header\n )\n data = res[\"children\"]\n nextPage = res[\"_links\"][\"next\"].get(\"href\", \"\")\n while nextPage != \"\":\n path = \"/config/mergePolicies?\" + nextPage.split(\"?\")[1]\n res = self.connector.getData(\n self.endpoint + path, params=params, headers=self.header\n )\n data += res[\"children\"]\n nextPage = res[\"_links\"][\"next\"].get(\"href\", \"\")\n return data", "def get_all(nitro):\r\n __url = nitro.get_url() + NSPatset.get_resourcetype()\r\n __json_policypatsets = nitro.get(__url).get_response_field(NSPatset.get_resourcetype())\r\n __policypatsets = []\r\n for json_policypatset in __json_policypatsets:\r\n __policypatsets.append(NSPatset(json_policypatset))\r\n return __policypatsets", "def policy_rules(self) -> Sequence[Any]:\n return pulumi.get(self, \"policy_rules\")", "def test_get_hyperflex_sys_config_policy_list(self):\n pass" ]
[ "0.6369753", "0.5877332", "0.56792355", "0.5663501", "0.55814", "0.5498755", "0.54811", "0.54676265", "0.5452789", "0.54221016", "0.53502595", "0.53350145", "0.5325583", "0.5253421", "0.5231644", "0.52054423", "0.5202875", "0.5132389", "0.50926423", "0.5080169", "0.50724727", "0.506929", "0.5032529", "0.5026589", "0.50093234", "0.50070363", "0.4927348", "0.4924729", "0.49042305", "0.48643926" ]
0.6845963
0
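Usage sketch (not part of the dataset record above): a minimal call to the list_firewall_policies document, assuming a configured python-neutronclient v2.0 Client. The endpoint and credentials below are placeholder values, not taken from the dataset.

    from neutronclient.v2_0 import client

    # Build a client once; the later sketches reuse this `neutron` handle.
    neutron = client.Client(username='admin', password='secret',
                            tenant_name='demo',
                            auth_url='http://controller:5000/v2.0')

    # Keyword filters pass through **_params as query parameters.
    policies = neutron.list_firewall_policies(shared=True)
    for policy in policies['firewall_policies']:
        print(policy['id'], policy['name'])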
Fetches information for a given firewall policy.
def show_firewall_policy(self, firewall_policy, **_params): return self.get(self.firewall_policy_path % (firewall_policy), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_policy(client, policy_name):\n response = client.describe_firewall_policy(\n FirewallPolicyName=policy_name,\n )\n return response", "def read(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.debug(\"Reading the policy: %s\", address)\n response = self.vault.requests_request(\n \"GET\", address, headers=self.vault.token_header\n )\n policy_details = response.json()[\"data\"][\"policy\"]\n return policy_details", "def policy_info(self) -> 'outputs.PolicyInfoResponse':\n return pulumi.get(self, \"policy_info\")", "def get_sp_policy(self, context, id):\n # handling policy method in RPC\n response = self.dns_manager.get_sp_policy(context, id)\n return response", "def policy(self) -> Optional[str]:\n return pulumi.get(self, \"policy\")", "def policy_info(self) -> pulumi.Input['PolicyInfoArgs']:\n return pulumi.get(self, \"policy_info\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def describe(cls, policy):\n return cls.descriptions[policy] if cls.validates(policy) else None", "def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"policy\")", "def get_firewall_policy_output(expand: Optional[pulumi.Input[Optional[str]]] = None,\n firewall_policy_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFirewallPolicyResult]:\n ...", "def get_firewall_policy(expand: Optional[str] = None,\n firewall_policy_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallPolicyResult:\n __args__ = dict()\n __args__['expand'] = expand\n __args__['firewallPolicyName'] = firewall_policy_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:network/v20200401:getFirewallPolicy', __args__, opts=opts, typ=GetFirewallPolicyResult).value\n\n return AwaitableGetFirewallPolicyResult(\n base_policy=pulumi.get(__ret__, 'base_policy'),\n child_policies=pulumi.get(__ret__, 'child_policies'),\n etag=pulumi.get(__ret__, 'etag'),\n firewalls=pulumi.get(__ret__, 'firewalls'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n name=pulumi.get(__ret__, 'name'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n rule_groups=pulumi.get(__ret__, 'rule_groups'),\n tags=pulumi.get(__ret__, 'tags'),\n threat_intel_mode=pulumi.get(__ret__, 'threat_intel_mode'),\n threat_intel_whitelist=pulumi.get(__ret__, 'threat_intel_whitelist'),\n type=pulumi.get(__ret__, 'type'))", "def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> Optional[pulumi.Input['ServicePolicyArgs']]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> Optional[pulumi.Input['ServicePolicyArgs']]:\n return pulumi.get(self, \"policy\")", "def get_policy(self):\n 
return self.agent.get_policy()", "def policy_get(request, policy_id, **kwargs):\n policy = neutronclient(request).show_qos_policy(\n policy_id, **kwargs).get('policy')\n return QoSPolicy(policy)", "def policy(self) -> pulumi.Output['outputs.ServicePolicy']:\n return pulumi.get(self, \"policy\")", "def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})", "def show_policy_profile(self, profile, **params):\r\n return self.get(self.policy_profile_path % (profile), params=params)", "def getPolicy(self, state):\n return self.policy[state]", "def get_policy(self):\n try:\n LOG.debug(\"Searching for retention_policy in K2.\")\n return self.client.search(\"retention_policies\",\n name=\"Best_Effort_Retention\").hits[0]\n except Exception as ex:\n LOG.exception(\"Retention policy search failed in K2.\")\n raise KaminarioCinderDriverException(reason=ex)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n custom_block_response_body: Optional[pulumi.Input[str]] = None,\n custom_block_response_status_code: Optional[pulumi.Input[int]] = None,\n custom_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallPolicyCustomRuleArgs']]]]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n frontend_endpoint_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallPolicyManagedRuleArgs']]]]] = None,\n mode: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n redirect_url: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'FirewallPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _FirewallPolicyState.__new__(_FirewallPolicyState)\n\n __props__.__dict__[\"custom_block_response_body\"] = custom_block_response_body\n __props__.__dict__[\"custom_block_response_status_code\"] = custom_block_response_status_code\n __props__.__dict__[\"custom_rules\"] = custom_rules\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"frontend_endpoint_ids\"] = frontend_endpoint_ids\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"managed_rules\"] = managed_rules\n __props__.__dict__[\"mode\"] = mode\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"redirect_url\"] = redirect_url\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"tags\"] = tags\n return FirewallPolicy(resource_name, opts=opts, __props__=__props__)", "def get_firewall(client, firewall_name):\n response = client.describe_firewall(\n FirewallName=firewall_name,\n )\n return response", "def get_policy_by_id(self, id):\n for service, policy_list in self.remote_store.get_policy_list().items():\n for policy in policy_list:\n if policy.id == id:\n return policy", "def get_firewall_policy(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_firewall_policy(\n name_or_id, ignore_missing=True, **filters\n )" ]
[ "0.75920844", "0.6699691", "0.66329557", "0.6356902", "0.6346933", "0.6233168", "0.620738", "0.620738", "0.620738", "0.6192408", "0.6159254", "0.6159254", "0.6159254", "0.6134643", "0.60909563", "0.6058809", "0.6058809", "0.6014234", "0.6014234", "0.6004263", "0.5964909", "0.59239393", "0.5901499", "0.5894845", "0.5879315", "0.58202773", "0.5817182", "0.58063537", "0.576155", "0.5758967" ]
0.7842972
0
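Usage sketch (not part of the dataset record above), reusing the `neutron` client from the first sketch; the UUID is a placeholder.

    # show_firewall_policy returns a dict wrapped under 'firewall_policy'.
    policy = neutron.show_firewall_policy('POLICY_UUID')['firewall_policy']
    print(policy['name'], policy['firewall_rules'])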
Creates a new firewall policy.
def create_firewall_policy(self, body=None): return self.post(self.firewall_policies_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Create(self,\n firewall_policy=None,\n parent_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeCreateRequestTuple(firewall_policy, parent_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Insert(\n self._MakeCreateRequestTuple(firewall_policy, parent_id)[2])\n return self.WaitOperation(\n op_res, message='Creating the organization firewall policy.')", "def policy_create(request, **kwargs):\n body = {'policy': kwargs}\n policy = neutronclient(request).create_qos_policy(body=body).get('policy')\n return QoSPolicy(policy)", "def Create(self,\n firewall_policy=None,\n firewall_policy_rule=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.AddRule(\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)[2])\n return self.WaitOperation(\n op_res, message='Adding a rule to the organization firewall policy.')", "def test_create_firewall_policy_with_all_params(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n firewall_rules_arg = 'rule_id1 rule_id2'\r\n firewall_rules_res = ['rule_id1', 'rule_id2']\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--firewall-rules', firewall_rules_arg,\r\n '--audited',\r\n '--tenant-id', tenant_id,\r\n '--admin-state_up',\r\n name]\r\n position_names = ['name', ]\r\n position_values = [name, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n firewall_rules=firewall_rules_res,\r\n audited=True, admin_state_up=True,\r\n tenant_id=tenant_id)", "def test_create_firewall_policy_with_mandatory_params(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n tenant_id = 'my-tenant'\r\n name = 'my-name'\r\n my_id = 'myid'\r\n args = ['--tenant-id', tenant_id,\r\n '--admin-state_up',\r\n name, ]\r\n position_names = ['name', ]\r\n position_values = [name, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True, tenant_id=tenant_id)", "def create_policy(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_policy`\")\n\n resource_path = '/oapi/v1/policies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in 
params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_ipsecpolicy(self, body=None):\r\n return self.post(self.ipsecpolicies_path, body=body)", "def bandwidth_limit_rule_create(request, policy_id, **kwargs):\n body = {'bandwidth_limit_rule': kwargs}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'bandwidth_limit_rule': kwargs}\n rule = 'bandwidth_limit_rule'\n bandwidth_limit_rule = neutronclient(request)\\\n .create_bandwidth_limit_rule(policy_id, body).get(rule)\n return BandwidthLimitRule(bandwidth_limit_rule)", "def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy", "def apply_firewall_policy(client, policy_name, policy_configs):\n rule_groups = []\n rule_group_arns = []\n for group, rule_config in policy_configs[\"rule_groups\"].items():\n get_group_response = get_rule_group(client, group)\n arn = get_group_response[\"RuleGroupResponse\"][\"RuleGroupArn\"]\n rule_groups.append(\n {\n \"ResourceArn\": arn,\n \"Priority\": rule_config[\"priority\"]\n }\n )\n\n # Check if policy exists and updates it\n try:\n get_response = get_policy(client, policy_name)\n print(f\"AWS Firewall policy {policy_name} already exists. 
Updating...\")\n # This needs retry/backoff logic in case UpdateToken is in use\n update_token = get_response[\"UpdateToken\"]\n response = client.update_firewall_policy(\n UpdateToken=update_token,\n FirewallPolicyName=policy_name,\n FirewallPolicy={\n \"StatelessRuleGroupReferences\": rule_groups,\n \"StatelessDefaultActions\": [\"aws:drop\"],\n \"StatelessFragmentDefaultActions\": [\"aws:drop\"]\n }\n )\n return response\n except client.exceptions.ResourceNotFoundException:\n print(f\"Creating AWS Firewall policy {policy_name}...\")\n \n response = client.create_firewall_policy(\n FirewallPolicyName=policy_name,\n FirewallPolicy={\n \"StatelessRuleGroupReferences\": rule_groups,\n \"StatelessDefaultActions\": [\"aws:drop\"],\n \"StatelessFragmentDefaultActions\": [\"aws:drop\"]\n }\n )\n return response", "def firewall_policy_insert_rule(self, firewall_policy, body=None):\r\n return self.put(self.firewall_policy_insert_path % (firewall_policy),\r\n body=body)", "def test_create_firewall_with_mandatory_params(self):\r\n resource = 'firewall'\r\n cmd = firewall.CreateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n name = ''\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n policy_id = 'my-policy-id'\r\n args = ['--tenant-id', tenant_id, policy_id, ]\r\n position_names = ['firewall_policy_id', ]\r\n position_values = [policy_id, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True, tenant_id=tenant_id)", "def create_policy(self, fn_inputs):\n\n # determine if the policy is already in place\n response, err_msg = self._get_policy_by_sha256(fn_inputs.get('reaqta_sha256'))\n if err_msg:\n return {}, err_msg\n\n policy_info = response.json()\n if policy_info.get('result'):\n return {}, 'A policy already exists for this file hash: {0}. 
<a href=\"{1}\" target=\"blank\">{1}</a>'.format(\n fn_inputs.get('reaqta_sha256'),\n self.make_linkback_url(policy_info['result'][0]['id'], POLICY_DETAILS))\n\n params = {\n \"sha256\": fn_inputs.get('reaqta_sha256'),\n \"title\": fn_inputs.get('reaqta_policy_title', ''),\n \"description\": fn_inputs.get('reaqta_policy_description', ''),\n \"disable\": not fn_inputs.get('reaqta_policy_enabled', True),\n \"block\": fn_inputs.get('reaqta_policy_block', False),\n \"enabledGroups\": [],\n \"disabledGroups\": []\n }\n\n # collect all the group names and find the groupIds\n if fn_inputs.get('reaqta_policy_included_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_included_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['enabledGroups'] = group_id_list\n\n if fn_inputs.get('reaqta_policy_excluded_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_excluded_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['disabledGroups'] = group_id_list\n\n LOG.debug(\"create_policy: %s\", params)\n url = urljoin(POLICY_URI, \"trigger-on-process-hash\")\n return self.api_call(\"POST\", url, params)", "def minimum_bandwidth_rule_create(request, policy_id, **kwargs):\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'minimum_bandwidth_rule': kwargs}\n rule = 'minimum_bandwidth_rule'\n minimum_bandwidth_rule = neutronclient(request)\\\n .create_minimum_bandwidth_rule(policy_id, body).get(rule)\n return MinimumBandwidthRule(minimum_bandwidth_rule)", "def create_policy(self, policy_name, policy_document, delete=True, **kwargs):\n try:\n Oprint.info('Creating IAM policy {}'.format(policy_name), 'iam')\n \n policy = self.get_policy(policy_name=policy_name)\n if policy and policy.get('Policy'):\n if not delete:\n Oprint.info('Found existing IAM policy {}'.format(policy_name), 'iam')\n return policy\n else:\n # Can not delete a policy if it has been attached\n if policy.get('Policy').get('AttachmentCount') > 0:\n Oprint.warn('Policy {} already exists and has been attached to a role. 
Cannot delete'.format(policy.get('Policy').get('PolicyName')), 'iam')\n return policy\n\n self._client.delete_policy(PolicyArn=self.get_policy_arn(policy_name))\n \n policy = self._client.create_policy(PolicyName=policy_name, PolicyDocument=policy_document, **kwargs)\n\n Oprint.info('IAM policy {} has been created'.format(policy_name), 'iam')\n except Exception as e:\n Oprint.err(e, 'iam')\n\n return policy", "def create_firewall(self, body=None):\r\n return self.post(self.firewalls_path, body=body)", "def minimum_packet_rate_rule_create(request, policy_id, **kwargs):\n body = {'minimum_packet_rate_rule': kwargs}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'minimum_packet_rate_rule': kwargs}\n rule = 'minimum_packet_rate_rule'\n minimum_packet_rate_rule = neutronclient(request)\\\n .create_minimum_packet_rate_rule(policy_id, body).get(rule)\n return MinimumPacketRateRule(minimum_packet_rate_rule)", "def add(self, policy_name, data):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.info(\"Adding the policy: %s\", address)\n payload = json.dumps({\"policy\": data})\n response = self.vault.requests_request(\n \"POST\", address, headers=self.vault.token_header, data=payload\n )", "def test_create_update_delete_firewall_policy(self):\n with self.firewall_policy(do_delete=False) as fwp:\n fwp_id = fwp['firewall_policy']['id']\n # Create Firewall Policy\n crd_policy = {'firewall_policy': fwp}\n self.clnt.create_firewall_policy.assert_called_once_with(fwp)\n # Update Firewall Policy\n data = {'firewall_policy': {'name': 'updated-name'}}\n fwp = self.plugin.update_firewall_policy(self.ctx, fwp_id, data)\n crd_policy = {'firewall_policy': fwp}\n self.clnt.update_firewall_policy.assert_called_once_with(\n fwp_id,\n crd_policy)\n # Delete Firewall Policy\n self.plugin.delete_firewall_policy(self.ctx, fwp_id)\n self.clnt.delete_firewall_policy.assert_called_once_with(fwp_id)", "def create_policy(self, create_policy_details, **kwargs):\n resource_path = \"/policies\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_policy got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_policy_details,\n response_type=\"Policy\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_policy_details,\n response_type=\"Policy\")", "def create_ingress_security_policy(self):\n if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:\n ingress_svc_fw_policy_uuid =\\\n VncSecurityPolicy.create_firewall_policy(\n 
self._k8s_event_type,\n None, None, is_global=True)\n VncSecurityPolicy.add_firewall_policy(ingress_svc_fw_policy_uuid)\n VncSecurityPolicy.ingress_svc_fw_policy_uuid =\\\n ingress_svc_fw_policy_uuid", "def rbac_policy_create(request, **kwargs):\n body = {'rbac_policy': kwargs}\n rbac_policy = neutronclient(request).create_rbac_policy(\n body=body).get('rbac_policy')\n return RBACPolicy(rbac_policy)", "def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)", "def create_policy(policystore_url, create_policy_request, verbose):\n\n if verbose:\n logging.info('Creating policy')\n pprint.pprint(create_policy_request)\n\n create_url = policystore_url + POLICYSTORE_PREFIX + 'CreateEntitlementPolicy'\n\n r = requests.post(\n create_url, headers=headers(), json=create_policy_request)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n\n sys.exit('Failed to create policy')\n\n resp = r.json()\n\n logging.info(\n f'SUCCESS: Created policy - ID: {resp[\"policy_id\"]}, Token: {resp[\"token\"]}'\n )\n\n return resp", "def post_network_policy_create(self, resource_dict):\n pass", "def create_firewall_rule(self, body=None):\r\n return self.post(self.firewall_rules_path, body=body)", "def __create_policy_def(self):\n\n self.logger.info(f\"Creating policy definition {self.policy_id}\")\n policy_definition_res = self.interactor.put_policy_definition(\n self.policy_id, self.policy_json\n )\n\n # definition was not created, report and abort\n if policy_definition_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy definition {self.policy_id} could not be created - {policy_definition_res.status_code}: {policy_definition_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True", "def create_ikepolicy(self, body=None):\r\n return self.post(self.ikepolicies_path, body=body)", "def __init__(__self__,\n resource_name: str,\n args: FirewallPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def pre_network_policy_create(self, resource_dict):\n pass" ]
[ "0.7127725", "0.6759481", "0.6576294", "0.64439803", "0.63746595", "0.6240316", "0.6238295", "0.6177763", "0.61514884", "0.61499244", "0.6108789", "0.60220695", "0.6016407", "0.601616", "0.60010135", "0.5972228", "0.5951646", "0.5945979", "0.58931273", "0.5854514", "0.583967", "0.58337307", "0.58031917", "0.57809925", "0.57620984", "0.5717147", "0.5677212", "0.56735826", "0.5653288", "0.56397164" ]
0.835379
0
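Usage sketch (not part of the dataset record above), reusing the `neutron` client from the first sketch.

    # The request body wraps the attributes under 'firewall_policy'.
    body = {'firewall_policy': {'name': 'web-policy',
                                'firewall_rules': []}}
    created = neutron.create_firewall_policy(body=body)['firewall_policy']
    print(created['id'])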
Updates a firewall policy.
def update_firewall_policy(self, firewall_policy, body=None): return self.put(self.firewall_policy_path % (firewall_policy), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Update(self,\n fp_id=None,\n only_generate_request=False,\n firewall_policy=None,\n batch_mode=False):\n\n if batch_mode:\n requests = [\n self._MakeUpdateRequestTuple(\n fp_id=fp_id, firewall_policy=firewall_policy)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Patch(\n self._MakeUpdateRequestTuple(\n fp_id=fp_id, firewall_policy=firewall_policy)[2])\n return self.WaitOperation(\n op_res, message='Updating the organization firewall policy.')", "def test_update_firewall_policy(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def update_policy(self, *args, **kwargs):\r\n pass", "def update_policy(self):\n pass", "def Update(self,\n priority=None,\n firewall_policy=None,\n firewall_policy_rule=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeUpdateRuleRequestTuple(\n priority=priority,\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.PatchRule(\n self._MakeUpdateRuleRequestTuple(\n priority=priority,\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)[2])\n return self.WaitOperation(\n op_res, message='Updating a rule in the organization firewall policy.')", "def UpdatePolicy(self, request, global_params=None):\n config = self.GetMethodConfig('UpdatePolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "def put(self, request, l7_policy_id):\n kwargs = {'l7_policy_id': l7_policy_id}\n update_l7_policy(request, **kwargs)", "def update_Policy(self,inputpolicy):\n \n policyob = self.SD_Map.retrieve_ob(inputpolicy)\n policyob.values[-1] = self.PolicyDicts[inputpolicy][self.translate(self.policy_option_vars[inputpolicy].get(),\n input_language = self.language,\n output_language = 'english')]", "def device_update_policy(self, device_ids, policy_id):\n return self._device_action(device_ids, \"UPDATE_POLICY\", {\"policy_id\": policy_id})", "def update_policy(policy_id):\n old_policy = PolicyService.get_policy_by_id(policy_id)\n if old_policy is None:\n abort(404)\n new_policy = PolicyService.update_policy_by_id(policy_id, json_to_policy(request.json))\n if new_policy is None:\n abort(406)\n return new_policy.__dict__", "def put(self, request, l7_rule_id, l7_policy_id):\n kwargs = {'l7_rule_id': l7_rule_id, 'l7_policy_id': l7_policy_id}\n update_l7_rule(request, **kwargs)", "def apply_firewall_policy(client, policy_name, policy_configs):\n rule_groups = []\n rule_group_arns = []\n for group, rule_config in policy_configs[\"rule_groups\"].items():\n get_group_response = get_rule_group(client, group)\n arn = get_group_response[\"RuleGroupResponse\"][\"RuleGroupArn\"]\n rule_groups.append(\n {\n \"ResourceArn\": arn,\n \"Priority\": rule_config[\"priority\"]\n }\n )\n\n # Check if policy exists and updates it\n try:\n get_response = get_policy(client, policy_name)\n print(f\"AWS Firewall policy {policy_name} already exists. 
Updating...\")\n # This needs retry/backoff logic in case UpdateToken is in use\n update_token = get_response[\"UpdateToken\"]\n response = client.update_firewall_policy(\n UpdateToken=update_token,\n FirewallPolicyName=policy_name,\n FirewallPolicy={\n \"StatelessRuleGroupReferences\": rule_groups,\n \"StatelessDefaultActions\": [\"aws:drop\"],\n \"StatelessFragmentDefaultActions\": [\"aws:drop\"]\n }\n )\n return response\n except client.exceptions.ResourceNotFoundException:\n print(f\"Creating AWS Firewall policy {policy_name}...\")\n \n response = client.create_firewall_policy(\n FirewallPolicyName=policy_name,\n FirewallPolicy={\n \"StatelessRuleGroupReferences\": rule_groups,\n \"StatelessDefaultActions\": [\"aws:drop\"],\n \"StatelessFragmentDefaultActions\": [\"aws:drop\"]\n }\n )\n return response", "def rbac_policy_update(request, policy_id, **kwargs):\n body = {'rbac_policy': kwargs}\n rbac_policy = neutronclient(request).update_rbac_policy(\n policy_id, body=body).get('rbac_policy')\n return RBACPolicy(rbac_policy)", "def update_policy(ranger_url, policy_id, policy_data, admin_username_password):\n\n url = format(\"{ranger_url}/service/public/v2/api/policy/{policy_id}\")\n\n base_64_string = base64.encodestring(admin_username_password).replace('\\n', '')\n\n request = urllib2.Request(url, json.dumps(policy_data))\n request.get_method = lambda: 'PUT'\n request.add_header('Content-Type', 'application/json')\n request.add_header('Accept', 'application/json')\n request.add_header('Authorization', format('Basic {base_64_string}'))\n\n try:\n result = openurl(request, timeout=20)\n response_code = result.getcode()\n if response_code == 200:\n Logger.info(format(\"Successfully updated policy in Ranger Admin\"))\n return response_code\n else:\n Logger.error(format(\"Unable to update policy in Ranger Admin\"))\n return None\n except urllib2.HTTPError as e:\n raise Fail(\"HTTPError while updating policy Reason = \" + str(e.code))\n except urllib2.URLError as e:\n raise Fail(\"URLError while updating policy. Reason = \" + str(e.reason))\n except TimeoutError:\n raise Fail(\"Connection timeout error while updating policy\")\n except Exception as err:\n raise Fail(format(\"Error while updating policy. 
Reason = {err}\"))", "def test_create_update_delete_firewall_policy(self):\n with self.firewall_policy(do_delete=False) as fwp:\n fwp_id = fwp['firewall_policy']['id']\n # Create Firewall Policy\n crd_policy = {'firewall_policy': fwp}\n self.clnt.create_firewall_policy.assert_called_once_with(fwp)\n # Update Firewall Policy\n data = {'firewall_policy': {'name': 'updated-name'}}\n fwp = self.plugin.update_firewall_policy(self.ctx, fwp_id, data)\n crd_policy = {'firewall_policy': fwp}\n self.clnt.update_firewall_policy.assert_called_once_with(\n fwp_id,\n crd_policy)\n # Delete Firewall Policy\n self.plugin.delete_firewall_policy(self.ctx, fwp_id)\n self.clnt.delete_firewall_policy.assert_called_once_with(fwp_id)", "def update_apic(self):\n return self.client.policy.update(policyList=self.policy_list.response)", "def update_firewall_rule(self, firewall_rule, body=None):\r\n return self.put(self.firewall_rule_path % (firewall_rule), body=body)", "def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)", "def create_firewall_policy(self, body=None):\r\n return self.post(self.firewall_policies_path, body=body)", "def bandwidth_limit_rule_update(request, policy_id, rule_id, **kwargs):\n body = {'bandwidth_limit_rule': kwargs}\n ruleType = 'bandwidth_limit_rule'\n bandwidthlimit_update = neutronclient(request)\\\n .update_bandwidth_limit_rule(rule_id, policy_id, body)\\\n .get(ruleType)\n return BandwidthLimitRule(bandwidthlimit_update)", "def update(self,\n dns_forwarder_zone_id,\n policy_dns_forwarder_zone,\n ):\n return self._invoke('update',\n {\n 'dns_forwarder_zone_id': dns_forwarder_zone_id,\n 'policy_dns_forwarder_zone': policy_dns_forwarder_zone,\n })", "def _modify_schedule_policy_properties(self):\n request_json = {\n 'taskInfo':\n {\n 'taskOperation': 1,\n 'associations': self._associations,\n 'task': self._task_json,\n \"appGroup\":\n {\n \"appGroups\": self._app_groups if self._app_groups else [],\n },\n 'subTasks': self._subtasks\n }\n }\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'PUT', self._MODIFY_SCHEDULE_POLICY, request_json\n )\n output = self._process_schedule_policy_update_response(flag, response)\n self.refresh()\n\n if output[0]:\n return\n\n o_str = 'Failed to update properties of Schedule Policy\\nError: \"{0}\"'\n raise SDKException('Schedules', '102', o_str.format(output[2]))", "def update_l7_policy(request, **kwargs):\n data = request.DATA\n l7_policy_id = data['l7policy'].get('id')\n\n conn = get_sdk_connection(request)\n l7_policy = conn.load_balancer.update_l7_policy(\n action=data['l7policy']['action'],\n admin_state_up=data['l7policy'].get('admin_state_up'),\n description=data['l7policy'].get('description'),\n l7_policy=l7_policy_id,\n name=data['l7policy'].get('name'),\n position=data['l7policy'].get('position'),\n redirect_pool_id=data['l7policy'].get('redirect_pool_id'),\n redirect_url=data['l7policy'].get('redirect_url'),\n )\n\n return _get_sdk_object_dict(l7_policy)", "def setPolicy(self, value):\n return self._set(policy=value)", "def policy_update_fn(self, data: Dict[str, Any], result: Dict[str, Any]) -> None:", "def update(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('update',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def update_policy(self, policy_id, update_policy_details, **kwargs):\n resource_path = \"/policies/{policyId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n 
\"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_policy got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"policyId\": policy_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_policy_details,\n response_type=\"Policy\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_policy_details,\n response_type=\"Policy\")", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def update_policy(self, policy, inverse_policy=None):\n self.make_T_policy_matrix(policy)\n self.inverse_dynamics_by_time = dict()\n self.policy = policy\n self.inverse_policy = inverse_policy", "def device_update_policy(self, device_update_policy):\n\n self._device_update_policy = device_update_policy" ]
[ "0.77502215", "0.7484777", "0.73084265", "0.71117526", "0.6945951", "0.67466795", "0.6641654", "0.6378163", "0.62731165", "0.6264349", "0.61965215", "0.6191666", "0.61300886", "0.6126616", "0.6038952", "0.6031837", "0.60005814", "0.5951066", "0.5923457", "0.5921816", "0.59033364", "0.58995235", "0.5865775", "0.5860726", "0.5763903", "0.5762444", "0.5754521", "0.5716749", "0.5714008", "0.56977355" ]
0.8141987
0
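Usage sketch (not part of the dataset record above), reusing `neutron` and the `created` policy from the previous sketch.

    # Only the attributes being changed go in the body.
    updated = neutron.update_firewall_policy(
        created['id'],
        body={'firewall_policy': {'description': 'updated by sketch'}})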
Deletes the specified firewall policy.
def delete_firewall_policy(self, firewall_policy): return self.delete(self.firewall_policy_path % (firewall_policy))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Delete(self, fp_id=None, batch_mode=False, only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeDeleteRequestTuple(fp_id=fp_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Delete(self._MakeDeleteRequestTuple(fp_id=fp_id)[2])\n operation_poller = DeletePoller(self._service, self.ref)\n return self.WaitOperation(\n op_res,\n operation_poller=operation_poller,\n message='Deleting the organization firewall policy.')", "def policy_delete(request, policy_id):\n neutronclient(request).delete_qos_policy(policy_id)", "def delete_policy(self, policy_name):\r\n return self.connection.delete_lb_policy(self.name, policy_name)", "def delete(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def Delete(self,\n priority=None,\n firewall_policy_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.RemoveRule(\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)[2])\n return self.WaitOperation(\n op_res,\n message='Deleting a rule from the organization firewall policy.')", "def firewall_policy_remove_rule(self, firewall_policy, body=None):\r\n return self.put(self.firewall_policy_remove_path % (firewall_policy),\r\n body=body)", "def delete_policy(self, policy_ref: str) -> None:\n self.batch_write(\n [self.batch_detach_policy(policy_ref, obj_ref) for obj_ref in self.list_policy_attachments(\n policy_ref,\n ConsistencyLevel=ConsistencyLevel.SERIALIZABLE.name)])\n self.batch_write(\n [self.batch_detach_object(parent_ref, link_name) for parent_ref, link_name in self.list_object_parents(\n policy_ref,\n ConsistencyLevel=ConsistencyLevel.SERIALIZABLE.name)])\n retry(**cd_read_retry_parameters)(cd_client.delete_object)(\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': policy_ref})", "def delete_policy(policy_id):\n policy = PolicyService.get_policy_by_id(policy_id)\n if policy is None:\n abort(404)\n\n policy.delete()\n\n return {}", "def delete_firewall_policy(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_policy = self.network.find_firewall_policy(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_policy(\n firewall_policy, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall policy %s not found for deleting', name_or_id\n )\n return False\n return True", "def rbac_policy_delete(request, policy_id):\n neutronclient(request).delete_rbac_policy(policy_id)", "def delete_firewall(self, firewall):\r\n return self.delete(self.firewall_path % (firewall))", "def DeleteAssociation(self,\n firewall_policy_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeDeleteAssociationRequestTuple(firewall_policy_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.RemoveAssociation(\n 
self._MakeDeleteAssociationRequestTuple(firewall_policy_id)[2])\n return self.WaitOperation(\n op_res,\n message='Deleting the association for the organization firewall policy.'\n )", "def delete_policy(policystore_url, policy_credentials, verbose):\n\n if verbose:\n logging.info('Deleting policy')\n pprint.pprint(policy_credentials)\n\n delete_url = policystore_url + POLICYSTORE_PREFIX + 'DeleteEntitlementPolicy'\n\n r = requests.post(delete_url, headers=headers(), json=policy_credentials)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n sys.exit('Failed to delete policy')\n\n logging.info('SUCCESS: Deleted policy')", "def delete_ipsecpolicy(self, ipsecpolicy):\r\n return self.delete(self.ipsecpolicy_path % (ipsecpolicy))", "def delete_firewall_rule(self, firewall_rule):\r\n return self.delete(self.firewall_rule_path % (firewall_rule))", "def policy_delete_all(session, domain, path=\"/\"):\n client = session.client('iam')\n resp = client.list_policies(Scope='Local', PathPrefix=path)\n\n prefix = domain.replace('.', '-')\n for policy in resp.get('Policies', []):\n if policy['PolicyName'].startswith(prefix):\n ARN = policy['Arn']\n if policy['AttachmentCount'] > 0:\n # cannot delete a policy if it is still in use\n attached = client.list_entities_for_policy(PolicyArn=ARN)\n for group in attached.get('PolicyGroups', []):\n client.detach_group_policy(GroupName=group['GroupName'], PolicyArn=ARN)\n for user in attached.get('PolicyUsers', []):\n client.detach_user_policy(UserName=user['UserName'], PolicyArn=ARN)\n for role in attached.get('PolicyRoles', []):\n client.detach_role_policy(RoleName=role['RoleName'], PolicyArn=ARN)\n client.delete_policy(PolicyArn=ARN)", "def remove_policy(self, sec, ptype, rule):\n line = self.convert_to_item(ptype, rule)\n\n _id = line['id']['S']\n\n self.dynamodb.delete_item(\n Key={\n 'id': {\n 'S': _id,\n }\n },\n TableName=self.table_name,\n )\n\n return True", "def delete(self, request, l7_policy_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_l7_policy,\n l7_policy_id,\n load_balancer_getter=l7_policy_get_load_balancer_id,\n resource_id=l7_policy_id)", "def delete(self, schedule_policy_name):\n\n if schedule_policy_name and not isinstance(schedule_policy_name, basestring):\n raise SDKException('Schedules', '102')\n\n schedule_policy_name = schedule_policy_name.lower()\n schedule_policy_id = self.all_schedule_policies.get(schedule_policy_name)\n\n if schedule_policy_id:\n request_json = {\n \"TMMsg_TaskOperationReq\":\n {\n \"opType\": 3,\n \"taskEntities\":\n [\n {\n \"_type_\": 69,\n \"taskId\": schedule_policy_id\n }\n ]\n }\n }\n\n modify_schedule = self._commcell_object._services['EXECUTE_QCOMMAND']\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'POST', modify_schedule, request_json)\n\n if flag:\n if response.json():\n if 'errorCode' in response.json():\n if response.json()['errorCode'] == 0:\n self.refresh()\n else:\n raise SDKException(\n 'Schedules', '102', response.json()['errorMessage'])\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(\n response.text)\n exception_message = 'Failed to delete schedule policy\\nError: \"{0}\"'.format(\n response_string)\n\n raise SDKException('Schedules', '102', exception_message)\n else:\n raise SDKException(\n 'Schedules', '102', 'No schedule policy exists for: {0}'.format(\n schedule_policy_id)\n )", 
"def bandwidth_limit_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_bandwidth_limit_rule(rule_id, policy_id)", "def delete_policy(self, policy_id, **kwargs):\n resource_path = \"/policies/{policyId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_policy got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"policyId\": policy_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def dscp_marking_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_dscp_marking_rule(rule_id, policy_id)", "def delete_retention_policy(self) -> pulumi.Output[Optional['outputs.DeleteRetentionPolicyResponse']]:\n return pulumi.get(self, \"delete_retention_policy\")", "def test_create_update_delete_firewall_policy(self):\n with self.firewall_policy(do_delete=False) as fwp:\n fwp_id = fwp['firewall_policy']['id']\n # Create Firewall Policy\n crd_policy = {'firewall_policy': fwp}\n self.clnt.create_firewall_policy.assert_called_once_with(fwp)\n # Update Firewall Policy\n data = {'firewall_policy': {'name': 'updated-name'}}\n fwp = self.plugin.update_firewall_policy(self.ctx, fwp_id, data)\n crd_policy = {'firewall_policy': fwp}\n self.clnt.update_firewall_policy.assert_called_once_with(\n fwp_id,\n crd_policy)\n # Delete Firewall Policy\n self.plugin.delete_firewall_policy(self.ctx, fwp_id)\n self.clnt.delete_firewall_policy.assert_called_once_with(fwp_id)", "def delete_retention_policy(self) -> Optional[pulumi.Input['DeleteRetentionPolicyArgs']]:\n return pulumi.get(self, \"delete_retention_policy\")", "def delete_ikepolicy(self, ikepolicy):\r\n return self.delete(self.ikepolicy_path % (ikepolicy))", "def delete_metric_policy(ContainerName=None):\n pass", "def minimum_packet_rate_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_minimum_packet_rate_rule(rule_id, policy_id)", "def delete_group_policy(self, group_name, policy_name):\r\n params = {'GroupName' : group_name,\r\n 'PolicyName' : policy_name}\r\n return self.get_response('DeleteGroupPolicy', params, verb='POST')", "def minimum_bandwidth_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_minimum_bandwidth_rule(rule_id, policy_id)" ]
[ "0.7437354", "0.73796976", "0.69442236", "0.6904816", "0.68614036", "0.6812244", "0.67235583", "0.65640336", "0.6510478", "0.6499342", "0.6492141", "0.6300546", "0.62801087", "0.6122406", "0.5986225", "0.59847015", "0.5982568", "0.5902126", "0.5881453", "0.58646894", "0.5812553", "0.58114135", "0.5801886", "0.57987165", "0.57518935", "0.57474625", "0.57427704", "0.5728303", "0.57013613", "0.5686265" ]
0.8475224
0
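Each record above closes with three score fields: the list of negative_scores, a standalone document_score, and a document_rank. Across the records in this section the rank is 0 exactly when the document_score exceeds every negative score, which suggests the rank counts how many negatives out-score the positive document. A minimal sketch of that reading follows; the ranking convention is an assumption, not stated anywhere in the dataset, and the literal scores are copied (truncated) from the fields of the record that ends here:

def rank_document(document_score, negative_scores):
    # Assumed convention: a document's rank is the number of negatives
    # scoring strictly higher than it, so rank 0 means the positive
    # document beat every mined negative.
    return sum(1 for score in negative_scores if score > document_score)

# First few negative_scores and the document_score from the fields above.
negative_scores = [0.7437354, 0.73796976, 0.69442236, 0.6904816, 0.68614036]
document_score = 0.8475224

assert rank_document(document_score, negative_scores) == 0  # matches document_rank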
Inserts specified rule into firewall policy.
def firewall_policy_insert_rule(self, firewall_policy, body=None): return self.put(self.firewall_policy_insert_path % (firewall_policy), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_rule(self, rule):\n \n self.rules.append(rule)", "def insert(self, rule, ident=None):\n if ident is None:\n self.rules.append(rule)\n else:\n self.rules.insert(ident, rule)", "def add_rule(self, rule):\n self.rule.append(rule)", "def add_rule(self, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tself._log.info('Adding new rule to the blacklist rules set: %s' % rule)\n\t\t\tself._blacklist_rules.append(rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tself._log.info('Adding new rule to the whitelist rules set: %s' % rule)\n\t\t\tself._whitelist_rules.append(rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()", "def insert_rule_into_policy(\n self,\n name_or_id,\n rule_name_or_id,\n insert_after=None,\n insert_before=None,\n filters=None,\n ):\n if not filters:\n filters = {}\n firewall_policy = self.network.find_firewall_policy(\n name_or_id, ignore_missing=False, **filters\n )\n\n firewall_rule = self.network.find_firewall_rule(\n rule_name_or_id, ignore_missing=False\n )\n # short-circuit if rule already in firewall_rules list\n # the API can't do any re-ordering of existing rules\n if firewall_rule['id'] in firewall_policy['firewall_rules']:\n self.log.debug(\n 'Firewall rule %s already associated with firewall policy %s',\n rule_name_or_id,\n name_or_id,\n )\n return firewall_policy\n\n pos_params = {}\n if insert_after is not None:\n pos_params['insert_after'] = self.network.find_firewall_rule(\n insert_after, ignore_missing=False\n )['id']\n\n if insert_before is not None:\n pos_params['insert_before'] = self.network.find_firewall_rule(\n insert_before, ignore_missing=False\n )['id']\n\n return self.network.insert_rule_into_policy(\n firewall_policy['id'], firewall_rule['id'], **pos_params\n )", "def add_rule(self, rule: Rule):\n self.rules.append(rule)", "def add_policy(self, sec, ptype, rule):\n self._save_policy_line(ptype, rule)", "def add_rule(self, rule):\n assert isinstance(rule, Rule)\n self.rule.append(rule)", "def _insert_rule(cls, rule_suffix: str) -> None:\n insert_rule = cls._build_rule_string(IpTableCommandOption.INSERT, rule_suffix)\n log.info('Adding rule \"%s\"', insert_rule)\n utils.run_command(insert_rule, shell=True)", "def add_rule(self, rule) -> None:\n self.add_rules([rule])", "def insert_rule(rule, table=None):\n if not rule_exists(rule, table=table):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-I\"] + rule\n return call(cmdline)", "def add_policy(self, sec, ptype, rule):\r\n self._save_policy_line(ptype, rule)\r\n return True", "def append_rule(self, rule):\n\n self._control_manager.append_rule(rule)", "def insert(self, rule, ident):\n self[ident] = rule", "def test_insert_firewall_rule(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.FirewallPolicyInsertRule(\r\n test_cli20.MyApp(sys.stdout),\r\n None)\r\n myid = 'myid'\r\n args = ['myid', 'newrule',\r\n '--insert-before', 'rule2',\r\n '--insert-after', 'rule1']\r\n extrafields = {'firewall_rule_id': 'newrule',\r\n 'insert_before': 'rule2',\r\n 'insert_after': 'rule1'}\r\n\r\n self.mox.StubOutWithMock(cmd, \"get_client\")\r\n self.mox.StubOutWithMock(self.client.httpclient, \"request\")\r\n cmd.get_client().MultipleTimes().AndReturn(self.client)\r\n body = extrafields\r\n path = getattr(self.client, resource + \"_insert_path\")\r\n self.client.httpclient.request(\r\n 
test_cli20.MyUrlComparator(\r\n test_cli20.end_url(path % myid, format=self.format),\r\n self.client),\r\n 'PUT', body=test_cli20.MyComparator(body, self.client),\r\n headers=mox.ContainsKeyValue(\r\n 'X-Auth-Token',\r\n test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))\r\n args.extend(['--request-format', self.format])\r\n self.mox.ReplayAll()\r\n cmd_parser = cmd.get_parser(resource + \"_insert_rule\")\r\n shell.run_command(cmd, cmd_parser, args)\r\n self.mox.VerifyAll()\r\n self.mox.UnsetStubs()", "def add_rule(self, rule: interpreter.Rule) -> None:\n\n if rule.target not in self.rules:\n self.rules[rule.target] = rule\n else:\n self.rules[rule.target] |= rule", "def test_insert_rule_into_policy_compact(self):\n rule = FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs)\n retrieved_policy = deepcopy(self.mock_firewall_policy)\n retrieved_policy['firewall_rules'] = []\n updated_policy = deepcopy(retrieved_policy)\n updated_policy['firewall_rules'].append(rule['id'])\n self.register_uris(\n [\n dict(\n method='GET', # short-circuit\n uri=self._make_mock_url(\n 'firewall_policies', self.firewall_policy_name\n ),\n status_code=404,\n ),\n dict(\n method='GET',\n uri=self._make_mock_url(\n 'firewall_policies', name=self.firewall_policy_name\n ),\n json={'firewall_policies': [retrieved_policy]},\n ),\n dict(\n method='GET', # short-circuit\n uri=self._make_mock_url('firewall_rules', rule['name']),\n status_code=404,\n ),\n dict(\n method='GET',\n uri=self._make_mock_url(\n 'firewall_rules', name=rule['name']\n ),\n json={'firewall_rules': [rule]},\n ),\n dict(\n method='PUT',\n uri=self._make_mock_url(\n 'firewall_policies',\n retrieved_policy['id'],\n 'insert_rule',\n ),\n json=updated_policy,\n validate=dict(\n json={\n 'firewall_rule_id': rule['id'],\n 'insert_after': None,\n 'insert_before': None,\n }\n ),\n ),\n ]\n )\n r = self.cloud.insert_rule_into_policy(\n self.firewall_policy_name, rule['name']\n )\n self.assertDictEqual(updated_policy, r.to_dict())\n self.assert_calls()", "def add_rule(cls, rule: RewriteRule) -> None:\n if not isinstance(rule, RewriteRule):\n raise ValueError(f\"add_rule expected a RewriteRule not a '{type(rule)}'.\")\n cls.rules.append(rule)", "def insert(self, rule, ident):\n raise NotImplementedError", "def add_rule(self, rule, on=None, off=None, strength=1.):\n\n self.x[on:off, :, get_rule_index(rule, self.config)] = strength", "def add_rule(self, rule: validation.rule.Rule):\n self._rules.append(rule)\n\n return self", "def add_rule(rule):\n global RULE_DICT\n\n if rule[0] not in RULE_DICT:\n RULE_DICT[rule[0]] = []\n RULE_DICT[rule[0]].append(rule[1:])", "def put_resolver_rule_policy(Arn=None, ResolverRulePolicy=None):\n pass", "def create_firewall_rule(self, body=None):\r\n return self.post(self.firewall_rules_path, body=body)", "def add_rule(self, ip_protocol, from_port, to_port,\r\n src_group_name, src_group_owner_id, cidr_ip):\r\n rule = IPPermissions(self)\r\n rule.ip_protocol = ip_protocol\r\n rule.from_port = from_port\r\n rule.to_port = to_port\r\n self.rules.append(rule)\r\n rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)", "def _add_rule(cls, rule_suffix: str) -> None:\n if not cls._does_rule_exist(rule_suffix):\n cls._insert_rule(rule_suffix)", "def update_firewall_rule(self, firewall_rule, body=None):\r\n return self.put(self.firewall_rule_path % (firewall_rule), body=body)", "def add(self, trule, save_to_db=True):\n if save_to_db:\n self.save_to_db([trule])\n self._rules[trule.ruleid] = trule", "def _add_rule(self, 
rule):\r\n rule = re.sub(r'\\s*', '', rule)\r\n\r\n # split it on the arrow\r\n non_terminal, productions = rule.split('->')\r\n for production in productions.split('|'):\r\n self.productions.append(Production(non_terminal, list(production)))", "def register_rule(cls, rule_func):\n cls._rules_factories.append(rule_func)" ]
[ "0.74442726", "0.74174887", "0.72671765", "0.7232407", "0.7228418", "0.7160213", "0.7102518", "0.70741", "0.7025056", "0.7023518", "0.69237626", "0.6915233", "0.686694", "0.67392105", "0.6585327", "0.65667284", "0.6563158", "0.6464058", "0.6449564", "0.6287459", "0.6277808", "0.6265383", "0.6233146", "0.6217277", "0.61572564", "0.6095535", "0.6088974", "0.6072596", "0.60715", "0.60582227" ]
0.81768304
0
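The record above pairs the query "Inserts specified rule into firewall policy." with the legacy python-neutronclient method that PUTs the rule onto the policy's insert path. A usage sketch follows; the client constructor keywords reflect the older pre-session form of the v2.0 Client, and every ID plus the endpoint URL is a hypothetical placeholder. The body keys mirror the 'firewall_rule_id', 'insert_before', and 'insert_after' fields visible in the CLI test among the negatives:

from neutronclient.v2_0 import client

# Placeholder credentials and endpoint; a real deployment supplies its own.
neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# Insert the new rule into the policy, ordered between two existing rules.
body = {'firewall_rule_id': 'new-rule-id',
        'insert_after': 'rule1-id',
        'insert_before': 'rule2-id'}
neutron.firewall_policy_insert_rule('policy-id', body=body)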
Removes specified rule from firewall policy.
def firewall_policy_remove_rule(self, firewall_policy, body=None): return self.put(self.firewall_policy_remove_path % (firewall_policy), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_firewall_rule(self, firewall_rule):\r\n return self.delete(self.firewall_rule_path % (firewall_rule))", "def remove_policy(self, sec, ptype, rule):\n line = self.convert_to_item(ptype, rule)\n\n _id = line['id']['S']\n\n self.dynamodb.delete_item(\n Key={\n 'id': {\n 'S': _id,\n }\n },\n TableName=self.table_name,\n )\n\n return True", "def delete_firewall_policy(self, firewall_policy):\r\n return self.delete(self.firewall_policy_path % (firewall_policy))", "def bandwidth_limit_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_bandwidth_limit_rule(rule_id, policy_id)", "def remove_rule(self, chain, rule, wrap=True, top=False):\n try:\n self.rules.remove(IptablesRule(chain, rule, wrap, top))\n if not wrap:\n self.remove_rules.append(IptablesRule(chain, rule, wrap, top))\n self.dirty = True\n except ValueError:\n pass", "def removeRule(self, *args):\n return _libsbml.Model_removeRule(self, *args)", "def remove_rule(self, rule_number):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tif len(self._blacklist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\trule = self._blacklist_rules.pop(rule_number)\n\t\t\tself._log.info('Removing rule from the blacklist rules set: %s' % rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tif len(self._whitelist_rules) - 1 < rule_number:\n\t\t\t\traise ValueError('Rule not found in rules list')\n\t\t\trule = self._whitelist_rules.pop(rule_number)\n\t\t\tself._log.info('Removing rule from the whitelist rules set: %s' % rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()\n\t\treturn rule", "def delete_firewall_rule(self, server_uuid, firewall_rule_position):\n url = f'/server/{server_uuid}/firewall_rule/{firewall_rule_position}'\n return self.api.delete_request(url)", "def minimum_packet_rate_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_minimum_packet_rate_rule(rule_id, policy_id)", "def remove_rule_from_policy(\n self, name_or_id, rule_name_or_id, filters=None\n ):\n if not filters:\n filters = {}\n firewall_policy = self.network.find_firewall_policy(\n name_or_id, ignore_missing=False, **filters\n )\n\n firewall_rule = self.network.find_firewall_rule(rule_name_or_id)\n if not firewall_rule:\n # short-circuit: if firewall rule is not found,\n # return current firewall policy\n self.log.debug(\n 'Firewall rule %s not found for removing', rule_name_or_id\n )\n return firewall_policy\n\n if firewall_rule['id'] not in firewall_policy['firewall_rules']:\n # short-circuit: if firewall rule id is not associated,\n # log it to debug and return current firewall policy\n self.log.debug(\n 'Firewall rule %s not associated with firewall policy %s',\n rule_name_or_id,\n name_or_id,\n )\n return firewall_policy\n\n return self.network.remove_rule_from_policy(\n firewall_policy['id'], firewall_rule['id']\n )", "def delete_rule(self, ruleid):\n path = '%s/security-group-rules/%s' % (self.ver, ruleid)\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group rule %s: %s' % \n (ruleid, truncate(res)))\n return res[0]", "def delete_snat_rule(self, rule, ignore_missing=True):\n return self._delete(_snat.Rule, rule, ignore_missing=ignore_missing)", "def minimum_bandwidth_rule_delete(request, policy_id, rule_id):\n\n 
neutronclient(request).delete_minimum_bandwidth_rule(rule_id, policy_id)", "def remove_rule(self, ip_protocol, from_port, to_port,\r\n src_group_name, src_group_owner_id, cidr_ip):\r\n target_rule = None\r\n for rule in self.rules:\r\n if rule.ip_protocol == ip_protocol:\r\n if rule.from_port == from_port:\r\n if rule.to_port == to_port:\r\n target_rule = rule\r\n target_grant = None\r\n for grant in rule.grants:\r\n if grant.name == src_group_name:\r\n if grant.owner_id == src_group_owner_id:\r\n if grant.cidr_ip == cidr_ip:\r\n target_grant = grant\r\n if target_grant:\r\n rule.grants.remove(target_grant)\r\n if len(rule.grants) == 0:\r\n self.rules.remove(target_rule)", "def cloudflare_waf_firewall_rule_delete_request(self, rule_id: str, zone_id: str) -> Dict[str, Any]:\n return self._http_request(\n method='DELETE',\n url_suffix=f'zones/{zone_id}/firewall/rules',\n params={'id': rule_id})", "def remove_policy(self, sec, ptype, rule):\r\n deleted_count = self._delete_policy_lines(ptype, rule)\r\n return deleted_count > 0", "def delete_rule(self, index):\n del self.rules[index]", "def _remove_rule(cls, rule_suffix: str) -> None:\n if cls._does_rule_exist(rule_suffix):\n cls._delete_rule(rule_suffix)", "def delete_rule(rule, table=None):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-D\"] + rule\n return call(cmdline)", "def delete_security_group_rule(self, security_group_rule):\r\n return self.delete(self.security_group_rule_path %\r\n (security_group_rule))", "def Delete(self,\n priority=None,\n firewall_policy_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.RemoveRule(\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)[2])\n return self.WaitOperation(\n op_res,\n message='Deleting a rule from the organization firewall policy.')", "def delete_resolver_rule(ResolverRuleId=None):\n pass", "def _delete_rule(cls, rule_suffix: str) -> None:\n delete_rule = cls._build_rule_string(IpTableCommandOption.DELETE, rule_suffix)\n log.info('Delete rule \"%s\"', delete_rule)\n utils.run_command(delete_rule, shell=True)", "def delete(self, request, l7_rule_id, l7_policy_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_l7_rule,\n l7_rule_id, l7_policy_id,\n load_balancer_getter=l7_policy_get_load_balancer_id,\n resource_id=l7_policy_id)", "def delete_rule(self, rule_name):\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n\n del self.rules[rule_name]\n del self.rule_source[rule_name]\n del self.rule_str[rule_name]\n del self.rule_ft[rule_name]\n\n return True", "def delete_rule(uuid):\n with session_for_write() as session:\n stmt = (\n delete(\n model.RuleAction\n ).where(\n model.RuleAction.rule == uuid\n ).execution_options(synchronize_session=False)\n )\n session.execute(stmt)\n\n stmt = (\n delete(\n model.RuleCondition\n ).where(\n model.RuleCondition.rule == uuid\n ).execution_options(synchronize_session=False)\n )\n session.execute(stmt)\n\n stmt = (\n delete(\n model.Rule\n ).where(\n model.Rule.uuid == uuid\n ).execution_options(synchronize_session=False)\n )\n res = session.execute(stmt)\n if res.rowcount == 0:\n raise utils.RuleNotFoundError(uuid)", "def 
rule_delete(self, sgr_id):\n self.client.delete_security_group_rule(sgr_id)", "def delete_rule(self, id: str, rule_id: str) -> dict:\n r = requests.delete(self.url + '/{}/rules/{}'.format(id, rule_id), headers=self.headers)\n\n return r.json()", "def dscp_marking_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_dscp_marking_rule(rule_id, policy_id)", "def delete_firewall(self, firewall):\r\n return self.delete(self.firewall_path % (firewall))" ]
[ "0.77855355", "0.7329481", "0.71321225", "0.69293594", "0.6861332", "0.68613195", "0.6839967", "0.68148744", "0.6789386", "0.6733025", "0.67243755", "0.67061925", "0.6678299", "0.66566837", "0.6602159", "0.65895474", "0.65440726", "0.6460611", "0.64314723", "0.64075226", "0.6397083", "0.6340744", "0.62996733", "0.6269697", "0.62540823", "0.62282723", "0.6209108", "0.618899", "0.6154467", "0.6135202" ]
0.8063575
0
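Its counterpart in the record above detaches a rule rather than adding one; only the association with the policy is removed, while the rule object itself survives. A sketch under the same assumptions (pre-session client constructor, hypothetical IDs):

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# PUT to the policy's remove_rule path; the body names only the rule to detach.
neutron.firewall_policy_remove_rule('policy-id',
                                    body={'firewall_rule_id': 'old-rule-id'})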
Fetches information of a certain firewall.
def show_firewall(self, firewall, **_params): return self.get(self.firewall_path % (firewall), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_firewall(client, firewall_name):\n response = client.describe_firewall(\n FirewallName=firewall_name,\n )\n return response", "def show_firewall_rule(self, firewall_rule, **_params):\r\n return self.get(self.firewall_rule_path % (firewall_rule),\r\n params=_params)", "def get_firewall_capacities(logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.get(f\"http://{firewall_ip_and_port}/firewall/capacity\")\n if request.ok:\n return request.json()\n else:\n logger.warning(f\"Getting firewall capacities failed with code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Getting firewall capacities failed with code {request.status_code}\",\n \"time\": time.time()})\n\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. {e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})", "def get_firewalls(self):\r\n mask = ('firewallNetworkComponents,'\r\n 'networkVlanFirewall,'\r\n 'dedicatedFirewallFlag,'\r\n 'firewallGuestNetworkComponents,'\r\n 'firewallInterfaces,'\r\n 'firewallRules,'\r\n 'highAvailabilityFirewallFlag')\r\n\r\n return [firewall\r\n for firewall in self.account.getNetworkVlans(mask=mask)\r\n if has_firewall(firewall)]", "def show_firewall_policy(self, firewall_policy, **_params):\r\n return self.get(self.firewall_policy_path % (firewall_policy),\r\n params=_params)", "def firewalls(self) -> Sequence['outputs.SubResourceResponse']:\n return pulumi.get(self, \"firewalls\")", "def get_fw_ip_and_port(user, passw, logger, server_url):\n ip, port = get_ip_and_port('firewall', get_paos(user, passw, server_url, logger))\n return f\"{ip}:{port}\"", "def pull_info(task):\n\n interface_result = task.run(task=send_command, command=\"show interfaces\")\n task.host[\"facts\"] = interface_result.scrapli_response.genie_parse_output()\n interfaces = task.host[\"facts\"]\n for interface in interfaces:\n try:\n mac_addr = interfaces[interface][\"mac_address\"]\n if target == mac_addr:\n target_list.append(mac_addr)\n intf = interface\n print_info(task, intf)\n except KeyError:\n pass", "def _get_fwl_billing_item(self, firewall_id, dedicated=False):\r\n mask = ('mask[id,billingItem[id]]')\r\n if dedicated:\r\n fwl_svc = self.client['Network_Vlan_Firewall']\r\n else:\r\n fwl_svc = self.client['Network_Component_Firewall']\r\n return fwl_svc.getObject(id=firewall_id, mask=mask)", "def get_dedicated_fwl_rules(self, firewall_id):\r\n svc = self.client['Network_Vlan_Firewall']\r\n return svc.getRules(id=firewall_id, mask=RULE_MASK)", "def get_firewall_health(logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.get(f\"http://{firewall_ip_and_port}/firewall/health\")\n if request.ok:\n return True\n else:\n logger.warning(f\"Firewall health check failed with code {request.status_code}.\")\n dashboard_log.append({\"message\": f\"Firewall health check failed with code {request.status_code}.\",\n \"time\": time.time()})\n return False\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. 
{e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n return False", "def list_firewalls(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n\r\n return self.list('firewalls', self.firewalls_path, retrieve_all,\r\n **_params)", "def delete_firewall(self, firewall):\r\n return self.delete(self.firewall_path % (firewall))", "def get_firewall_rule(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_firewall_rule(\n name_or_id, ignore_missing=True, **filters\n )", "def check_linux_firewall():\n try:\n if \"active (running)\" in str(subprocess.check_output(\n 'systemctl status firewalld', shell=True)):\n firewall_package = \"firewalld\"\n return firewall_package\n except subprocess.CalledProcessError:\n try:\n if \"active (exited)\" in str(subprocess.check_output(\n 'systemctl status iptables', shell=True)):\n firewall_package = \"iptables\"\n return firewall_package\n except subprocess.CalledProcessError:\n try:\n if \"active (exited)\" in str(subprocess.check_output(\n 'systemctl status ufw', shell=True)):\n firewall_package = \"ufw\"\n return firewall_package\n except subprocess.CalledProcessError:\n return False", "def get_blocked_ips(logger, dashboard_log, firewall_ip_and_port):\n try:\n request = requests.get(f\"http://{firewall_ip_and_port}/firewall/blocked\")\n if request.ok:\n return request.json()\n else:\n logger.warning(f\"Getting blocked IPs on firewall failed with code {request.status_code}\")\n dashboard_log.append({\"message\": f\"Getting blocked IPs on firewall failed with code {request.status_code}\",\n \"time\": time.time()})\n except requests.exceptions.ConnectionError as e:\n logger.error(f\"Can't connect to firewall wrapper. 
{e}\")\n dashboard_log.append({\"message\": \"Can't connect to firewall wrapper.\",\n \"time\": time.time()})\n # error, continue program", "def _get_firewall_rules(firewall_rules):\n ret = []\n for key, value in firewall_rules.items():\n # Verify the required 'protocol' property is present in the cloud\n # profile config\n if \"protocol\" not in firewall_rules[key].keys():\n raise SaltCloudConfigError(\n \"The firewall rule '{}' is missing 'protocol'\".format(key)\n )\n ret.append(\n FirewallRule(\n name=key,\n protocol=firewall_rules[key].get(\"protocol\", None),\n source_mac=firewall_rules[key].get(\"source_mac\", None),\n source_ip=firewall_rules[key].get(\"source_ip\", None),\n target_ip=firewall_rules[key].get(\"target_ip\", None),\n port_range_start=firewall_rules[key].get(\"port_range_start\", None),\n port_range_end=firewall_rules[key].get(\"port_range_end\", None),\n icmp_type=firewall_rules[key].get(\"icmp_type\", None),\n icmp_code=firewall_rules[key].get(\"icmp_code\", None),\n )\n )\n\n return ret", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"end_ip\"] = None\n __props__[\"name\"] = None\n __props__[\"start_ip\"] = None\n __props__[\"type\"] = None\n return FirewallRule(resource_name, opts=opts, __props__=__props__)", "def get_firewall_rules(self, server):\n server_uuid, server_instance = uuid_and_instance(server)\n\n url = f'/server/{server_uuid}/firewall_rule'\n res = self.api.get_request(url)\n\n return [\n FirewallRule(server=server_instance, **firewall_rule)\n for firewall_rule in res['firewall_rules']['firewall_rule']\n ]", "def show(self, req, id):\n db_api.create_one_net()\n aa = db_api.get_one_net()\n return {\"show\":\"show\"}", "def show_config(): \n\n #Is single or multi node?\n nodes = hl.getAllNodes()\n if nodes:\n if request.method == \"POST\":\n nodeID = request.form['node1']\n\n else:\n nodeID = 1\n\n nodeReq = hl.getNode(\"ID\", nodeID)\n \n if nodeReq[\"Address\"] == \"self\":\n node = hl.getIptablesRules()\n else:\n res = hl.nodeGet(nodeReq[\"Address\"]+\"/getrules/\")\n if 'result' in res and res['result']:\n node = res[\"rules\"] \n else:\n node = None\n\n if node:\n return render_template('config.html', firewall = node, nodes = nodes, nodeID = hl.getNode(\"ID\", nodeID))\n else:\n flash(\"Error: cannot retrieve iptable rules from node\")\n else:\n return render_template('config.html', firewall = hl.getIptablesRules(), nodes = -1, nodeID = -1)", "def show_cdp(self):\n txt = \"\"\n for inf in self.interfaces:\n if self.interfaces[inf]['connect'] != ['none', 'none']:\n txt += \"%s interface %s connect to %s on interface %s\\n\"%(self.hostname, inf, self.interfaces[inf]['connect'][0], self.interfaces[inf]['connect'][1]) \n return txt", "def read_lwm2m_info():\n response, secure = lwm2m.get_lwm2m_security_info()\n \n if response != return_values.RESULT_SUCCESS:\n raise Exception(\"Failed to retrieve the lwm2m connection information. Return value {}.\".format(response))\n try:\n lwm2m_uri = \"coaps://\" + secure[\"LWM2M_HOST_NAME\"] + \":5684\"\n lwm2m_endpoint = secure[\"LWM2M_ENDPOINT\"]\n lwm2m_identity = secure[\"LWM2M_IDENTITY\"]\n lwm2m_security = secure[\"LWM2M_SECRET_KEY\"]\n except KeyError:\n raise Exception(\"The lwm2m security info message received from the api server is not in the expected format. 
Unable to proceed.\")\n \n return lwm2m_uri, lwm2m_endpoint, lwm2m_identity, lwm2m_security", "def get_network(isamAppliance, application_interface, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving the Application Interface Statistics\",\n \"/analysis/interface_statistics.json{0}\".format(\n tools.create_query_string(prefix=application_interface,\n timespan=statistics_duration)),requires_model=requires_model)", "def show_interface(enode, dev, shell=None):\n assert dev\n\n cmd = 'ip addr list dev {ldev}'.format(ldev=dev)\n response = enode(cmd, shell=shell)\n\n first_half_dict = _parse_ip_addr_show(response)\n\n d = None\n if (first_half_dict):\n cmd = 'ip -s link list dev {ldev}'.format(ldev=dev)\n response = enode(cmd, shell=shell)\n second_half_dict = _parse_ip_stats_link_show(response)\n\n d = first_half_dict.copy()\n d.update(second_half_dict)\n return d", "def get_policy(client, policy_name):\n response = client.describe_firewall_policy(\n FirewallPolicyName=policy_name,\n )\n return response", "def get_firewall_rule(self, server_uuid, firewall_rule_position, server_instance=None):\n url = f'/server/{server_uuid}/firewall_rule/{firewall_rule_position}'\n res = self.api.get_request(url)\n return FirewallRule(**res['firewall_rule'])", "def create_firewall(context):\n return [{\n 'type': 'templates/firewall.py',\n 'name': 'fc-firewall',\n 'properties': {\n 'projectId':\n '$(ref.fc-project.projectId)',\n 'network':\n '$(ref.fc-network.selfLink)',\n 'dependsOn':\n '$(ref.fc-network.resourceNames)',\n 'rules': [\n {\n 'name': 'allow-internal',\n 'description': 'Allow internal traffic on the network.',\n 'allowed': [{\n 'IPProtocol': 'icmp',\n }, {\n 'IPProtocol': 'tcp',\n 'ports': ['0-65535'],\n }, {\n 'IPProtocol': 'udp',\n 'ports': ['0-65535'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['10.128.0.0/9'],\n 'priority': 65534,\n },\n {\n 'name': 'leonardo-ssl',\n 'description': 'Allow SSL traffic from Leonardo-managed VMs.',\n 'allowed': [{\n 'IPProtocol': 'tcp',\n 'ports': ['443'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['0.0.0.0/0'],\n 'targetTags': ['leonardo'],\n },\n ],\n },\n }]", "async def fetch_clan(self, id: utils.Intable) -> Clan | None:\n id64 = make_id64(id=id, type=Type.Clan)\n return await self._connection.fetch_clan(id64)", "def get_firewall_policy(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_firewall_policy(\n name_or_id, ignore_missing=True, **filters\n )" ]
[ "0.7587161", "0.62764543", "0.6244351", "0.58603615", "0.58502734", "0.56528944", "0.5448396", "0.54362357", "0.53930527", "0.5256758", "0.5219113", "0.51706636", "0.51443547", "0.5079199", "0.50502574", "0.4981944", "0.4909233", "0.48432657", "0.48150116", "0.47847328", "0.47453088", "0.47258282", "0.47083756", "0.4708296", "0.46785337", "0.46525955", "0.46522516", "0.4642199", "0.462107", "0.46141726" ]
0.802783
0
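The show_firewall document above forwards arbitrary keyword arguments as query parameters, so a caller can restrict which attributes come back. A sketch with the same placeholder client and a hypothetical firewall ID; 'fields' here is a standard Neutron query parameter rather than anything specific to this method:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# GET the firewall; _params such as fields become URL query arguments.
fw = neutron.show_firewall('firewall-id', fields='status')
print(fw['firewall']['status'])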
Creates a new firewall.
def create_firewall(self, body=None): return self.post(self.firewalls_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_firewall(context):\n return [{\n 'type': 'templates/firewall.py',\n 'name': 'fc-firewall',\n 'properties': {\n 'projectId':\n '$(ref.fc-project.projectId)',\n 'network':\n '$(ref.fc-network.selfLink)',\n 'dependsOn':\n '$(ref.fc-network.resourceNames)',\n 'rules': [\n {\n 'name': 'allow-internal',\n 'description': 'Allow internal traffic on the network.',\n 'allowed': [{\n 'IPProtocol': 'icmp',\n }, {\n 'IPProtocol': 'tcp',\n 'ports': ['0-65535'],\n }, {\n 'IPProtocol': 'udp',\n 'ports': ['0-65535'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['10.128.0.0/9'],\n 'priority': 65534,\n },\n {\n 'name': 'leonardo-ssl',\n 'description': 'Allow SSL traffic from Leonardo-managed VMs.',\n 'allowed': [{\n 'IPProtocol': 'tcp',\n 'ports': ['443'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['0.0.0.0/0'],\n 'targetTags': ['leonardo'],\n },\n ],\n },\n }]", "def create_firewall_rule(self, body=None):\r\n return self.post(self.firewall_rules_path, body=body)", "def test_create_firewall_with_all_params(self):\r\n resource = 'firewall'\r\n cmd = firewall.CreateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n policy_id = 'my-policy-id'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--description', description,\r\n '--shared',\r\n '--admin-state-down',\r\n '--tenant-id', tenant_id,\r\n policy_id]\r\n position_names = ['firewall_policy_id', ]\r\n position_values = [policy_id, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description,\r\n shared=True, admin_state_up=False,\r\n tenant_id=tenant_id)", "def create_firewall_rule(project):\n listed_rules = subprocess.check_output(\n ['gcloud', 'compute', 'firewall-rules', 'list',\n '--format', 'value(name)',\n '--filter', 'name=%s' % LEO_FIREWALL_RULE,\n '--project', project])\n if LEO_FIREWALL_RULE in listed_rules:\n return\n Print.GN('Creating firewall rule for Leonardo VM.')\n subprocess.check_call(\n ['gcloud', 'compute', 'firewall-rules', 'create',\n LEO_FIREWALL_RULE,\n '--allow', 'tcp:80,tcp:443',\n '--priority', '900',\n '--target-tags', LEO_FIREWALL_RULE,\n '--project', project])", "def test_create_firewall_with_mandatory_params(self):\r\n resource = 'firewall'\r\n cmd = firewall.CreateFirewall(test_cli20.MyApp(sys.stdout), None)\r\n name = ''\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n policy_id = 'my-policy-id'\r\n args = ['--tenant-id', tenant_id, policy_id, ]\r\n position_names = ['firewall_policy_id', ]\r\n position_values = [policy_id, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True, tenant_id=tenant_id)", "def create_firewall_policy(self, body=None):\r\n return self.post(self.firewall_policies_path, body=body)", "def create_firewall_rule(self, server, firewall_rule_body):\n server_uuid, server_instance = uuid_and_instance(server)\n\n url = f'/server/{server_uuid}/firewall_rule'\n body = {'firewall_rule': firewall_rule_body}\n res = self.api.post_request(url, body)\n\n return FirewallRule(server=server_instance, **res['firewall_rule'])", "def _setup_create_firewall_rule_with_all_params(self, protocol='tcp'):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.CreateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n source_ip = '192.168.1.0/24'\r\n destination_ip = '192.168.2.0/24'\r\n source_port = '0:65535'\r\n destination_port = '0:65535'\r\n 
action = 'allow'\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--protocol', protocol,\r\n '--source-ip-address', source_ip,\r\n '--destination-ip-address', destination_ip,\r\n '--source-port', source_port,\r\n '--destination-port', destination_port,\r\n '--action', action,\r\n '--enabled',\r\n '--admin-state-up',\r\n '--tenant-id', tenant_id]\r\n position_names = []\r\n position_values = []\r\n if protocol == 'any':\r\n protocol = None\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n protocol=protocol,\r\n source_ip_address=source_ip,\r\n destination_ip_address=destination_ip,\r\n source_port=source_port,\r\n destination_port=destination_port,\r\n action=action, enabled=True,\r\n tenant_id=tenant_id)", "def create_firewall_rule(self, ipaddressid, protocol, cidrlist, ftype, \n startport=None, endport=None,\n icmpcode=None, icmptype=None): \n params = {'command':'createFirewallRule',\n 'ipaddressid':ipaddressid,\n 'protocol':protocol,\n 'cidrlist':cidrlist,\n 'type':ftype}\n\n if startport:\n params['startport'] = startport\n if endport:\n params['endport'] = endport\n if icmpcode:\n params['icmpcode'] = icmpcode\n if icmptype:\n params['icmptype'] = icmptype \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['createfirewallruleresponse']['jobid']\n self.logger.debug('Start job - createfirewallruleresponse: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def apply_firewall(client, firewall_name, firewall_configs):\n subnets = [{\"SubnetId\": subnet} for subnet in firewall_configs[\"subnet_ids\"]]\n policy_response = get_policy(client, firewall_configs[\"policy\"])\n policy_arn = policy_response[\"FirewallPolicyResponse\"][\"FirewallPolicyArn\"]\n\n # Check if firewall exists and updates the policy if necessary\n try:\n get_firewall_response = get_firewall(client, firewall_name)\n print(f\"AWS Firewall {firewall_name} already exists.\")\n current_policy_arn = get_firewall_response[\"Firewall\"][\"FirewallPolicyArn\"]\n if current_policy_arn != policy_arn:\n print(\"Updating policy association...\")\n # This needs retry/backoff logic in case UpdateToken is in use\n update_token = get_firewall_response[\"UpdateToken\"]\n update_response = client.associate_firewall_policy(\n UpdateToken=update_token,\n FirewallName=firewall_name,\n FirewallPolicyArn=policy_arn\n )\n return update_response\n else:\n print(f\"Policy on AWS Firewall {firewall_name} hasn't changed, skipping...\")\n return None\n except client.exceptions.ResourceNotFoundException:\n print(f\"Creating AWS Firewall {firewall_name}...\")\n\n response = client.create_firewall(\n FirewallName=firewall_name,\n FirewallPolicyArn=policy_arn,\n VpcId=firewall_configs[\"vpc_id\"],\n SubnetMappings=subnets\n )\n return response", "def create_firewall_group(self, **kwargs):\n self._lookup_ingress_egress_firewall_policy_ids(kwargs)\n if 'ports' in kwargs:\n kwargs['ports'] = self._get_port_ids(kwargs['ports'])\n return self.network.create_firewall_group(**kwargs)", "def fusion_api_create_fc_network(self, body, api=None, headers=None):\n return self.fc_network.create(body, api, headers)", "def add(self, source, destination, port):\n logger.info('Adding path from %s to %s on port %s', source, destination, port)\n rules = 
[{\"IPProtocol\": \"tcp\", \"ports\": [int(port)]}]\n src_tags, dest_tags, src_ranges, _ = self._extract_service_info(\n source, destination)\n firewall_name = \"bu-%s-%s-%s\" % (destination.network.name, destination.name, port)\n try:\n firewall = self.driver.ex_get_firewall(firewall_name)\n if isinstance(source, CidrBlock):\n if not firewall.source_ranges:\n firewall.source_ranges = []\n firewall.source_ranges.append(str(source.cidr_block))\n logger.info(firewall.source_ranges)\n if isinstance(source, Service):\n if not firewall.source_tags:\n firewall.source_tags = []\n source_tag = \"%s-%s\" % (source.network.name, source.name)\n firewall.source_tags.append(source_tag)\n logger.info(firewall.source_tags)\n firewall = self.driver.ex_update_firewall(firewall)\n except ResourceNotFoundError:\n logger.info(\"Firewall %s not found, creating.\", firewall_name)\n firewall = self.driver.ex_create_firewall(firewall_name, allowed=rules,\n network=destination.network.name,\n source_ranges=src_ranges,\n source_tags=src_tags,\n target_tags=dest_tags)\n return Path(destination.network, source, destination, \"tcp\", port)", "def test_create_firewall_policy_with_all_params(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n firewall_rules_arg = 'rule_id1 rule_id2'\r\n firewall_rules_res = ['rule_id1', 'rule_id2']\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--firewall-rules', firewall_rules_arg,\r\n '--audited',\r\n '--tenant-id', tenant_id,\r\n '--admin-state_up',\r\n name]\r\n position_names = ['name', ]\r\n position_values = [name, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n firewall_rules=firewall_rules_res,\r\n audited=True, admin_state_up=True,\r\n tenant_id=tenant_id)", "def __init__(self):\n log.debug(\"Firewall initialized.\")", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlPortFwd_Create'))", "def Create(self,\n firewall_policy=None,\n parent_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeCreateRequestTuple(firewall_policy, parent_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Insert(\n self._MakeCreateRequestTuple(firewall_policy, parent_id)[2])\n return self.WaitOperation(\n op_res, message='Creating the organization firewall policy.')", "def create_port(self, body=None):\r\n return self.post(self.ports_path, body=body)", "def create(self, name, network_id, fixed_ips, host_id=None, profile=None,\n vnic_type=None, device_owner=None, device_id=None, \n security_groups=None, mac_address=None, tenant_id=None):\n data = {\n \"port\": {\n \"network_id\": network_id,\n \"name\": name,\n \"admin_state_up\": True,\n \"fixed_ips\": fixed_ips,\n }\n }\n if tenant_id is not None:\n data[u'port'][u'tenant_id'] = tenant_id\n if host_id is not None:\n data['port']['binding:host_id'] = host_id\n if profile is not None:\n data['port']['binding:profile'] = profile\n if host_id is not None:\n data['port']['binding:vnic_type'] = vnic_type\n if device_owner is not None:\n data['port']['device_owner'] = device_owner\n if device_id is not None:\n data['port']['device_id'] = device_id\n if security_groups is not None:\n data['port']['security_groups'] = 
security_groups\n if mac_address is not None:\n data[u'allowed_address_pairs'] = [{u'mac_address':mac_address,\n u'ip_address':fixed_ips[0][u'ip_address']}]\n\n path = '%s/ports' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack port: %s' % truncate(res))\n return res[0]['port']", "def create(openstack_resource):\n # Update port config before create port\n _update_port_config(openstack_resource.config)\n\n # Create port\n created_resource = openstack_resource.create()\n ipv4_list, ipv6_list = _get_fixed_ips_from_port(created_resource)\n fixed_ips = ipv4_list + ipv6_list\n _export_ips_to_port_instance(ipv4_list, ipv6_list)\n\n # Handle runtime properties\n update_runtime_properties(\n {\n RESOURCE_ID: created_resource.id,\n 'fixed_ips': fixed_ips,\n 'mac_address': created_resource.mac_address,\n 'allowed_address_pairs': created_resource.allowed_address_pairs,\n }\n )", "def write_firewall_script():\n ip_address = Setup.parse_options()['ip_address']\n port = Setup.parse_options()['port']\n interface = Setup.parse_options()['interface']\n\n firewall_script = \"\"\"#!/bin/bash\n iptables -F\n iptables -t nat -A PREROUTING -p tcp -i %s -d %s --dport 80 -j DNAT --to %s:%s\n iptables -t nat -A PREROUTING -p tcp -i %s -d %s --dport 443 -j DNAT --to %s:%s\n iptables -A FORWARD -p tcp -i %s -d %s --dport %s -j ACCEPT\n iptables -A INPUT -i lo -j ACCEPT\n iptables -A INPUT -p tcp -m tcp --dport %s -j ACCEPT\n iptables -P OUTPUT ACCEPT\n iptables -P INPUT DROP\n \"\"\" % (interface, ip_address, ip_address, port,\n interface, ip_address, ip_address, port,\n interface, ip_address, port,\n port)\n\n directory = 'resources'\n if not os.path.exists(directory):\n os.makedirs(directory)\n rules = file('resources/rules.sh', 'w')\n rules.write(firewall_script)\n rules.close()\n subprocess.call([\"chmod\", \"755\", \"resources/rules.sh\"])\n subprocess.call(\"./resources/rules.sh\")", "def Create(self,\n firewall_policy=None,\n firewall_policy_rule=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.AddRule(\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)[2])\n return self.WaitOperation(\n op_res, message='Adding a rule to the organization firewall policy.')", "def add_windows_firewall_rule(attacker_ip, listening_ip):\n try:\n add_rule_result = subprocess.check_output(\n 'netsh advfirewall firewall add rule name=\"flytrap - \"'\n + attacker_ip + ' description=\"Rule automatically added by '\n 'flytrap.\" dir=in action=block '\n 'protocol=any localip=' + listening_ip +\n ' remoteip=' + attacker_ip)\n if \"Ok.\" in str(add_rule_result):\n print(attacker_ip + \" has been successfully blocked.\")\n else:\n print(\"Error adding firewall rule to block \" + attacker_ip)\n except subprocess.CalledProcessError:\n print(\"Unable to add firewall rule. 
Flytrap needs to be run as \"\n \"administrator.\")", "def create():\n app = Flask(__name__, instance_relative_config=False)\n\n app.config.update(\n FLASK_ENV=\"development\",\n DEBUG=True,\n SECRET_KEY=\"Segredo\",\n SQLALCHEMY_DATABASE_URI=f\"sqlite:////tmp/escola.db\",\n SQLALCHEMY_ECHO=False,\n SQLALCHEMY_TRACK_MODIFICATIONS=False\n )\n\n db.init_app(app)\n\n with app.app_context():\n from . import routes\n db.create_all()\n\n return app", "def show_firewall(self, firewall, **_params):\r\n return self.get(self.firewall_path % (firewall), params=_params)", "def cloudflare_waf_firewall_rule_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n action = args['action']\n zone_id = args.get('zone_id', client.zone_id)\n filter_id = args.get('filter_id')\n filter_expression = args.get('filter_expression')\n products = argToList(args.get('products'))\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n priority = arg_to_number(args.get('priority'))\n ref = args.get('ref')\n\n response = client.cloudflare_waf_firewall_rule_create_request(\n action, zone_id,\n description=description, products=products, paused=paused, priority=priority, ref=ref,\n filter_id=filter_id, filter_expression=filter_expression)\n\n output = response['result']\n firewall_rule_output = output[0]\n\n firewall_rule = [{'id': dict_safe_get(firewall_rule_output, ['id']),\n 'action': dict_safe_get(firewall_rule_output, ['action']),\n 'paused': dict_safe_get(firewall_rule_output, ['paused']),\n 'description': dict_safe_get(firewall_rule_output, ['description']),\n 'filter_id': dict_safe_get(firewall_rule_output, ['filter', 'id']),\n 'filter_expression': dict_safe_get(firewall_rule_output, ['filter', 'expression']),\n 'products': dict_safe_get(firewall_rule_output, ['products']),\n 'ref': dict_safe_get(firewall_rule_output, ['ref']),\n 'priority': dict_safe_get(firewall_rule_output, ['priority']),\n 'zone_id': zone_id}]\n\n readable_output = tableToMarkdown(\n name='Firewall rule was successfully created.',\n t=firewall_rule,\n headers=['id', 'action', 'filter_id', 'filter_expression', 'products', 'priority', 'paused', 'description', 'ref'],\n headerTransform=string_to_table_header\n )\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='CloudflareWAF.FirewallRule',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", "def createTunnel(self, name, targetIP):\r\n return self._ref.callRemote('createTunnel', name, targetIP)", "def delete_firewall(self, firewall):\r\n return self.delete(self.firewall_path % (firewall))", "def test_create_firewall_group_compact(self):\n firewall_group = deepcopy(self._mock_firewall_group_attrs)\n del firewall_group['ports']\n del firewall_group['egress_firewall_policy']\n del firewall_group['ingress_firewall_policy']\n created_firewall = deepcopy(firewall_group)\n created_firewall.update(\n egress_firewall_policy_id=None,\n ingress_firewall_policy_id=None,\n ports=[],\n )\n del firewall_group['id']\n self.register_uris(\n [\n dict(\n method='POST',\n uri=self._make_mock_url('firewall_groups'),\n json={'firewall_group': created_firewall},\n validate=dict(json={'firewall_group': firewall_group}),\n )\n ]\n )\n r = self.cloud.create_firewall_group(**firewall_group)\n self.assertDictEqual(\n FirewallGroup(connection=self.cloud, **created_firewall).to_dict(),\n r.to_dict(),\n )\n self.assert_calls()", "def create_network(self, body=None):\r\n return self.post(self.networks_path, 
body=body)" ]
[ "0.69996005", "0.6987328", "0.6905342", "0.6708049", "0.66985524", "0.6572095", "0.6032799", "0.59174097", "0.58864784", "0.57559496", "0.5744627", "0.5549965", "0.5435781", "0.5376397", "0.5296857", "0.5275886", "0.5272854", "0.525992", "0.5238594", "0.52326596", "0.5226379", "0.5202005", "0.518869", "0.5162895", "0.51541364", "0.5149524", "0.5110743", "0.51082236", "0.51072633", "0.5092379" ]
0.84064144
0
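For creation, the record's document POSTs a wrapped body to the firewalls collection. The sketch below builds that body from the fields exercised by the CLI tests in the negatives (name, policy ID, admin state); the IDs and credentials remain placeholders:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# POST a wrapped 'firewall' dict; the server returns the created resource.
body = {'firewall': {'name': 'my-firewall',
                     'firewall_policy_id': 'policy-id',
                     'admin_state_up': True}}
created = neutron.create_firewall(body=body)
print(created['firewall']['id'])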
Deletes the specified firewall.
def delete_firewall(self, firewall): return self.delete(self.firewall_path % (firewall))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_firewall_rule(self, firewall_rule):\r\n return self.delete(self.firewall_rule_path % (firewall_rule))", "def delete_firewall_policy(self, firewall_policy):\r\n return self.delete(self.firewall_policy_path % (firewall_policy))", "def delete_firewall_rule(self, server_uuid, firewall_rule_position):\n url = f'/server/{server_uuid}/firewall_rule/{firewall_rule_position}'\n return self.api.delete_request(url)", "def delete_firewall_rule(self, firewall_rule_id): \n params = {'command':'deleteFirewallRule',\n 'id':firewall_rule_id} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletefirewallruleresponse']['jobid']\n self.logger.debug('Start job - deleteFirewallRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def delete_firewall_rule(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_rule = self.network.find_firewall_rule(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_rule(\n firewall_rule, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall rule %s not found for deleting', name_or_id\n )\n return False\n return True", "def stop_firewall():\r\n connections.execute_shell_command('sc stop TmPfw')", "def test_esg_firewall_rule_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|id', esg_firewall.delete,\n {'rule': {\n 'esg_id': 'esg_id'\n }},\n ['firewallRule'], {\n 'uri_parameters': {'edgeId': 'esg_id', 'ruleId': 'id'}\n },\n additional_params=['rule_id']\n )", "def delete_firewall_policy(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_policy = self.network.find_firewall_policy(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_policy(\n firewall_policy, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall policy %s not found for deleting', name_or_id\n )\n return False\n return True", "def show_firewall(self, firewall, **_params):\r\n return self.get(self.firewall_path % (firewall), params=_params)", "def delete(self):\n self.rpc.call(MsfRpcMethod.DbDelWorkspace, [{'workspace': self.name}])", "def create_firewall(self, body=None):\r\n return self.post(self.firewalls_path, body=body)", "def Delete(self, fp_id=None, batch_mode=False, only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeDeleteRequestTuple(fp_id=fp_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Delete(self._MakeDeleteRequestTuple(fp_id=fp_id)[2])\n operation_poller = DeletePoller(self._service, self.ref)\n return self.WaitOperation(\n op_res,\n operation_poller=operation_poller,\n message='Deleting the organization firewall policy.')", "def delete_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover", "def delete_workspace(client, workspace):\n data = {\"workspace\": workspace}\n return client._creoson_post(\"windchill\", \"delete_workspace\", data)", "def cloudflare_waf_firewall_rule_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n rule_id = args['id']\n zone_id = args.get('zone_id', client.zone_id)\n\n response = client.cloudflare_waf_firewall_rule_delete_request(rule_id, zone_id)\n\n return CommandResults(\n readable_output=f'Firewall rule {rule_id} was successfully deleted.',\n 
raw_response=response\n )", "def firewall_policy_remove_rule(self, firewall_policy, body=None):\r\n return self.put(self.firewall_policy_remove_path % (firewall_policy),\r\n body=body)", "def cancel_firewall(self, firewall_id, dedicated=False):\r\n fwl_billing = self._get_fwl_billing_item(firewall_id, dedicated)\r\n billing_id = fwl_billing['billingItem']['id']\r\n billing_item = self.client['Billing_Item']\r\n return billing_item.cancelService(id=billing_id)", "def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)", "def delete_firewall_group(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_group = self.network.find_firewall_group(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_group(\n firewall_group, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall group %s not found for deleting', name_or_id\n )\n return False\n return True", "def Delete(self,\n priority=None,\n firewall_policy_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.RemoveRule(\n self._MakeDeleteRuleRequestTuple(\n priority=priority, firewall_policy=firewall_policy_id)[2])\n return self.WaitOperation(\n op_res,\n message='Deleting a rule from the organization firewall policy.')", "def delete_listener(self, context, listener):\n LOG.info(\"Received request 'Delete Listener' for LB:%(lb)s \",\n {'lb': listener['loadbalancer_id']})\n arg_dict = {'context': context,\n lb_const.LISTENER: listener,\n }\n self._send_event(lb_const.EVENT_DELETE_LISTENER_V2, arg_dict,\n serialize=True,\n binding_key=listener['loadbalancer_id'],\n key=listener['id'])", "def delete(self, id):\n context = request.environ.get('context')\n dbapi.net_interfaces_delete(context, id)\n return None, 204, None", "def delete(self, db: Session) -> Optional[FidesopsBase]:\n _ = [rule.delete(db=db) for rule in self.rules]\n return super().delete(db=db)", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]) and \\\n not all([i in kwargs for i in ('proto', 'port')]):\n raise TypeError('Expected host or port/proto pair.')\n self.dbdel('service', kwargs)", "def remove_firewall_rule_from_machine(\n self,\n machine: Union[dto.Machine, str],\n firewall_rule: Union[dto.FirewallRule, str]\n ) -> Iterable[dto.FirewallGroup]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def delete(self):\n logger.info('Delete the port chain: %s' % self.name)\n # Delete port chain\n self.pc_client.delete('port_chain', self.name)\n\n logger.info('Delete the flow classifier.')\n self.pc_client.delete('flow_classifier', self.flow_conf['name'])\n\n # Delete all port pair groups\n logger.info('Delete port pair groups and port pairs.')\n srv_ppgrp_lst = self.srv_chain.get_srv_ppgrp_id()\n for grp_idx in range(len(srv_ppgrp_lst)):\n pp_grp_name = 'pp_grp_%s' % grp_idx\n self.pc_client.delete('port_pair_group', pp_grp_name)\n\n # Delete all port pairs\n for grp_idx, pp_grp in enumerate(srv_ppgrp_lst):\n for pp_idx in range(len(pp_grp)):\n pp_name = 'pp_%s_%s' % (grp_idx, pp_idx)\n self.pc_client.delete('port_pair', 
pp_name)", "def delete(env, identifier, listener):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n try:\n mgr.remove_lb_listener(uuid, listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def test_create_update_delete_firewall_rule(self):\n ctx = context.get_admin_context()\n clnt = self.plugin._client\n with self.firewall_rule(do_delete=False) as fwr:\n fwr_id = fwr['firewall_rule']['id']\n # Create Firewall Rule\n crd_rule = {'firewall_rule': fwr}\n clnt.create_firewall_rule.assert_called_once_with(fwr)\n # Update Firewall Rule\n data = {'firewall_rule': {'name': 'new_rule_name',\n 'source_port': '10:20',\n 'destination_port': '30:40'}}\n fw_rule = self.plugin.update_firewall_rule(ctx, fwr_id, data)\n crd_rule = {'firewall_rule': fw_rule}\n clnt.update_firewall_rule.assert_called_once_with(fwr_id, crd_rule)\n # Delete Firewall Rule\n self.plugin.delete_firewall_rule(ctx, fwr_id)\n clnt.delete_firewall_rule.assert_called_once_with(fwr_id)", "def delete(self, request, flavor_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_flavor(flavor_id,\n ignore_missing=True)", "def cloudflare_waf_ip_list_delete_request(self, list_id: str) -> Dict[str, Any]:\n return self._http_request(\n method='DELETE',\n url_suffix=f'accounts/{self.account_id}/rules/lists/{list_id}')" ]
[ "0.6898676", "0.64936376", "0.6319546", "0.59047914", "0.58547926", "0.5721806", "0.56004256", "0.5584247", "0.55730397", "0.55495495", "0.5423727", "0.5345709", "0.525416", "0.5244898", "0.5219103", "0.5215542", "0.5201969", "0.51794297", "0.5145191", "0.5130948", "0.5072114", "0.5026695", "0.49941516", "0.49918085", "0.49884862", "0.49787146", "0.49736854", "0.4963843", "0.49571905", "0.49448124" ]
0.88908124
0
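Deletion is the simplest of the set: a DELETE against the firewall's own path with no request body. A sketch with the same placeholder client and a hypothetical ID:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# DELETE against the firewall's path; success returns no body.
neutron.delete_firewall('firewall-id')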
Remove a router from l3 agent.
def remove_router_from_l3_agent(self, l3_agent, router_id): return self.delete((self.agent_path + self.L3_ROUTERS + "/%s") % ( l3_agent, router_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_router_from_l3_agent(self):\n self.agents_client.create_router_on_l3_agent(\n self.agent['id'], router_id=self.router['id'])\n self.addCleanup(\n test_utils.call_and_ignore_notfound_exc,\n self.agents_client.delete_router_from_l3_agent,\n self.agent['id'], router_id=self.router['id'])\n\n with self.override_role():\n self.agents_client.delete_router_from_l3_agent(\n self.agent['id'], router_id=self.router['id'])", "def delete_router(self, router):\r\n return self.delete(self.router_path % (router))", "def remove_gateway_router(self, router):\r\n return self.put((self.router_path % router),\r\n body={'router': {'external_gateway_info': {}}})", "def delete(self, oid):\n path = '%s/routers/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack router: %s' % truncate(res))\n return res[0]", "def remove_interface_router(self, router, body=None):\r\n return self.put((self.router_path % router) +\r\n \"/remove_router_interface\", body=body)", "def _delete_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def delete_router(self, router_id):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/routers/\" + \\\n router_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Router delete Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted router:%s \" % router_id)\n return True", "def delete_router(self, name_or_id):\n router = self.network.find_router(name_or_id, ignore_missing=True)\n if not router:\n self.log.debug(\"Router %s not found for deleting\", name_or_id)\n return False\n\n self.network.delete_router(router)\n\n return True", "def unregister_router(self, hostname):", "def remove_agent(self):\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)\n\n if self.agent_type == \"zombie\":\n self.model.infected -= 1\n elif self.agent_type == \"human\":\n self.model.susceptible -= 1\n del self", "def router_gateway_clear(mgr_or_client, router_id, *args, **kwargs):\n net_client = _g_router_client(mgr_or_client)\n router = router_show(mgr_or_client, router_id)\n router_id = router['id']\n body = net_client.update_router(router_id,\n external_gateway_info=dict())\n return body['router']", "def test_create_router_on_l3_agent(self):\n with self.override_role():\n self.agents_client.create_router_on_l3_agent(\n self.agent['id'], router_id=self.router['id'])\n self.addCleanup(\n test_utils.call_and_ignore_notfound_exc,\n self.agents_client.delete_router_from_l3_agent,\n self.agent['id'], router_id=self.router['id'])", "def del_returned_route_on_gw(self, context, router_id, subnet_id):\n LOG.debug('OVNL3RouterPlugin::')\n ovn_router_name = utils.ovn_gateway_name(router_id)\n subnet = self._plugin.get_subnet(context, subnet_id)\n route = {'destination': subnet['cidr'], 'nexthop': '169.254.128.2'}\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.delete_static_route(ovn_router_name,\n ip_prefix=route['destination'],\n nexthop=route['nexthop']))", "def 
add_router_to_l3_agent(self, l3_agent, body):\r\n return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent,\r\n body=body)", "def delete_agent(self, agent):\r\n return self.delete(self.agent_path % (agent))", "def del_router_info(self):\n debug.info(0,\"Erasing router info\")\n layer_num = techlayer[\"text\"]\n self.cell.objs = [x for x in self.cell.objs if x.layerNumber != layer_num]", "def removeTunnel (self, sourcemachineguid, sourceport, destinationmachineguid, destinationport, jobguid = \"\", executionparams = {}):\n params =dict()\n params['sourceport'] = sourceport\n params['destinationmachineguid'] = destinationmachineguid\n params['sourcemachineguid'] = sourcemachineguid\n params['destinationport'] = destinationport\n return q.workflowengine.actionmanager.startActorAction('ras', 'removeTunnel', params, jobguid=jobguid, executionparams=executionparams)", "def delete(self):\n self.model.remove_agents(self)", "def dereference(self, peer, and_router=False):\n if peer == self.node:\n return\n\n self.peers.remove(peer)\n if and_router != True:\n return\n\n router = filter(lambda x: x.node == peer, self.routers)\n if not any(router): return\n self.routers.remove(router[0])", "def pre_bgp_router_delete(self, resource_id):\n pass", "def router_interface_delete(mgr_or_client, router_id, subnet_id,\n *args, **kwargs):\n net_client = _g_router_client(mgr_or_client)\n router = router_show(mgr_or_client, router_id)\n router_id = router['id']\n return net_client.remove_router_interface(router_id,\n subnet_id=subnet_id)", "def post_bgp_router_delete(self, resource_id, resource_dict):\n pass", "def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. 
uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return", "def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)", "def remove(self, v: Route): # pylint: disable=arguments-differ, undefined-variable\n i = hash(str(v.addr) + str(v.nh))\n if i not in self._destinations:\n raise KeyError(\"{} key not found in the history RIB\".format(i))\n if v in self._destinations[i]:\n self._destinations[i].remove(v)\n if len(self._destinations[i]) == 0:\n del self._destinations[i]\n return None\n return v", "def remove(self, destination: n):\n try:\n self.connections.pop(destination)\n except KeyError:\n pass", "def remove(self):\n traci.vehicle.remove(self.id)", "def remove(self):\n traci.vehicle.remove(self.id)", "def remove_route(self, router_subnet_cidr, destination, next_hop):\n\n router = self._get_router_by_ip_address(router_subnet_cidr)\n\n logger.info(\"Removing static route from VIM %s router=%s destination=%s, nexthop=%s\",\n self.vim_name, router['id'], destination, next_hop)\n\n routes = router['routes']\n static_route = {\n \"destination\": destination,\n \"nexthop\": next_hop\n }\n try:\n routes.remove(static_route)\n static_routes = {\n \"router\": {\n \"routes\": routes\n }\n }\n\n result = self.neutron.update_router(router['id'], static_routes)\n\n logger.debug(\"Static route removed! %s\", str(result))\n\n except NeutronClientException as e:\n logger.error(\"Router %s: %s\", router['id'], e.message)\n raise VIMAgentsException(ERROR, e.message)\n\n except ValueError:\n logger.error(\"Static route not found in router %s! destination=%s, next_hop=%s\",\n router['name'], destination, next_hop)" ]
[ "0.7786139", "0.777706", "0.71951383", "0.6692688", "0.664114", "0.6546304", "0.6483943", "0.6482314", "0.6398428", "0.6340283", "0.6256002", "0.623186", "0.6205545", "0.61974275", "0.6168839", "0.60581374", "0.58679247", "0.58651936", "0.5855954", "0.58352876", "0.5760015", "0.5713752", "0.5649804", "0.5647829", "0.56282556", "0.5622866", "0.56086004", "0.558474", "0.558474", "0.55200565" ]
0.8547501
0
Fetches a loadbalancer agent hosting a pool.
def get_lbaas_agent_hosting_pool(self, pool, **_params): return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_agent_pool(agent_pool_name: Optional[str] = None,\n kubernetes_cluster_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAgentPoolResult:\n __args__ = dict()\n __args__['agentPoolName'] = agent_pool_name\n __args__['kubernetesClusterName'] = kubernetes_cluster_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:networkcloud:getAgentPool', __args__, opts=opts, typ=GetAgentPoolResult).value\n\n return AwaitableGetAgentPoolResult(\n administrator_configuration=pulumi.get(__ret__, 'administrator_configuration'),\n agent_options=pulumi.get(__ret__, 'agent_options'),\n attached_network_configuration=pulumi.get(__ret__, 'attached_network_configuration'),\n availability_zones=pulumi.get(__ret__, 'availability_zones'),\n count=pulumi.get(__ret__, 'count'),\n detailed_status=pulumi.get(__ret__, 'detailed_status'),\n detailed_status_message=pulumi.get(__ret__, 'detailed_status_message'),\n extended_location=pulumi.get(__ret__, 'extended_location'),\n id=pulumi.get(__ret__, 'id'),\n kubernetes_version=pulumi.get(__ret__, 'kubernetes_version'),\n labels=pulumi.get(__ret__, 'labels'),\n location=pulumi.get(__ret__, 'location'),\n mode=pulumi.get(__ret__, 'mode'),\n name=pulumi.get(__ret__, 'name'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n system_data=pulumi.get(__ret__, 'system_data'),\n tags=pulumi.get(__ret__, 'tags'),\n taints=pulumi.get(__ret__, 'taints'),\n type=pulumi.get(__ret__, 'type'),\n upgrade_settings=pulumi.get(__ret__, 'upgrade_settings'),\n vm_sku_name=pulumi.get(__ret__, 'vm_sku_name'))", "def agent_pool(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"agent_pool\")", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.find_pool(pool_id)\n pool = _get_sdk_object_dict(pool)\n\n if request.GET.get('includeChildResources'):\n resources = {}\n resources['pool'] = pool\n\n if pool.get('members'):\n member_list = _sdk_object_to_list(\n conn.load_balancer.members(pool_id))\n resources['members'] = member_list\n\n if pool.get('health_monitor_id'):\n monitor_id = pool['health_monitor_id']\n monitor = conn.load_balancer.find_health_monitor(\n monitor_id)\n monitor = _get_sdk_object_dict(monitor)\n resources['monitor'] = monitor\n\n return resources\n else:\n return pool", "def get_agent_pool_output(agent_pool_name: Optional[pulumi.Input[str]] = None,\n kubernetes_cluster_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAgentPoolResult]:\n ...", "def show_pool(self, pool, **_params):\r\n return self.get(self.pool_path % (pool), params=_params)", "def get_pool():\n app = get_app()\n return app['pool']", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n container_registry_name: Optional[pulumi.Input[str]] = None,\n instance_count: Optional[pulumi.Input[int]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n virtual_network_subnet_id: Optional[pulumi.Input[str]] = None) -> 
'RegistryAgentPool':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegistryAgentPoolState.__new__(_RegistryAgentPoolState)\n\n __props__.__dict__[\"container_registry_name\"] = container_registry_name\n __props__.__dict__[\"instance_count\"] = instance_count\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tier\"] = tier\n __props__.__dict__[\"virtual_network_subnet_id\"] = virtual_network_subnet_id\n return RegistryAgentPool(resource_name, opts=opts, __props__=__props__)", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)", "def _get_pool (self, event):\n return self.pool", "def get(self, request, member_id, pool_id):\n conn = get_sdk_connection(request)\n member = conn.load_balancer.find_member(member_id, pool_id)\n return _get_sdk_object_dict(member)", "def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )", "def fusion_api_get_pool(self, uri=None, api=None, headers=None):\n return self.idpool.get(uri=uri, api=api, headers=headers)", "def get(self, request):\n pool_id = request.GET.get('poolId')\n conn = get_sdk_connection(request)\n health_monitor_list = _sdk_object_to_list(\n conn.load_balancer.health_monitors(\n project_id=request.user.project_id\n )\n )\n\n if pool_id:\n health_monitor_list = self._filter_health_monitors(\n health_monitor_list,\n pool_id)\n return {'items': health_monitor_list}", "def find_elb ( elb_conn, elb_name ) :\n try :\n elb_r = elb_conn.get_all_load_balancers( load_balancer_names = [ elb_name ] )\n if len( elb_r ) > 0 :\n return 
elb_r[ 0 ]\n except :\n return None", "def get_cluster_pool(cluster_pool_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterPoolResult:\n __args__ = dict()\n __args__['clusterPoolName'] = cluster_pool_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:hdinsight/v20230601preview:getClusterPool', __args__, opts=opts, typ=GetClusterPoolResult).value\n\n return AwaitableGetClusterPoolResult(\n aks_cluster_profile=pulumi.get(__ret__, 'aks_cluster_profile'),\n aks_managed_resource_group_name=pulumi.get(__ret__, 'aks_managed_resource_group_name'),\n cluster_pool_profile=pulumi.get(__ret__, 'cluster_pool_profile'),\n compute_profile=pulumi.get(__ret__, 'compute_profile'),\n deployment_id=pulumi.get(__ret__, 'deployment_id'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n log_analytics_profile=pulumi.get(__ret__, 'log_analytics_profile'),\n managed_resource_group_name=pulumi.get(__ret__, 'managed_resource_group_name'),\n name=pulumi.get(__ret__, 'name'),\n network_profile=pulumi.get(__ret__, 'network_profile'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n status=pulumi.get(__ret__, 'status'),\n system_data=pulumi.get(__ret__, 'system_data'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'))", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def post(self, request):\n return create_loadbalancer(request)", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert 
response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None", "def verify_pool(self, pool):\n svc = self.pool_path % pool\n self.rest_get(svc, restclient.Status.OK)", "def get_pool(self, pool_name=None, pool_id=None):\n\n id_or_name = pool_id if pool_id else pool_name\n errormsg = \"Failed to get the pool {0} with error {1}\"\n\n try:\n obj_pool = self.unity_conn.get_pool(name=pool_name, _id=pool_id)\n\n if pool_id and obj_pool.existed:\n LOG.info(\"Successfully got the pool object %s\",\n obj_pool)\n return obj_pool\n if pool_name:\n LOG.info(\"Successfully got pool %s\", obj_pool)\n return obj_pool\n else:\n msg = \"Failed to get the pool with {0}\".format(\n id_or_name)\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n\n except Exception as e:\n msg = errormsg.format(id_or_name, str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def pool(self) -> asyncpg.pool.Pool:\n return self.bot.pool", "def l7pool_add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n pool_main = {\n 'name': args.get('name'),\n 'loadBalancingAlgorithm': args.get('method'),\n 'protocol': args.get('protocol')\n }\n\n pool_members = list(args.get('server'))\n\n pool_health = {\n 'interval': args.get('healthinterval'),\n 'timeout': args.get('healthtimeout'),\n 'maxRetries': args.get('healthretry'),\n 'urlPath': args.get('healthpath')\n }\n\n pool_sticky = {\n 'type': args.get('sticky')\n }\n\n try:\n mgr.add_lb_l7_pool(uuid, pool_main, pool_members, pool_health, pool_sticky)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def get(self, request, pool_id):\n conn = get_sdk_connection(request)\n members_list = _sdk_object_to_list(conn.load_balancer.members(pool_id))\n return {'items': members_list}", "def get_agent_by_host(agent_host):\n session = db_api.get_session()\n with session.begin(subtransactions=True):\n query = session.query(agents_db.Agent)\n agent = query.filter(\n agents_db.Agent.host == agent_host,\n agents_db.Agent.agent_type == constants.AGENT_TYPE_DVS,\n agents_db.Agent.admin_state_up.is_(True)).first()\n if agent and agent.is_active:\n return agent\n return None", "def associate_health_monitor(self, pool, body):\r\n return self.post(self.associate_pool_health_monitors_path % (pool),\r\n body=body)" ]
[ "0.6822763", "0.63551646", "0.6132398", "0.5843235", "0.57483953", "0.5659386", "0.5508824", "0.5492956", "0.5453422", "0.5433909", "0.5364477", "0.53580743", "0.5341485", "0.53034294", "0.5272875", "0.52552325", "0.5241654", "0.523493", "0.52341044", "0.5223258", "0.52208894", "0.5205478", "0.5201373", "0.5188098", "0.5171115", "0.516919", "0.5160325", "0.5137074", "0.51209277", "0.507429" ]
0.80421126
0
Fetches a list of pools hosted by the loadbalancer agent.
def list_pools_on_lbaas_agent(self, lbaas_agent, **_params): return self.get((self.agent_path + self.LOADBALANCER_POOLS) % lbaas_agent, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPools(self):\n data = self.connect('get','pools',None)\n return data", "async def get_pools(self) -> List[CachingPool]:\n return await self._pool_fetcher.get_pools()", "def pools(self) -> List[CachingPool]:\n return self._pool_fetcher.pools", "def list_pools(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('pools', self.pools_path, retrieve_all,\r\n **_params)", "def get(self, request):\n loadbalancer_id = request.GET.get('loadbalancerId')\n listener_id = request.GET.get('listenerId')\n conn = get_sdk_connection(request)\n pool_list = _sdk_object_to_list(conn.load_balancer.pools(\n project_id=request.user.project_id))\n\n if loadbalancer_id or listener_id:\n pool_list = self._filter_pools(pool_list,\n loadbalancer_id,\n listener_id)\n return {'items': pool_list}", "def get_pools():\n poolinfostr = fork_and_get_output(\"zpool list -H -o all\".split())\n header = get_zpool_header()\n poolinfo = poolinfostr.splitlines()\n poolobjs = []\n for poolstr in poolinfo:\n poolobjs.append(DataZFS(poolstr, header, 'pool'))\n return poolobjs", "def get_pools():\n pools = ch_core.hookenv.action_get('pools')\n if pools:\n return [p.strip() for p in pools.split(',')]\n return None", "def handle_cluster_pools(self, request):\n \"\"\"\n @api {get} /cluster/pools Get cluster pools\n @apiName GetClusterPools\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiDescription List pools and nodes registered into each.\n\n @apiSuccess {String[]} pool List of nodes registered into the pool.\n\n @apiSuccessExample {json} Example response:\n {\n \"pool1\": [\"node1\", \"node2\"],\n \"pool2: [\"node1\", \"node3\"]\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n return HTTPReply(body = json.dumps(self.cluster.pools), headers = headers)", "def listRewardPools(self):\n return self.get_json('/rewardPool')", "def pool_list(mnode):\n cmd = \"gluster pool list\"\n return g.run(mnode, cmd)", "def _get_pools(self, settings):\n available_pools = []\n pool_names = []\n connections_settings = self._get_connections_settings(settings)\n\n # Generate the names of the pools this settings can connect to\n for router_name, _ in connections_settings:\n pool_names.append(router_name)\n\n # Generate the names of the pools this settings can connect to\n for pool in self.__pools.get(settings.get(\"client_id\", \"No id\"), []):\n if pool.name in pool_names:\n available_pools.append(pool)\n return available_pools", "def node_pools(self) -> Sequence['outputs.NodePoolResponse']:\n return pulumi.get(self, \"node_pools\")", "def list_pools(self):\n search_opts = {'router:external': True}\n return [FloatingIpPool(pool) for pool\n in self.client.list_networks(**search_opts).get('networks')]", "def _get_pools():\n conn = libvirt.open(None)\n try:\n _spsfs = list()\n _spsnetfs = list()\n if conn:\n # file system pool\n _spsfs = conn.listAllStoragePools(flags=128)\n # nfs pool\n _spsnetfs = conn.listAllStoragePools(flags=256)\n else:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to collect vm pool data: %s', str(e))\n raise ValueError('Failed to collect vm pool data.') from e\n finally:\n conn.close()\n return _spsfs, _spsnetfs", "def pool_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = 
_get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n ret[pool_record[\"name_label\"]] = pool_record\n return ret", "def get_app_pool_list(self) -> list:\n ah_write = self.get_iis_object()\n section = ah_write.GetAdminSection(\"system.applicationHost/applicationPools\", \"MACHINE/WEBROOT/APPHOST\")\n collection = section.Collection\n result = list()\n\n for i in range(collection.Count):\n site = collection[i]\n prop = site.Properties\n\n name = prop[\"name\"].Value\n managed_runtime_version = prop[\"managedRuntimeVersion\"].Value\n if prop[\"managedPipelineMode\"].Value:\n managed_pipeline_mode = \"classic\"\n else:\n managed_pipeline_mode = \"integrated\"\n if prop[\"enable32BitAppOnWin64\"].Value:\n bitness = 32\n else:\n bitness = 64\n\n result.append(AppPool(name, managed_runtime_version, managed_pipeline_mode, bitness))\n\n return result", "def get_pool_list(mnode):\n ret, out, _ = g.run(mnode, \"gluster pool list --xml\", log_level='DEBUG')\n if ret != 0:\n g.log.error(\"Failed to execute 'pool list' on node %s. \"\n \"Hence failed to parse the pool list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster pool list xml output.\")\n return None\n\n pool_list_list = []\n for peer in root.findall(\"peerStatus/peer\"):\n peer_dict = {}\n for element in peer.getchildren():\n if element.tag == \"hostname\" and element.text == 'localhost':\n element.text = mnode\n if element.tag == \"hostnames\":\n hostnames_list = []\n for hostname in element.getchildren():\n hostnames_list.append(hostname.text)\n element.text = hostnames_list\n peer_dict[element.tag] = element.text\n\n pool_list_list.append(peer_dict)\n return pool_list_list", "def get_lbaas_agent_hosting_pool(self, pool, **_params):\r\n return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool,\r\n params=_params)", "def get_pools():\n gclass = get_class(\"dhcpPool\")\n if gclass is None:\n logger.error(\"failed to get dhcpPool\")\n return None\n\n pools = {}\n for obj in gclass:\n if \"attributes\" in obj[obj.keys()[0]]:\n attr = obj[obj.keys()[0]][\"attributes\"]\n for r in [\"className\", \"dn\", \"id\", \"type\", \"startIp\", \n \"endIp\", \"freeIPs\"]:\n if r not in attr:\n logger.error(\"missing %s, invalid object: %s\" % (\n r, pretty_print(obj)))\n return None\n ip = ipv4_to_int(attr[\"startIp\"])\n if ip is None:\n logger.error(\"failed to convert ipv4 address for %s\" % obj)\n return None\n p = {\n \"className\": attr[\"className\"],\n \"dn\": attr[\"dn\"],\n \"id\": attr[\"id\"],\n \"type\": attr[\"type\"],\n \"address\": ip,\n \"address_str\": attr[\"startIp\"],\n \"freeIPs\": attr[\"freeIPs\"]\n }\n if ip not in pools:\n pools[ip] = {\"bad_lease\":[], \"good_lease\":[], \"pools\":[],\n \"type\":attr[\"className\"], \"state\":\"\", \"address\":ip}\n pools[ip][\"pools\"].append(p)\n\n # loop through all entries in pool and update state\n for ip in pools:\n state = \"recovery\"\n for p in pools[ip][\"pools\"]:\n if p[\"type\"]!=\"recovery\": state = p[\"type\"]\n pools[ip][\"state\"] = state\n return pools", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def pools(self, summary=True, tags_intersect=None, tags=None):\n return list(self.all_pools(summary=summary, tags=tags, tags_intersect=tags_intersect))", "def 
get_pools():\n command = 'zpool list -H'\n try:\n p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)\n except OSError:\n raise Exception('No ZFS tools found!')\n zpout, zperr = p.communicate()\n if p.returncode:\n raise Exception(\"Error executing '%s': %d\" % (command, p.returncode))\n return [line.split('\\t', 1)[0] for line in zpout.split('\\n') if line]", "def list_loadbalancers(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-loadbalancers option\"\n )\n\n ret = {}\n conn = get_conn()\n datacenter = get_datacenter(conn)\n\n for item in conn.list_loadbalancers(datacenter[\"id\"])[\"items\"]:\n lb = {\"id\": item[\"id\"]}\n lb.update(item[\"properties\"])\n ret[lb[\"name\"]] = lb\n\n return ret", "def _get_objects(cls, lb, names, minimal=False):\n\n if not names:\n return []\n\n pools = cls.factory.create(names, lb)\n\n if not minimal:\n active_member_count = cls._lbcall(lb, 'get_active_member_count',\n names)\n description = cls._lbcall(lb, 'get_description', names)\n lbmethod = cls._lbcall(lb, 'get_lb_method', names)\n members = cls._lbcall(lb, 'get_member', names)\n minimum_active_member = cls._lbcall(lb, 'get_minimum_active_member',\n names)\n minimum_up_member = cls._lbcall(lb, 'get_minimum_up_member',\n names)\n slow_ramp_time = cls._lbcall(lb, 'get_slow_ramp_time', names)\n statistics = cls._lbcall(lb, 'get_statistics', names)\n\n for idx,pool in enumerate(pools):\n pool._active_member_count = active_member_count[idx]\n pool._description = description[idx]\n pool._lbmethod = lbmethod[idx]\n pool._minimum_active_member = minimum_active_member[idx]\n pool._minimum_up_member = minimum_up_member[idx]\n pool._slow_ramp_time = slow_ramp_time[idx]\n pool._statistics = statistics['statistics'][idx]\n\n pool._members = f5.PoolMember._get_objects(lb, [pool],\n [members[idx]], minimal=True)\n\n return pools", "def get_list(\n self,\n name=None, # type: Optional[str]\n label_selector=None, # type: Optional[str]\n page=None, # type: Optional[int]\n per_page=None, # type: Optional[int]\n ):\n # type: (...) -> PageResults[List[BoundLoadBalancer], Meta]\n params = {}\n if name is not None:\n params[\"name\"] = name\n if label_selector is not None:\n params[\"label_selector\"] = label_selector\n if page is not None:\n params[\"page\"] = page\n if per_page is not None:\n params[\"per_page\"] = per_page\n\n response = self._client.request(url=\"/load_balancers\", method=\"GET\", params=params)\n\n ass_load_balancers = [\n BoundLoadBalancer(self, load_balancer_data) for load_balancer_data in response[\"load_balancers\"]\n ]\n return self._add_meta_to_result(ass_load_balancers, response)", "def get_pool_ids(host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n pool_ids = []\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. 
Error message: [{err}]\".format(err=ret[1])\n return -1, pool_ids\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n pool_ids.append(p[\"id\"])\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1, pool_ids\n return 0, pool_ids", "def test_list_pools_sort(self):\r\n resources = \"pools\"\r\n cmd = pool.ListPool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def pools_refresh(self):\n path = \"%s/commands/poolsRefresh\" % self.__base_path\n response = self.__session.post(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def list_pools(self,\n instance_id: str,\n *,\n x_correlation_id: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n if instance_id is None:\n raise ValueError('instance_id must be provided')\n headers = {\n 'X-Correlation-ID': x_correlation_id\n }\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_pools')\n headers.update(sdk_headers)\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/instances/{0}/pools'.format(\n *self.encode_path_vars(instance_id))\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers)\n\n response = self.send(request)\n return response", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)" ]
[ "0.7458122", "0.734829", "0.7194336", "0.7013317", "0.69429797", "0.68600583", "0.68326235", "0.6807121", "0.6746081", "0.66606236", "0.66272414", "0.6563584", "0.6472164", "0.64286816", "0.63685405", "0.6313028", "0.6151671", "0.6101827", "0.608971", "0.6061556", "0.60079795", "0.5990114", "0.59415853", "0.5939939", "0.591817", "0.5845067", "0.58336216", "0.58267117", "0.5825304", "0.57843745" ]
0.79365355
0
Fetch a list of all credentials for a tenant.
def list_credentials(self, **_params): return self.get(self.credentials_path, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_credentials():\n session = db.get_session()\n return (session.query(network_models_v2.Credential).all())", "def list_credentials(user):\n return Credentials.list_credentials(user)", "def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result", "def display_credentials(cls):\n return cls.credential_list", "def display_credentials(cls):\n return cls.credential_list", "def display_credentials(cls):\n return cls.credential_list", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def get_credentials(self, context, filters=None, fields=None):\n return self._get_collection(context,\n network_models_v2.Credential,\n self._make_credential_dict,\n filters=filters,\n fields=fields)", "def get_creds(self):\n return self.creds", "def get_all_auths(self):\n return self.all_auths", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def display_credentials(cls):\n return cls.credentials_list", "def display_credentials(cls):\n return cls.credentials_list", "def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def creds(self):\n return CredsTable(self.rpc, self.name)", "def list(ctx, show_hidden, oath_type, period, password, remember):\n _init_session(ctx, password, remember)\n session = ctx.obj[\"session\"]\n creds = [\n cred\n for cred in session.list_credentials()\n if show_hidden or not is_hidden(cred)\n ]\n creds.sort()\n for cred in 
creds:\n click.echo(_string_id(cred), nl=False)\n if oath_type:\n click.echo(f\", {cred.oath_type.name}\", nl=False)\n if period:\n click.echo(f\", {cred.period}\", nl=False)\n click.echo()", "def get_user_credentials(connection):\n\n response = connection.get_json('user')\n user_data = response.get('user', None)\n if user_data is None:\n raise SAPCliError('gCTS response does not contain \\'user\\'')\n\n config_data = user_data.get('config', None)\n if config_data is None:\n return []\n\n user_credentials = [cred for cred in config_data if cred['key'] == 'USER_AUTH_CRED_ENDPOINTS']\n return json.loads(user_credentials[0]['value'])", "def list_credentials():\n creds = load_auth()\n max_username_len = max([len(c.username) for c in creds]) if len(creds) > 0 else 1\n long_format = f\"{{:{max_username_len}}} for {{}}\"\n for cred in creds:\n if len(cred.hostname) > 0:\n print(str.format(long_format, cred.username, cred.hostname))\n else:\n print(cred.username)\n if len(creds) == 0 and os.isatty(1):\n print(\"No credentials configured\")", "def credentials(self) -> Sequence['outputs.DeviceCredentialResponse']:\n return pulumi.get(self, \"credentials\")", "def list(self, tenant_id=None):\n\n if not tenant_id:\n return self._list(\"/users\", \"users\")\n else:\n return self._list(\"/tenants/%s/users\" % tenant_id, \"users\")", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def display_credential(cls):\n return cls.credential_list", "def getAllCredentialsFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def credential_list():\n rows = safeisland.list_certificates()\n certs = []\n for row in rows:\n# certs.append(row[\"cert\"])\n certs.append({\"uuid\": row[\"uuid\"], \"cert\": row[\"cert\"]})\n\n return {\"payload\": certs}", "def _GetCredentialsIter(self, credentials_file=None):\n if not credentials_file:\n credentials_file = os.path.join(os.path.dirname(__file__),\n 'credentials.txt')\n if os.path.exists(credentials_file):\n with open(credentials_file) as f:\n for credentials in f:\n username, password = credentials.strip().split(':')\n yield username, password", "def get_tenant_resources(self):\n resources = self.context[\"tenant\"].get(\"resources\", [])\n if not resources:\n msg = (\"No resources found for tenant: %s\"\n % self.context[\"tenant\"].get(\"name\"))\n raise exceptions.NotFoundException(message=msg)\n for res_id in resources:\n self._get_resource(res_id)", "def getAllCredentials(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getAllCredentialsFromJWTToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_all_customers():\n data = user_obj.get_all_customers()\n return data" ]
[ "0.7408005", "0.6807436", "0.6548845", "0.6349993", "0.6349993", "0.6349993", "0.6300897", "0.62632185", "0.62337697", "0.6205891", "0.61948234", "0.61883175", "0.61883175", "0.6169535", "0.6119927", "0.6083075", "0.59551257", "0.5942657", "0.58955693", "0.58475935", "0.58452415", "0.5837588", "0.5810683", "0.5810358", "0.5781048", "0.5748504", "0.573842", "0.573089", "0.5721824", "0.5715066" ]
0.7127034
1
Create a new credential.
def create_credential(self, body=None): return self.post(self.credentials_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_credential(account,userName,password):\n new_credential = Credentials(account,userName,password)\n return new_credential", "def m_credential_create(node_name, credential_hash, participantDID):\n pass", "def make_instance(self, include_optional):\n # model = openapi_client.models.cred_credential.CredCredential() # noqa: E501\n if include_optional :\n return CredCredential(\n id = '', \n account_guid = '', \n account_id = '', \n api_token = openapi_client.models.common/secret.common.Secret(\n encrypted = '', \n plain = '', ), \n ca_cert = '', \n description = '', \n external = True, \n last_modified = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), \n owner = '', \n role_arn = '', \n secret = openapi_client.models.common/secret.common.Secret(\n encrypted = '', \n plain = '', ), \n tokens = openapi_client.models.cred/temporary_token.cred.TemporaryToken(\n aws_access_key_id = '', \n aws_secret_access_key = openapi_client.models.common/secret.common.Secret(\n encrypted = '', \n plain = '', ), \n duration = 56, \n expiration_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), \n token = openapi_client.models.common/secret.common.Secret(\n encrypted = '', \n plain = '', ), ), \n type = '[\\\"aws\\\",\\\"azure\\\",\\\"gcp\\\",\\\"ibmCloud\\\",\\\"apiToken\\\",\\\"githubToken\\\",\\\"basic\\\",\\\"dtr\\\",\\\"kubeconfig\\\",\\\"certificate\\\"]', \n use_aws_role = True\n )\n else :\n return CredCredential(\n )", "def test_credential_create(self):\n self.new_credential.credential_create()\n self.assertEqual(len(Credentials.credentials_list), 1)", "def createAgentCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def new_credentials(site_name, user_name, password):\n new_credentials = Credentials(site_name, user_name, password)\n return new_credentials", "def create(credentials: ICredentials, expires=30, registry=None):", "def create_user_credentials(storage_type, storage_id, space_name, client_ip,\n user_details):\n user_id = user_details[\"id\"]\n if user_id == \"0\":\n return PosixCredentials(0, 0)\n\n uid = gid = gen_storage_id(user_id)\n return PosixCredentials(uid, gid)", "def create(self, username, password):\n pass", "def create(self, credentials):\n return User.objects.create_user(\n credentials['username'],\n credentials['email'],\n credentials['password']\n )", "def setUp(self):\n self.new_credential = Credential(\"winnie\", \"facebook\",\"deinawinnie\",\"winnie\")", "def create_credentials():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'client_secret.json', SCOPES)\r\n creds = flow.run_local_server()\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n return creds", "def set_credentials():", "def add_credential(self, authenticator_id, credential):\n pass", "async def 
create_credential_offer(self, credential_definition_id: str) -> str:\n try:\n async with self._profile.session() as session:\n cred_def = await session.handle.fetch(\n CATEGORY_CRED_DEF, credential_definition_id\n )\n key_proof = await session.handle.fetch(\n CATEGORY_CRED_DEF_KEY_PROOF, credential_definition_id\n )\n except AskarError as err:\n raise IndyIssuerError(\"Error retrieving credential definition\") from err\n if not cred_def or not key_proof:\n raise IndyIssuerError(\n \"Credential definition not found for credential offer\"\n )\n try:\n # The tag holds the full name of the schema,\n # as opposed to just the sequence number\n schema_id = cred_def.tags.get(\"schema_id\")\n cred_def = CredentialDefinition.load(cred_def.raw_value)\n\n credential_offer = CredentialOffer.create(\n schema_id or cred_def.schema_id,\n cred_def,\n key_proof.raw_value,\n )\n except CredxError as err:\n raise IndyIssuerError(\"Error creating credential offer\") from err\n\n return credential_offer.to_json()", "def SetUp(self) :\n self.new_credential = Credential(\"snapchat\", \"[email protected]\", \"Chat001\") # new credential", "def add_credential(args):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n # next, load the new credential\n new_cred = read_new_credential(args.cred_file)\n\n # check for any conflicts between the new credential and an existing one\n conflicting_cred_idx = None\n for idx, cred in enumerate(creds):\n if cred.username == new_cred.username:\n if len(cred.hostname) > 0 and len(new_cred.hostname) > 0 \\\n and cred.hostname == new_cred.hostname:\n conflicting_cred_idx = idx\n elif len(cred.hostname) == 0 and len(new_cred.hostname) == 0:\n conflicting_cred_idx = idx\n if conflicting_cred_idx is not None:\n if args.force:\n creds[conflicting_cred_idx] = new_cred\n else:\n logger.error(\"Credential already exists; overwrite with --force\")\n return\n else:\n creds.append(new_cred)\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()", "def setUp(self):\n self.new_credential = Credential(\"Facebook\",\"Yvonnah Bonswuh\",\"[email protected]\",\"ivy1996\") # create credential object", "def newcred(self):\n return {'login': input('username: '),\n 'password': getpass.getpass()}", "def setUp(self):\n # instantiate an object by populating with dummy values.\n self.new_credential = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")", "def createIfNotExist(self):\n\n if self.filename and (not os.path.exists(self.filename)):\n logSupport.log.debug(\"Credential %s does not exist.\" % (self.filename))\n self.create()", "def getNewCustosCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def credential(self, value):\n credential = self.organization.get_credential_by_name_with_type_id(value,\n self.credential._data.get('credential_type'))\n if not credential:\n raise InvalidCredential(value)\n self._update_values('credential', credential.id)", "def test_create_application_credential(self):\n app_cred = self.create_application_credential()\n\n # Check that the secret appears in the create response\n secret = app_cred['secret']\n\n # Check that the secret is not retrievable after initial create\n app_cred = self.non_admin_app_creds_client.show_application_credential(\n 
user_id=self.user_id,\n application_credential_id=app_cred['id']\n )['application_credential']\n self.assertNotIn('secret', app_cred)\n\n # Check that the application credential is functional\n _, resp = self.non_admin_token.get_token(\n app_cred_id=app_cred['id'],\n app_cred_secret=secret,\n auth_data=True\n )\n self.assertEqual(resp['project']['id'], self.project_id)", "def test_create_credential_scope(self):\n\n scope = self_signed.create_credential_scope()\n assert scope == \"20170720/us-west-2/es/aws4_request\"", "def read_new_credential(csv_file=None):\n options = {}\n if csv_file is None:\n logger.info(\"Generating configuration with user-specified username + password\")\n username = input(\"Username: \")\n if len(username) == 0:\n raise RuntimeError(\"Username may not be empty\")\n password = getpass.getpass()\n if len(password) == 0:\n raise RuntimeError(\"Password may not be empty\")\n hostname = _validate_hostname(input(\"Hostname (may be empty): \"))\n token_endpoint = input(\"Token endpoint (empty if not applicable): \") or None\n else:\n if os.path.exists(csv_file):\n with open(csv_file, \"r\") as f:\n reader = csv.DictReader(f)\n cred = next(reader)\n username = cred[\"username\"]\n password = cred[\"password\"]\n hostname = cred[\"hostname\"] if \"hostname\" in cred else \"\"\n token_endpoint = cred.get(\"token_endpoint\")\n if \"mechanism\" in cred:\n options[\"method\"] = cred[\"mechanism\"].replace(\"-\", \"_\")\n if \"protocol\" in cred:\n options[\"ssl\"] = cred[\"protocol\"] != \"SASL_PLAINTEXT\"\n if \"ssl_ca_location\" in cred:\n options[\"ssl_ca_location\"] = cred[\"ssl_ca_location\"]\n else:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), csv_file)\n return Auth(username, password, hostname, token_endpoint=token_endpoint, **options)", "async def store_credential(\n self, cred_ex_record: V20CredExRecord, cred_id: str = None\n ) -> None:", "def save_credentials(credentials):\n credentials. save_details()", "def create(self, username, password, email):\n pass", "def putCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.82129794", "0.71526945", "0.6765534", "0.67013925", "0.6690681", "0.6671148", "0.6534608", "0.6408601", "0.63128364", "0.62566274", "0.6237215", "0.6199901", "0.6177472", "0.6149115", "0.6122608", "0.6107753", "0.60892326", "0.6071949", "0.6031311", "0.60056144", "0.5982874", "0.5964065", "0.59308976", "0.59020245", "0.59017414", "0.5877203", "0.58535016", "0.58274806", "0.5774301", "0.5771493" ]
0.786629
1
Delete the specified credential.
def delete_credential(self, credential): return self.delete(self.credential_path % (credential))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_credential(credentials):\n credentials.delete_credentials()", "def delete_credential(self):\n Credentials.credentials_list.remove(self)", "def delete_credential(self):\n\n Credential.credential_list.remove(self)", "def delete_credential(self):\n Credential.credential_list.remove(self)", "def deleteCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete_credential(self, context, id):\n return remove_credential(id)", "def remove_credential(self, authenticator_id, credential_id):\n pass", "def delete_credentials(self):\n Credentials.credential_list.remove(self)", "def delete(ctx, query, force):\n\n ensure_validated(ctx)\n controller = ctx.obj['controller']\n creds = controller.list()\n hits = _search(creds, query)\n if len(hits) == 0:\n click.echo('No matches, nothing to be done.')\n elif len(hits) == 1:\n cred = hits[0]\n if force or (click.confirm(\n u'Delete credential: {} ?'.format(cred.printable_key),\n default=False, err=True\n )):\n controller.delete(cred)\n click.echo(u'Deleted {}.'.format(cred.printable_key))\n else:\n click.echo('Deletion aborted by user.')\n\n else:\n _error_multiple_hits(ctx, hits)", "def delete_credentials(self):\n Credentials.credentials_list.remove(self)", "def delete_credentials(self):\n Credentials.credentials_list.remove(self)", "def delete_credentials(self):\n Credentials.credentials_list.remove(self)", "def deleteAgentCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _delete_credential(self, key):\n try:\n del self._data[key]\n except KeyError:\n pass\n self._write()", "def delete_credential(name: str):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n if '@' in name:\n username, hostname = name.split('@')\n else:\n username = name\n hostname = None\n\n # next, try to figure out which one we're supposed to remove\n matches = []\n match_indices = []\n\n for idx, cred in enumerate(creds):\n # the username must match\n if cred.username != username:\n continue\n # if specified, the hostname must match\n if hostname is not None and cred.hostname != hostname:\n continue\n\n matches.append(cred)\n match_indices.append(idx)\n\n if len(matches) == 0:\n err = f\"No matching credential found with username '{username}'\"\n if hostname is not None:\n err += f\" with hostname '{hostname}'\"\n raise RuntimeError(err)\n elif len(matches) > 1:\n raise RuntimeError(_construct_ambiguous_deletion_message(username, hostname, matches))\n\n # At this point we should have exactly one match, which we can delete\n del creds[match_indices[0]]\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()", "def deleteCredential(self, credentialName):\n try:\n utility.execLog(\"Deleting Credential: %s\" % credentialName)\n self.browserObject, status, result = self.selectCredential(credentialName)\n if not status:\n return self.browserObject, False, result\n # Checking for Default Credentials - 'Delete' will be Disabled\n disabled = self.handleEvent(EC.presence_of_element_located((By.ID, self.CredentialsObjects('deleteCredentials'))), action=\"GET_ATTRIBUTE_VALUE\", attributeName=\"disabled\")\n if \"true\" in 
disabled:\n return self.browserObject, False, \"Unable to Delete Default Credential: %s\" % credentialName\n # Clicking on Delete\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.CredentialsObjects('deleteCredentials'))), action=\"CLICK\")\n utility.execLog(\"Checking for Confirm Box...\")\n try:\n currentTitle = self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.CommonObjects('GetFormTitle'))), action=\"GET_TEXT\")\n except:\n return self.browserObject, False, \"Unable to Load Confirm Box To Delete Credential\"\n if \"Confirm\" in currentTitle:\n utility.execLog(\"Confirm Box Loaded...Confirming to Delete Credential: '%s'\" % credentialName)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.CommonObjects('ConfirmYes'))), action=\"CLICK\")\n else:\n utility.execLog(\"Failed to Verify Confirm Delete Box :: Actual --> '%s' :: Expected --> '%s'\" % (currentTitle, \"Confirm\"))\n return self.browserObject, False, \"Failed to Verify Confirm Delete Box :: Actual --> '%s' :: Expected --> '%s'\" % (currentTitle, \"Confirm\")\n # Checking for Error Deleting a Credential\n try:\n errorRedBox = self.handleEvent(EC.visibility_of_element_located((By.XPATH, self.CommonObjects('RedBoxError'))), wait_time=10)\n if errorRedBox:\n errorMessage = self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.CommonObjects('RedBoxErrorMessages'))),action=\"GET_TEXT\")\n return self.browserObject, False, \"Failed to Delete Credential :: '%s' :: Error -> %s\" % (credentialName, errorMessage)\n except:\n # Refresh Table\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.CredentialsObjects('credentialsRefresh'))), action=\"CLICK\")\n time.sleep(3)\n # VALIDATION: Selecting deleted Credential\n self.browserObject, status, result = self.selectCredential(credentialName)\n if status:\n return self.browserObject, False, \"Failed to Delete Credential :: '%s' :: Error -> %s\" % (credentialName, \"Validation Error\")\n else:\n return self.browserObject, True, \"Successfully Deleted Credential: '%s'\" % credentialName\n except Exception as e:\n return self.browserObject, False, \"Exception while Deleting Credential :: '%s' :: Error -> %s\" % (credentialName, str(e) + format_exc())", "def delete(ctx, query, force, password, remember):\n\n _init_session(ctx, password, remember)\n session = ctx.obj[\"session\"]\n creds = session.list_credentials()\n hits = _search(creds, query, True)\n if len(hits) == 0:\n click.echo(\"No matches, nothing to be done.\")\n elif len(hits) == 1:\n cred = hits[0]\n if force or (\n click.confirm(\n f\"Delete account: {_string_id(cred)} ?\",\n default=False,\n err=True,\n )\n ):\n session.delete_credential(cred.id)\n click.echo(f\"Deleted {_string_id(cred)}.\")\n else:\n click.echo(\"Deletion aborted by user.\")\n\n else:\n _error_multiple_hits(ctx, hits)", "def delete_integrations_credential(self, credential_id, **kwargs):\n\n all_params = ['credential_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_integrations_credential\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'credential_id' is set\n if ('credential_id' not in params) or (params['credential_id'] is None):\n raise ValueError(\"Missing the required parameter `credential_id` when calling `delete_integrations_credential`\")\n\n\n resource_path = 
'/api/v2/integrations/credentials/{credentialId}'.replace('{format}', 'json')\n path_params = {}\n if 'credential_id' in params:\n path_params['credentialId'] = params['credential_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_delete_creds(self):\n self.new_credentials.save_creds()\n self.new_credentials.delete_creds()\n\n self.assertEqual(len(Credentials.credential_list),0)", "def test_delete_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"Facebook\",\"Chris\",\"[email protected]\",\"chris1\") # new credential\n test_credential.save_credential()\n self.new_credential.delete_credential() # Deleting a credential object\n self.assertEqual(len(Credential.credential_list),1)", "def locked_delete(self):\n self._multistore._delete_credential(self._key)", "def test_delete_credentials(self):\n\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\")\n test_credential.save_attributes()\n\n self.new_credentials.delete_credentials()\n self.assertEqual(len(Credentials.credentials_list), 1)", "def delete_account(self):\n Credential.account_list.remove(self)", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def remove_credentials(service: str) -> None:\n\n # SQL query to remove the user servise credentials from the database\n query = f\"DELETE FROM {service}_credentials WHERE user_id=?;\"\n\n # Execute the query\n with connect(DATABASE) as db:\n db.execute(query, (session[\"user_id\"],))\n db.commit()", "def test_delete_user(self) :\n self.new_credential.save_credential()\n test_credential = Credential(\"peter\", \"Peter\", \"Peter003\") # new user\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)", "def test_delete_hyperflex_local_credential_policy(self):\n pass", "def remove_credentials(self, conjurrc: ConjurrcData):\n self.credentials_provider.remove_credentials(conjurrc)", "def delete(cls, aws_cloud_account_id: str):\n\t\tpass", "def test_delete_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"0712345678\",\"test\",\"login\",\"0712345678\")# new credential\n test_credential.save_credential()\n\n 
self.new_credential.delete_credential()# delete a credential object\n self.assertEqual(len(Credential.credential_list),1)" ]
[ "0.83345723", "0.78936267", "0.7870029", "0.7769463", "0.7743528", "0.76626366", "0.7516849", "0.74108857", "0.7403175", "0.71771854", "0.71771854", "0.71771854", "0.7061404", "0.6968268", "0.69647276", "0.69591576", "0.67291147", "0.6704603", "0.65337545", "0.64367586", "0.6433398", "0.6399359", "0.62819594", "0.61621505", "0.61303014", "0.60205775", "0.5847525", "0.5750633", "0.57333773", "0.5723159" ]
0.8819896
0
Fetch a list of all network profiles for a tenant.
def list_network_profiles(self, **params): return self.get(self.network_profiles_path, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_profiles(self, params):\n return self.profiles", "def fetch_all(profile):\n params = {}\n params[\"profile\"] = profile\n response = utils.do_request(instanceprofile, \"get\", params)\n data = utils.get_data(\"InstanceProfiles\", response)\n return data", "def GetAllProfiles(self):\n profiles = []\n feed_uri = self._gd_client.GetFeedUri('profiles')\n while feed_uri:\n feed = self._gd_client.GetProfilesFeed(uri=feed_uri)\n profiles.extend(feed.entry)\n feed_uri = feed.FindNextLink()\n self._profiles = profiles", "def profiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n return self._profiles", "def list_network_profiles(arn=None, type=None, nextToken=None):\n pass", "def get_all_profiles(store=\"local\"):\n return {\n \"Domain Profile\": get_all_settings(profile=\"domain\", store=store),\n \"Private Profile\": get_all_settings(profile=\"private\", store=store),\n \"Public Profile\": get_all_settings(profile=\"public\", store=store),\n }", "def get_profiles(self):\n profiles = [['Profile name', 'GUID']]\n r = self.system_cursor.execute('{Call wtGetProfileList()}')\n for row in r.fetchall():\n profiles.append([row.PROFILE_NAME, row.PROFILE_GUID])\n return profiles", "def list_profiles(self):\n return self._get(\"posture\", box=BoxList)", "def get_all_profiles(self) -> List[Profile]:\n return [self.model.parse_obj(profile) for profile in self.read_records(SyncMode.full_refresh)]", "async def all(self):\n log('retrieving profile cards..')\n start = time.monotonic()\n profiles = await asyncio.gather(*list(\n map(lambda profile: self.retrieve(str(profile['id'])), await self.list())\n ))\n elapsed = \"%0.2fs\" % (time.monotonic() - start,)\n log(\"retrieved {} profile cards in {}\".format(len(profiles), elapsed))\n return {\n \"hits\": len(profiles),\n \"updated\": time.time() * 1000,\n \"time\": elapsed,\n \"applicants\": list(filter(None, map(self.parse, profiles)))\n }", "def profiles_names(self):\n url = get_url('profiles')\n response = self._get(url)\n raise_on_error(response)\n return response.json()", "def fusion_api_get_server_profiles(self, uri=None, param='', api=None, headers=None):\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)", "def list(self):\n # List is to be extended (directories should not have a trailing slash)\n paths_to_ignore = ['.DS_Store']\n\n profiles = []\n cache = ClientCache(self._conan_api.cache_folder)\n profiles_path = cache.profiles_path\n if os.path.exists(profiles_path):\n for current_directory, _, files in os.walk(profiles_path, followlinks=True):\n files = filter(lambda file: os.path.relpath(\n os.path.join(current_directory, file), profiles_path) not in paths_to_ignore, files)\n\n for filename in files:\n rel_path = os.path.relpath(os.path.join(current_directory, filename),\n profiles_path)\n profiles.append(rel_path)\n\n profiles.sort()\n return profiles", "def list_policy_profiles(self, **params):\r\n return self.get(self.policy_profiles_path, params=params)", "def profiles(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n return list(filter(lambda x: x is not None, executor.map(self.profile_details, self.profiles_names())))", "def profiles(self):\n return self._profiles", "def profiles(self):\n return self._profiles", "def get(self, entity, schema):\n return jsonify(entity.profiles.get_or_404(schema=schema).to_json()), 200", "def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)", "def show_network_profile(self, profile, **params):\r\n return 
self.get(self.network_profile_path % (profile), params=params)", "def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()", "def list_profiles(self) -> dict:\n wsc = self.read_ws_configuration()\n out = OrderedDict()\n for name, json in wsc.profiles.items():\n out[name] = Profile(name, self.ws_data_folder / name, json)\n # Try to find current profile\n try:\n out[self.current_profile_name].is_current = True\n except Exception:\n pass\n return out", "def available_profiles(cls) -> List[str]:\n return list(cfg.get(\"profiles\"))", "def fusion_api_get_server_profiles_available_networks(self, uri=None, param='', api=None, headers=None):\n param = '/available-networks%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)", "def fusion_api_get_server_profile_templates(self, uri=None, param='', api=None, headers=None):\n return self.profile_template.get(uri=uri, api=api, headers=headers, param=param)", "def getProfiles(context):\n\n analytics_tool = getToolByName(getSite(), 'portal_analytics')\n # short circuit if user hasn't authorized yet\n if not analytics_tool.is_auth():\n return SimpleVocabulary([])\n\n try:\n profiles = analytics_tool.makeCachedRequest('profiles')\n except error.BadAuthenticationError:\n choices = [('Please authorize with Google in the Google Analytics \\\n control panel.', None)]\n return SimpleVocabulary.fromItems(choices)\n except error.RequestTimedOutError:\n choices = [('The request to Google Analytics timed out. Please try \\\n again later.', None)]\n return SimpleVocabulary.fromItems(choices)\n if profiles:\n unique_choices = {}\n for entry in profiles:\n title = entry.get('name')\n title = crop(title, 40)\n tableId = entry.get('id')\n unique_choices.update({title: tableId})\n choices = unique_choices.items()\n else:\n choices = [('No profiles available', None)]\n return SimpleVocabulary([SimpleTerm(c[1], c[1], c[0]) for c in choices])", "def fusion_api_get_server_profiles_profile_ports(self, uri=None, param='', api=None, headers=None):\n param = '/profile-ports%s' % param\n return self.profile.get(uri=uri, api=api, headers=headers, param=param)", "def network_list_for_tenant(request, tenant_id, include_external=False,\n include_pre_auto_allocate=False, page_data=None,\n **params):\n\n # Pagination is implemented consistently with nova and cinder views,\n # which means it is a bit hacky:\n # - it requests X units but displays X-1 units\n # - it ignores the marker metadata from the API response and uses its own\n # Here we have extra hacks on top of that, because we have to merge the\n # results of 3 different queries, and decide which one of them we are\n # actually paginating.\n # The 3 queries consist of:\n # 1. Shared=True networks\n # 2. Project non-shared networks\n # 3. External non-shared non-project networks\n # The main reason behind that order is to maintain the current behavior\n # for how external networks are retrieved and displayed.\n # The include_external assumption of whether external networks should be\n # displayed is \"overridden\" whenever the external network is shared or is\n # the tenant's. 
Therefore it refers to only non-shared non-tenant external\n # networks.\n # To accomplish pagination, we check the type of network the provided\n # marker is, to determine which query we have last run and whether we\n # need to paginate it.\n\n LOG.debug(\"network_list_for_tenant(): tenant_id=%(tenant_id)s, \"\n \"params=%(params)s, page_data=%(page_data)s\", {\n 'tenant_id': tenant_id,\n 'params': params,\n 'page_data': page_data,\n })\n\n page_data, marker_net = _configure_pagination(\n request, params, page_data, tenant_id=tenant_id)\n\n query_kwargs = {\n 'request': request,\n 'include_external': include_external,\n 'tenant_id': tenant_id,\n 'page_data': page_data,\n **params,\n }\n\n return _perform_query(\n _query_nets_for_tenant, query_kwargs, marker_net,\n include_pre_auto_allocate)", "def get_user_profiles(self):\n print 'inside get user profiles'\n print 'self.username :' + self.username\n g = GoogleAnalyticsAPI(self.username)\n if g:\n print 'GA client exists'\n user_accounts = g.get_user_accounts()\n return user_accounts.get('items')\n else:\n print 'GA client does not exist'\n return []", "def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')" ]
[ "0.6795108", "0.6778038", "0.6731372", "0.6688756", "0.6614129", "0.66025275", "0.65353435", "0.6218279", "0.6108761", "0.6066531", "0.6055497", "0.6022031", "0.5943921", "0.5920367", "0.5908565", "0.5905929", "0.5905929", "0.57345873", "0.56705946", "0.5632675", "0.5630586", "0.56192774", "0.55908066", "0.55558556", "0.5545123", "0.5530818", "0.5516969", "0.5465661", "0.5438671", "0.5425268" ]
0.7375674
0
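The records above and below catalogue the network-profile CRUD wrappers of a neutron-style client, each a thin shim over one HTTP verb. A minimal usage sketch for the listing call documented in this record, assuming the method is exposed on a python-neutronclient-style Client; the client construction, auth values, and filter below are illustrative placeholders, not part of the record:

# A minimal sketch, assuming list_network_profiles() lives on a
# python-neutronclient-style Client (v2.0). Credentials, the auth URL,
# and the name filter are placeholders.
from neutronclient.v2_0 import client

neutron = client.Client(username='admin',
                        password='secret',
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')

# Keyword arguments pass through as query-string filters on the GET,
# mirroring **params in the documented wrapper.
result = neutron.list_network_profiles(name='default-vlan')
for profile in result.get('network_profiles', []):
    print(profile['id'], profile.get('name'))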
Fetch a network profile.
def show_network_profile(self, profile, **params): return self.get(self.network_profile_path % (profile), params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)", "def getProfile(self, profile):\n for network in self.networks:\n if network.getProfileName() == profile:\n return network\n else:\n raise Exception('Network with profile name \"%s\" not found' % profile)", "def get_network_profile(arn=None):\n pass", "def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def retrieve_profile(self, name):\n\n url = get_url('profile details', profile=name)\n response = self._get(url)\n raise_on_error(response)\n if response.status_code == 404:\n raise QarnotGenericException(response.json()['message'])\n return Profile(response.json())", "def network_profile(self) -> Optional['outputs.ClusterPoolResourcePropertiesResponseNetworkProfile']:\n return pulumi.get(self, \"network_profile\")", "def get_profile_get(self, components, destinyMembershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/{membershipType}/Profile/{destinyMembershipId}/\"))", "def get_profile():\n logger.debug(\"entering function get_profile\")\n response = read_user_profile()\n logger.debug(\"exiting function get_profile\")\n return jsonify(response)", "def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)", "def get_profile(profile_id, token):\n url = '{}{}/'.format(PROFILE_ENDPOINT, profile_id)\n res = requests.get(url, headers={\n 'Content-Type': 'application/json',\n 'Authorization': 'Token {}'.format(token)\n })\n return res.json()", "def test_get_profile(self):\n self.cim.get_profile(customer_profile_id=u\"123\")", "def get_my_profile(self):\n return GetMyProfileRequest(self)", "def get_profile(profile_id):\n profile = Profile.objects.get(id=profile_id)\n return profile", "def get_profile(profile_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProfileResult:\n __args__ = dict()\n __args__['profileName'] = profile_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:network/v20220401preview:getProfile', __args__, opts=opts, typ=GetProfileResult).value\n\n return AwaitableGetProfileResult(\n allowed_endpoint_record_types=pulumi.get(__ret__, 'allowed_endpoint_record_types'),\n dns_config=pulumi.get(__ret__, 'dns_config'),\n endpoints=pulumi.get(__ret__, 'endpoints'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n max_return=pulumi.get(__ret__, 'max_return'),\n monitor_config=pulumi.get(__ret__, 'monitor_config'),\n name=pulumi.get(__ret__, 'name'),\n profile_status=pulumi.get(__ret__, 'profile_status'),\n tags=pulumi.get(__ret__, 'tags'),\n traffic_routing_method=pulumi.get(__ret__, 'traffic_routing_method'),\n traffic_view_enrollment_status=pulumi.get(__ret__, 'traffic_view_enrollment_status'),\n type=pulumi.get(__ret__, 'type'))", "def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = 
self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile", "def profile_details(self, profile_name):\n url = get_url('profile details', profile=profile_name)\n response = self._get(url)\n if response.status_code == 404:\n return None\n raise_on_error(response)\n return Profile(response.json())", "def list_network_profiles(self, **params):\r\n return self.get(self.network_profiles_path, params=params)", "def network_profile(self) -> Optional[pulumi.Input['AgentPoolNetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "async def retrieve(self, profile_id):\n profile = await self.get(self.profile_load.format(profile_id))\n log(\"retrieved card for {}\".format(profile['title']))\n return profile", "def fetch_profile(self, api_token: str, id: str) -> dict:\n query = \"\"\"\n query Profile($id: ID!, $service_type: ServiceType!) {\n profile(id: $id, serviceType: $service_type) {\n id\n }\n }\n \"\"\"\n\n path = jmespath.compile(\n \"\"\"\n data.profile.{\n id: id\n }\n \"\"\"\n )\n\n data = self.do_query(\n query,\n api_token=api_token,\n variables={\n \"id\": id,\n \"service_type\": settings.HELSINKI_PROFILE_SERVICE_TYPE,\n },\n )\n\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"id\"])\n return parsed_data", "def get(cls, client, name=\"\", option_=\"\") :\n try :\n if not name :\n obj = nshttpprofile()\n response = obj.get_resources(client, option_)\n else :\n if type(name) != cls :\n if type(name) is not list :\n obj = nshttpprofile()\n obj.name = name\n response = obj.get_resource(client, option_)\n else :\n if name and len(name) > 0 :\n response = [nshttpprofile() for _ in range(len(name))]\n obj = [nshttpprofile() for _ in range(len(name))]\n for i in range(len(name)) :\n obj[i] = nshttpprofile()\n obj[i].name = name[i]\n response[i] = obj[i].get_resource(client, option_)\n return response\n except Exception as e :\n raise e", "def get_user_profile(self):\n return self.request('get', 'id/users')", "def get(self, username):\n\t\tdb = getattr(g, 'db', None)\n\n\t\tqry = \"SELECT username,email,active,steamid FROM\\\n\t\t\tprofiles WHERE username = %s;\"\n\t\twith db as cursor:\n\t\t\tcursor.execute(qry, (username,))\n\n\t\treturn {'profile':cursor.fetchone()}", "def get_profile(self, profiles, settings=None, options=None, conf=None, cwd=None):\n assert isinstance(profiles, list), \"Please provide a list of profiles\"\n cache = ClientCache(self._conan_api.cache_folder)\n loader = ProfileLoader(cache)\n profile = loader.from_cli_args(profiles, settings, options, conf, cwd)\n profile.conf.validate()\n cache.new_config.validate()\n # Apply the new_config to the profiles the global one, so recipes get it too\n profile.conf.rebase_conf_definition(cache.new_config)\n return profile", "def show_policy_profile(self, profile, **params):\r\n return self.get(self.policy_profile_path % (profile), params=params)", "def get_profile(self, profile_id: str):\n\n return self._get(f\"posture/{profile_id}\")", "def profile(self) -> dict:\n endpoint = \"/api/users/profile/\"\n ret = self._request(endpoint=endpoint)\n return ret", "def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()" ]
[ "0.7564368", "0.746909", "0.7128708", "0.70291847", "0.68386704", "0.68386704", "0.6765802", "0.67434824", "0.6661508", "0.6641882", "0.64806426", "0.6458895", "0.6448768", "0.6383853", "0.6378429", "0.63658094", "0.63539726", "0.6344743", "0.63307583", "0.63242495", "0.6310709", "0.6252525", "0.62200713", "0.6173916", "0.61412275", "0.6127667", "0.6124772", "0.61196876", "0.6115481", "0.61078477" ]
0.77783895
0
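Fetching a single profile follows the same shape, keyed by ID. A short sketch reusing the neutron client from the previous example; the ID is a placeholder, and an unknown ID would normally surface as a client-side NotFound-style exception:

# show_network_profile() issues a GET on network_profile_path % (profile).
resp = neutron.show_network_profile('PROFILE_ID')  # placeholder ID
print(resp['network_profile'])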
Create a network profile.
def create_network_profile(self, body=None): return self.post(self.network_profiles_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_network_profile(projectArn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)", "def _prepare_network_profile(self, instance_uuid):\n network_interface = {\n 'location': CONF.azure.location,\n 'ip_configurations': [{\n 'name': instance_uuid,\n 'subnet': {\n 'id': CONF.azure.vsubnet_id\n }\n }]\n }\n try:\n async_nic_creation = \\\n self.network.network_interfaces.create_or_update(\n CONF.azure.resource_group,\n instance_uuid,\n network_interface)\n nic = async_nic_creation.result()\n LOG.info(_LI(\"Create a Nic: %s\"), nic.id)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.NetworkInterfaceCreateFailure(\n reason=six.text_type(e), instance_uuid=instance_uuid)\n raise ex\n network_profile = {\n 'network_interfaces': [{\n 'id': nic.id\n }]\n }\n return network_profile", "def create(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.create_internet_gateway()", "def create(profile, name):\n # Make sure it doesn't exist already.\n if exists(profile, name):\n msg = \"Instance profile '\" + str(name) + \"' already exists.\"\n raise ResourceAlreadyExists(msg)\n\n # Now we can create it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"create\", params)\n\n # Check that it exists.\n instance_profile_data = polling_fetch(profile, name)\n if not instance_profile_data:\n msg = \"Instance profile '\" + str(name) + \"' not created.\"\n raise ResourceNotCreated(msg)\n\n # Send back the instance profile's info.\n return instance_profile_data", "def create(*args, **kwargs):\n\n factory = V2ProfileFactory()\n output = factory.create(export_json=True)\n click.echo(output)", "def create_profile(username):\n user = User.objects.create(username=username)\n return Profile.objects.create(user=user)", "def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)", "def CreateWiredNetworkProfile(self, profilename, default=False):\n profilename = misc.to_unicode(profilename)\n print \"Creating wired profile for \" + profilename\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n if config.has_section(profilename):\n return False\n config.add_section(profilename)\n config.set(profilename, \"ip\", None)\n config.set(profilename, \"broadcast\", None)\n config.set(profilename, \"netmask\", None)\n config.set(profilename, \"gateway\", None)\n config.set(profilename, \"dns1\", None)\n config.set(profilename, \"dns2\", None)\n config.set(profilename, \"dns3\", None)\n config.set(profilename, \"beforescript\", None)\n config.set(profilename, \"afterscript\", None)\n config.set(profilename, \"disconnectscript\", None)\n config.set(profilename, \"default\", default)\n config.write(open(self.wired_conf, \"w\"))\n return True", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 
'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def test_create_with_profile(self):\n # make sure the guest not exists\n self.sdkapi.guest_create(self.userid, 1, 1024,\n user_profile=CONF.zvm.user_profile)\n self.assertTrue(\n self.test_util.wait_until_create_userid_complete(self.userid))", "def _create_ipsec_profile(self, context, connection):\n # Note(asarfaty) the NSX profile can be reused, so we can consider\n # creating it only once in the future, and keeping a use-count for it.\n # There is no driver callback for profiles creation so it has to be\n # done on connection creation.\n ipsec_policy_id = connection['ipsecpolicy_id']\n ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(\n context, ipsec_policy_id)\n\n try:\n profile = self._nsx_vpn.tunnel_profile.create(\n ipsecpolicy['name'] or ipsecpolicy['id'],\n description=ipsecpolicy['description'],\n encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[\n ipsecpolicy['encryption_algorithm']],\n digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[\n ipsecpolicy['auth_algorithm']],\n dh_group=ipsec_utils.PFS_MAP[ipsecpolicy['pfs']],\n pfs=True,\n sa_life_time=ipsecpolicy['lifetime']['value'],\n tags=self._nsx_tags(context, connection))\n except nsx_lib_exc.ManagerError as e:\n msg = _(\"Failed to create a tunnel profile: %s\") % e\n raise nsx_exc.NsxPluginException(err_msg=msg)\n return profile['id']", "def create_profile(sender, **kw):\n user = kw['instance']\n if kw['created']:\n profile = UserProfile(user=user)\n profile.save()", "def create_single_access_switch_profile(self, switch_dn, if_profile_dn, switch_p_name):\n # Create switch profile\n switch_mo = self.moDir.lookupByDn(switch_dn)\n switch_p_mo = NodeP('uni/infra/', switch_p_name)\n self.commit(switch_p_mo)\n # Add switch selector\n switch_selector_mo = LeafS(str(switch_p_mo.dn), str(switch_mo.rn), 'range')\n self.commit(switch_selector_mo)\n node_block_mo = NodeBlk(switch_selector_mo.dn, str(switch_mo.rn) + NB_SUFIX, from_=switch_mo.id, to_=switch_mo.id)\n self.commit(node_block_mo)\n # Add interface profile\n rs_acc_port_p_mo = RsAccPortP(switch_p_mo.dn, if_profile_dn)\n self.commit(rs_acc_port_p_mo)", "def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)", "def create_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n created = 0\n already_exists = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is already existing\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n already_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n 
CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_select_server_profile_template(profile.prof_temp)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if getattr(profile, 'hardwareType', None) is not None:\n hardware_type = profile.hardwareType\n\n if str(hardware_type)[:2:] == 'BL' or profile.server == 'unassigned':\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CreateServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n 
CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... \")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfile.Advanced.set(profile)\n\n CreateServerProfile.click_create_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. 
\"\n \"test will skip this profile and continue to create other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % profile.name)\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180, fail_if_false=False) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n if FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=timeout, fail_if_false=False) is True:\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok_or_warn(profile.name, timeout=180, fail_if_false=False) is True:\n logger.info(\"created server profile '%s' successfully\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_server_profile_status_ok_or_warn' = FALSE, skip to next profile ... \")\n continue\n else:\n logger.warn(\"'wait_activity_action_ok' = FALSE, skip to next profile ... \")\n FusionUIBase.show_activity_sidebar()\n continue\n else:\n logger.info(\"created server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_create_server_profile_dialog_disappear' = FALSE, skip to next profile ... \")\n CreateServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! 
all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n ui_lib.fail_test(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True", "def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_network(self, context, network):\n LOG.debug(_(\"NeutronRestProxyV2: create_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n with context.session.begin(subtransactions=True):\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context,\n network[\"network\"])\n\n # create network in DB\n new_net = super(NeutronRestProxyV2, self).create_network(context,\n network)\n self._process_l3_create(context, new_net, network['network'])\n mapped_network = self._get_mapped_network_with_subnets(new_net,\n context)\n\n # create network on the network controller\n self.servers.rest_create_network(tenant_id, mapped_network)\n\n # return created network\n return new_net", "def create_new_profile():\n client_nickname = input('Enter client profile name: ')\n client_username = input('Enter client username: ')\n client_hostname = input('Enter client hostname: ')\n client_port = '-p' + input('Enter client port: ')\n new_profile = SshUsers(client_nickname, client_username, client_hostname, client_port)\n return add_user_to_db(new_profile)", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def create_my_profile(\n body: Optional[UserProfilePrivateCreate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def new_sddc_ipsec_vpn_tunnel_profile(**kwargs):\n proxy = kwargs['proxy']\n session_token = kwargs['sessiontoken']\n display_name = kwargs['display_name']\n dh_group = kwargs['dh_group']\n digest_algo = kwargs['digest_algo']\n encrypt_algo = kwargs['encrypt_algo']\n pfs = kwargs['pfs_disable']\n\n if not pfs:\n pfs = 
False\n else:\n pfs = True\n\n # Check for incompatible IPSec Tunnel profile options\n if 'NO_ENCRYPTION_AUTH_AES_GMAC_128' in encrypt_algo and digest_algo:\n sys.exit('Digest algorithm should not be configured with NO_ENCRYPTION_AUTH_AES_GMAC selected as the encryption algorithm')\n elif 'NO_ENCRYPTION_AUTH_AES_GMAC_192' in encrypt_algo and digest_algo:\n sys.exit('Digest algorithm should not be configured with NO_ENCRYPTION_AUTH_AES_GMAC selected as the encryption algorithm')\n elif 'NO_ENCRYPTION_AUTH_AES_GMAC_256' in encrypt_algo and digest_algo:\n sys.exit('Digest algorithm should not be configured with NO_ENCRYPTION_AUTH_AES_GMAC selected as the encryption algorithm')\n else:\n pass\n\n #Build JSON Data\n json_data = {\n \"resource_type\": \"IPSecVpnTunnelProfile\",\n \"display_name\": display_name,\n \"id\": display_name,\n \"encryption_algorithms\": encrypt_algo,\n \"digest_algorithms\": digest_algo,\n \"dh_groups\": dh_group,\n \"enable_perfect_forward_secrecy\": pfs\n }\n json_response_status_code = new_ipsec_vpn_profile_json(proxy, session_token, display_name, json_data)\n if json_response_status_code == 200:\n sys.exit(f'IPSec Tunnel Profile {display_name} was created successfully')\n else:\n print('There was an error')\n sys.exit(1)", "def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)", "def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()", "def create_profile(sender, **kwargs):\n\n # I import profile here cause i can't import it right in the top.\n from .profiles import Profile\n\n user = kwargs['instance']\n\n Profile.objects.get_or_create(user=user)", "def create_pootle_profile(sender, instance, **kwargs):\n try:\n profile = instance.get_profile()\n except PootleProfile.DoesNotExist:\n profile = PootleProfile(user=instance)\n profile.save()", "def create_profile(sender, instance, signal, created, **kwargs):\n \n from phylocommons.models import UserProfile\n \n if created:\n UserProfile(user = instance).save()", "def _create_profile(self, user, profile_dir):\n log.info(\"Writing IPython cluster config files\")\n self._master.ssh.switch_user(user)\n self._master.ssh.execute(\"rm -rf '%s'\" % profile_dir)\n self._master.ssh.execute('ipython profile create')\n self._master.ssh.switch_user('root')", "def create(\n name: str,\n from_name: str = typer.Option(None, \"--from\", help=\"Copy an existing profile.\"),\n):\n\n profiles = prefect.settings.load_profiles()\n if name in profiles:\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n [red]Profile {name!r} already exists.[/red]\n To create a new profile, remove the existing profile first:\n\n prefect profile delete {name!r}\n \"\"\"\n ).strip()\n )\n raise typer.Exit(1)\n\n if from_name:\n if from_name not in profiles:\n exit_with_error(f\"Profile {from_name!r} not found.\")\n\n # Create a copy of the profile with a new name and add to the collection\n profiles.add_profile(profiles[from_name].copy(update={\"name\": name}))\n else:\n profiles.add_profile(prefect.settings.Profile(name=name, settings={}))\n\n prefect.settings.save_profiles(profiles)\n\n app.console.print(\n textwrap.dedent(\n f\"\"\"\n Created profile with properties:\n name - {name!r}\n from name - {from_name or None}\n\n Use created profile for future, subsequent commands:\n prefect profile use {name!r}\n\n Use created profile temporarily for a single command:\n prefect -p 
{name!r} config view\n \"\"\"\n )\n )" ]
[ "0.7556703", "0.72365105", "0.68905246", "0.6554721", "0.650793", "0.64945596", "0.6455163", "0.64482087", "0.63874567", "0.63485795", "0.63088846", "0.6287079", "0.62742215", "0.6247466", "0.6243951", "0.623538", "0.6213833", "0.6198993", "0.619535", "0.6186585", "0.61607736", "0.6144575", "0.6141656", "0.6132331", "0.61218894", "0.6118466", "0.6109497", "0.6093889", "0.6087245", "0.6071439" ]
0.8449724
0
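For the create wrapper, the body dict is posted as-is under a 'network_profile' key. A sketch of a plausible request body; the attribute names (segment_type, physical_network, segment_range) are assumptions modelled on Cisco N1KV network profiles and may not apply to every deployment:

# create_network_profile() POSTs the body verbatim; the attributes
# below are assumed, N1KV-style, and purely illustrative.
body = {
    'network_profile': {
        'name': 'vlan-profile-1',
        'segment_type': 'vlan',           # assumed attribute
        'physical_network': 'physnet1',   # assumed attribute
        'segment_range': '100-199',       # assumed attribute
    }
}
created = neutron.create_network_profile(body=body)
print(created['network_profile']['id'])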
Update a network profile.
def update_network_profile(self, profile, body=None): return self.put(self.network_profile_path % (profile), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)", "def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)", "def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })", "def do_update(self):\n params = self.inputs\n new_profile_id = params.get('new_profile_id', None)\n if new_profile_id and new_profile_id == self.entity.profile_id:\n params.pop('new_profile_id')\n\n if not params:\n return self.RES_OK, 'No property to update.'\n\n res = self.entity.do_update(self.context, params)\n if res:\n return self.RES_OK, 'Node updated successfully.'\n else:\n return self.RES_ERROR, 'Node update failed.'", "def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })", "def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)", "def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )", "def update_profile(self, channels=None): # pragma: no cover\n pass", "def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)", "def update(self,\n dpd_profile_id,\n ip_sec_vpn_dpd_profile,\n ):\n return self._invoke('update',\n {\n 'dpd_profile_id': dpd_profile_id,\n 'ip_sec_vpn_dpd_profile': ip_sec_vpn_dpd_profile,\n })", "def update(self,\n port_mirroring_profile_id,\n port_mirroring_profile,\n ):\n return self._invoke('update',\n {\n 'port_mirroring_profile_id': port_mirroring_profile_id,\n 'port_mirroring_profile': port_mirroring_profile,\n })", "def update(self,\n ipfix_l2_profile_id,\n i_pfix_l2_profile,\n ):\n return self._invoke('update',\n {\n 'ipfix_l2_profile_id': ipfix_l2_profile_id,\n 'i_pfix_l2_profile': i_pfix_l2_profile,\n })", "def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)", "def set_profile(self, profile: str):\n self._profile = profile", "def update_my_profile(\n body: Optional[UserProfileUpdate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = UpdateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n 
self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def update(self, **kwargs: Any):\n if not kwargs:\n return False\n for key, value in kwargs.items():\n if key.lower() == _PROFILE.lower():\n self._set_profile(value)\n else:\n try:\n self._config_parser.set(self.profile, key, str(value))\n except NoSectionError:\n # Create and set default profile if it does not exist in .bonsaiconfig\n self._set_profile(self.profile)\n self._config_parser.set(self.profile, key, str(value))\n\n if not self._write_dot_bonsaiconfig():\n return False\n\n self._parse_config(self.profile)\n\n return True", "def setprofile(variable, value, account, pair):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n keys = []\n values = []\n if pair:\n for p in pair:\n key, value = p.split(\"=\")\n keys.append(key)\n values.append(value)\n if variable and value:\n keys.append(variable)\n values.append(value)\n\n profile = Profile(keys, values)\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n\n json_metadata = Profile(acc[\"json_metadata\"] if acc[\"json_metadata\"] else {})\n json_metadata.update(profile)\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def test_update_payment_profile(self):\n self.cim.update_payment_profile(\n customer_profile_id=u\"122\",\n customer_payment_profile_id=u\"444\",\n card_number=u\"422222222222\",\n expiration_date=u\"2009-10\"\n )", "def update_profile(username):\n\n description = request.json.get('description')\n token = request.headers.get('token')\n\n if description is None:\n return jsonify({'message': 'New description not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"You may not edit others profiles\"}), 404\n\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n Profiles[username]['description'] = description\n return Profiles[username]", "def show_network_profile(self, profile, **params):\r\n return self.get(self.network_profile_path % (profile), params=params)", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def users_profile_update(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n token = auth.current_user()[1]\n content = request.form\n password = content[\"password\"] if \"password\" in content else None\n fullname = content[\"fullname\"] if \"fullname\" in content else None\n phone_number = content[\"phone_number\"] if 
\"phone_number\" in content else None\n photo = Photo.from_bytes(request.files['photo'].stream) if 'photo' in request.files else None\n try:\n self.auth_server.profile_update(email=email_query, user_token=token,\n password=password, fullname=fullname,\n phone_number=phone_number, photo=photo)\n except UnauthorizedUserError:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n except UnexistentUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % email_query)\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % email_query), 404\n return messages.SUCCESS_JSON, 200", "def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def ModifyNetwork(self, network, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/networks/%s/modify\" %\n (GANETI_RAPI_VERSION, network)), None, kwargs)", "def updateFromURL_(self, url):\r\n \r\n LogDebug(u\"updateFromURL:%@\", url)\r\n \r\n if self.profileUpdateWindow:\r\n # Show the progress window.\r\n self.progressBar.setIndeterminate_(True)\r\n self.progressBar.startAnimation_(self)\r\n self.profileUpdateWindow.makeKeyAndOrderFront_(self)\r\n \r\n # Create a buffer for data.\r\n self.profileUpdateData = NSMutableData.alloc().init()\r\n # Start download.\r\n request = NSURLRequest.requestWithURL_(url)\r\n self.connection = NSURLConnection.connectionWithRequest_delegate_(request, self)\r\n LogDebug(u\"connection = %@\", self.connection)\r\n if not self.connection:\r\n LogWarning(u\"Connection to %@ failed\", url)\r\n if self.profileUpdateWindow:\r\n self.profileUpdateWindow.orderOut_(self)\r\n self.delegate.profileUpdateFailed_(error)" ]
[ "0.76141626", "0.7451812", "0.69024754", "0.68228656", "0.66997266", "0.6656515", "0.66172934", "0.6465898", "0.6317081", "0.6289909", "0.61952424", "0.610687", "0.60899025", "0.606742", "0.597912", "0.5957542", "0.59494334", "0.5936277", "0.59313524", "0.59103763", "0.5901446", "0.58308804", "0.58268136", "0.5805565", "0.57943666", "0.57891726", "0.5779616", "0.5774501", "0.57543665", "0.57503235" ]
0.8979735
0
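Updates use the usual partial-body convention: only the keys present in the body change. A short sketch reusing the client from the earlier examples, with a placeholder ID:

# update_network_profile() PUTs the partial body to the profile's URL.
neutron.update_network_profile(
    'PROFILE_ID',                                     # placeholder ID
    body={'network_profile': {'name': 'vlan-profile-renamed'}})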
Delete the network profile.
def delete_network_profile(self, profile): return self.delete(self.network_profile_path % profile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_network_profile(arn=None):\n pass", "def test_delete_profile(self):\n self.cim.delete_profile(customer_profile_id=u\"123\")", "def DeleteWiredNetworkProfile(self, profilename):\n profilename = misc.to_unicode(profilename)\n print \"Deleting wired profile for \" + str(profilename)\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n if config.has_section(profilename):\n config.remove_section(profilename)\n else:\n return \"500: Profile does not exist\"\n config.write(open(self.wired_conf, \"w\"))\n return \"100: Profile Deleted\"", "def remove_saved_profile(self, profile):\n\n self.profiles.remove(profile)\n gamedata.GameData._delete_game_data_file(path.join(self.save_dir, profile.player_name + '.yaml'))", "def delete(name: str):\n profiles = prefect.settings.load_profiles()\n if name not in profiles:\n exit_with_error(f\"Profile {name!r} not found.\")\n\n current_profile = prefect.context.get_settings_context().profile\n if current_profile.name == name:\n exit_with_error(\n f\"Profile {name!r} is the active profile. You must switch profiles before\"\n \" it can be deleted.\"\n )\n\n profiles.remove_profile(name)\n\n verb = \"Removed\"\n if name == \"default\":\n verb = \"Reset\"\n\n prefect.settings.save_profiles(profiles)\n exit_with_success(f\"{verb} profile {name!r}.\")", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def delete(self,\n tunnel_profile_id,\n ):\n return self._invoke('delete',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n })", "def delete(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.delete_instance_profile(**params)", "def delete_profile(subscription_key, profile_id):\r\n\r\n helper = VerificationServiceHttpClientHelper.VerificationServiceHttpClientHelper(subscription_key)\r\n\r\n helper.delete_profile(profile_id)\r\n\r\n print('Profile {0} has been successfully deleted.'.format(profile_id))", "def remove_vpn_profile(**kwargs):\n proxy = kwargs['proxy']\n session_token = kwargs['sessiontoken']\n display_name = kwargs['display_name']\n profile_type = kwargs['profile_type']\n\n match profile_type:\n case \"ike\":\n profile = \"ipsec-vpn-ike-profiles\"\n case \"ipsec\":\n profile = \"ipsec-vpn-tunnel-profiles\"\n case \"dpd\":\n profile = \"ipsec-vpn-dpd-profiles\"\n case other:\n print(\"Invalid profile type\")\n sys.exit(1)\n\n json_response_status_code = delete_vpn_profile(proxy, session_token, display_name, profile)\n if json_response_status_code == 200:\n sys.exit(f\"Tier-1 VPN service {display_name} was deleted successfully\")\n else:\n print(f\"There was an error deleting Tier1 VPN service {display_name}\")\n sys.exit(1)", "def test_pm_profile_remove(profile_manager, test_profile):\n profile_manager.activate(test_profile.name)\n profile_manager.delete(test_profile.name)\n assert not profile_manager.is_active(test_profile.name)\n assert not os.path.exists(test_profile.path)", "def delete(self, request, flavor_profile_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_flavor_profile(flavor_profile_id,\n ignore_missing=True)", "def delete(self, entity, schema):\n if schema == CoreProfile.__schema__:\n raise APIBadRequest('Cannot delete the core profile.')\n\n profile = entity.profiles.get_or_404(schema=schema)\n profile.delete()\n return '', 200", "async def test_delete(self):\n rsps = respx.delete(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n 
.mock(return_value=Response(200))\n await provisioning_client.delete_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'", "def delete_profile(profile_id):\n \n profile = mongo.db.profiles\n profile.delete_one({'_id': ObjectId(profile_id)})\n flash('Your profile has been deleted.', 'success')\n return redirect(url_for('dashboard'))", "def test_delete_payment_profile(self):\n self.cim.delete_payment_profile(\n customer_profile_id=u\"123\",\n customer_payment_profile_id=u\"432\"\n )", "def delprofile(variable, account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n json_metadata = Profile(acc[\"json_metadata\"])\n\n for var in variable:\n json_metadata.remove(var)\n\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)", "def delete(profile, name):\n # Make sure the instance profile exists.\n if not exists(profile, name):\n msg = \"No instance profile '\" + str(name) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Now try to delete it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"delete\", params)\n\n # Check that it was, in fact, deleted.\n if exists(profile, name):\n msg = \"The instance profile '\" + str(name) + \"' was not deleted.\"\n raise ResourceNotDeleted(msg)", "def sqdel_profile(self, profile_to_del):\r\n self.cursor.execute(\"DROP TABLE IF EXISTS \" + profile_to_del)", "def delete_profile(cls, id):\n return cls.objects.filter(id == id).delete()", "def fusion_api_delete_server_profile(self, name=None, uri=None, param='', api=None, headers=None):\n return self.profile.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def delete_user_profile(IamUserArn=None):\n pass", "def on_remove_profile_action(self):\n profile = self.Manager.profile\n\n # TODO: while it can probably be safely assumed that currentData() and currentIndex() will refer to the same profile as Manager.profile, it is NOT guaranteed; we should either verify that they are indeed the same or search the model for the profile name (as pulled from the manager) to avoid the issue altogether\n if message('warning', 'Confirm Delete Profile',\n 'Delete \"' + profile.name + '\"?',\n 'Choosing \"Yes\" below will remove this profile '\n 'and all saved information within it, including '\n 'customized load-orders, ini-edits, etc. Note '\n 'that installed mods will not be affected. This '\n 'cannot be undone. 
Do you wish to continue?'):\n self.Manager.delete_profile(\n self._selector.currentData())\n self._selector.removeItem(\n self._selector.currentIndex())", "def test_delete_profile(self):\n url = self.url\n url = url + '{}/'.format(\n self.profile.pk\n )\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n self.assertEqual(Profile.objects.count(), 1)", "def delete(self,\n ike_profile_id,\n ):\n return self._invoke('delete',\n {\n 'ike_profile_id': ike_profile_id,\n })", "def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)", "def remove_profile(self, request, *args, **kwargs):\n context = {\n 'conversation': self.get_object(),\n 'request': request\n }\n serializer = RemoveProfileSerializer(data=request.data, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)", "def fusion_api_delete_hypervisor_cluster_profile(self, name=None, uri=None, api=None, headers=None):\n return self.cluster_profile.delete(name, uri, api, headers)", "def delete(self,\n dpd_profile_id,\n ):\n return self._invoke('delete',\n {\n 'dpd_profile_id': dpd_profile_id,\n })", "def deleteSocialAuthentication(self, network):\n\t\turl = \"https://habitica.com/api/v3/user/auth/social/\" + network\n\t\treturn(deleteUrl(url, self.credentials))" ]
[ "0.84156525", "0.74447817", "0.72688985", "0.7117286", "0.70243156", "0.6956503", "0.6906419", "0.6875007", "0.68728507", "0.68238163", "0.67722034", "0.6724106", "0.6704582", "0.670159", "0.66890985", "0.6682457", "0.66775584", "0.66704357", "0.65861684", "0.65540975", "0.6553598", "0.6551222", "0.6514863", "0.65001625", "0.64989495", "0.64565015", "0.64241195", "0.6413774", "0.6397232", "0.6388453" ]
0.90215075
0
Update a policy profile.
def update_policy_profile(self, profile, body=None): return self.put(self.policy_profile_path % (profile), body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_policy(self, *args, **kwargs):\r\n pass", "def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)", "def update_policy(self):\n pass", "def update_Policy(self,inputpolicy):\n \n policyob = self.SD_Map.retrieve_ob(inputpolicy)\n policyob.values[-1] = self.PolicyDicts[inputpolicy][self.translate(self.policy_option_vars[inputpolicy].get(),\n input_language = self.language,\n output_language = 'english')]", "def UpdatePolicy(self, request, global_params=None):\n config = self.GetMethodConfig('UpdatePolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)", "def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)", "def test_update_payment_profile(self):\n self.cim.update_payment_profile(\n customer_profile_id=u\"122\",\n customer_payment_profile_id=u\"444\",\n card_number=u\"422222222222\",\n expiration_date=u\"2009-10\"\n )", "def update_profile(profile_id):\n \n profile = mongo.db.profiles\n profile.find_one_and_update({'_id': ObjectId(profile_id)},\n {'$set': {'date': datetime.utcnow(),\n 'headline': request.form.get('headline'),\n 'bio': request.form.get('bio'),\n 'xp': request.form.get('xp'),\n 'interests': request.form.get('interests'),\n 'stack': request.form.get('stack'),\n 'languages': request.form.get('languages'),\n 'frameworks': request.form.get('frameworks'),\n 'github': request.form.get('github'),\n 'linkedin': request.form.get('linkedin')\n }\n }\n )\n return redirect(url_for('dashboard'))", "def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })", "def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass", "def update_policy(policy_id):\n old_policy = PolicyService.get_policy_by_id(policy_id)\n if old_policy is None:\n abort(404)\n new_policy = PolicyService.update_policy_by_id(policy_id, json_to_policy(request.json))\n if new_policy is None:\n abort(406)\n return new_policy.__dict__", "def update(self,\n dpd_profile_id,\n ip_sec_vpn_dpd_profile,\n ):\n return self._invoke('update',\n {\n 'dpd_profile_id': dpd_profile_id,\n 'ip_sec_vpn_dpd_profile': ip_sec_vpn_dpd_profile,\n })", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )", "def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })", "def put(self, request, flavor_profile_id):\n update_flavor_profile(request)", "def update_policy(self, policy, inverse_policy=None):\n self.make_T_policy_matrix(policy)\n self.inverse_dynamics_by_time = dict()\n self.policy = policy\n self.inverse_policy = inverse_policy", "async def test_update(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n assert rsps.calls[0].request.url == 
\\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps({'name': 'new name'}).encode('utf-8')", "def test_update_ikepolicy(self):\r\n resource = 'ikepolicy'\r\n cmd = ikepolicy.UpdateIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def update_policy(ranger_url, policy_id, policy_data, admin_username_password):\n\n url = format(\"{ranger_url}/service/public/v2/api/policy/{policy_id}\")\n\n base_64_string = base64.encodestring(admin_username_password).replace('\\n', '')\n\n request = urllib2.Request(url, json.dumps(policy_data))\n request.get_method = lambda: 'PUT'\n request.add_header('Content-Type', 'application/json')\n request.add_header('Accept', 'application/json')\n request.add_header('Authorization', format('Basic {base_64_string}'))\n\n try:\n result = openurl(request, timeout=20)\n response_code = result.getcode()\n if response_code == 200:\n Logger.info(format(\"Successfully updated policy in Ranger Admin\"))\n return response_code\n else:\n Logger.error(format(\"Unable to update policy in Ranger Admin\"))\n return None\n except urllib2.HTTPError as e:\n raise Fail(\"HTTPError while updating policy Reason = \" + str(e.code))\n except urllib2.URLError as e:\n raise Fail(\"URLError while updating policy. Reason = \" + str(e.reason))\n except TimeoutError:\n raise Fail(\"Connection timeout error while updating policy\")\n except Exception as err:\n raise Fail(format(\"Error while updating policy. Reason = {err}\"))", "def update_profile(username):\n\n description = request.json.get('description')\n token = request.headers.get('token')\n\n if description is None:\n return jsonify({'message': 'New description not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"You may not edit others profiles\"}), 404\n\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n Profiles[username]['description'] = description\n return Profiles[username]", "def update_my_profile(\n body: Optional[UserProfileUpdate] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = UpdateMyProfile.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def device_update_policy(self, device_ids, policy_id):\n return self._device_action(device_ids, \"UPDATE_POLICY\", {\"policy_id\": policy_id})", "def test_update_ipsecpolicy(self):\r\n resource = 'ipsecpolicy'\r\n cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def policy_update_fn(self, data: Dict[str, Any], result: Dict[str, Any]) -> None:", "def update(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('update',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def put(self, request, l7_policy_id):\n kwargs = {'l7_policy_id': l7_policy_id}\n 
update_l7_policy(request, **kwargs)", "def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200", "def update_profile(orcid_id, data=None):\n \n u = db.session.query(User).filter_by(orcid_id=orcid_id).first()\n if u:\n u.updated = datetime.utcnow()\n if data:\n u.profile = data\n # save the user\n db.session.begin_nested()\n try:\n db.session.add(u)\n db.session.commit()\n except exc.IntegrityError as e:\n db.session.rollback()\n # per PEP-0249 a transaction is always in progress \n db.session.commit()", "def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)" ]
[ "0.75090593", "0.7367581", "0.72250706", "0.6973589", "0.68901694", "0.67963797", "0.6726454", "0.6695949", "0.6664862", "0.64785635", "0.64703935", "0.6433767", "0.6402601", "0.63865376", "0.63848585", "0.6275494", "0.6220236", "0.61943", "0.618253", "0.6181913", "0.61352074", "0.61348075", "0.6132759", "0.61072224", "0.6097533", "0.60953945", "0.60913616", "0.6070478", "0.59971535", "0.5987663" ]
0.8853719
0
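Illustrative aside (editorial, not a dataset row): the positive document above is a neutronclient-style PUT wrapper, so a call could look like the sketch below. The `client` object and the 'policy_profile' body schema are assumptions inferred from the method signature, not taken from this row.

    def example_update_policy_profile(client):
        # `client` is assumed to be an instance of the class defining update_policy_profile.
        body = {'policy_profile': {'name': 'renamed-profile'}}  # body schema is an assumption
        return client.update_policy_profile('profile-uuid', body=body)  # UUID is a placeholder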
Creates a metering label.
def create_metering_label(self, body=None): return self.post(self.metering_labels_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def create_metering_label_rule(self, body=None):\r\n return self.post(self.metering_label_rules_path, body=body)", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def create_label(self, on, text: str):\n return tk.Label(on, font=self.FONT, bg=self.BG_COLOR, text=text)", "def _create_label(self, x, y, text, width=50, **config):\n\n self.main_canvas.create_text(x, y, text='%6s' % text, width=width, **config)", "def create_label(self, org, name):\n pass", "def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])", "def create_gen_labels(master: Widget) -> None:\r\n\r\n gen_label = Label(master, text='Gen:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n gen_label.pack(side=LEFT)\r\n self.gen_number = Label(master, text=0, font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n self.gen_number.pack(side=LEFT)", "def genLabel(self):\n self._nextlabelid += 1\n return CLABEL(self._nextlabelid)", "def make_label(self, label, units):\n nice_label = self.tex_axis_label(label)\n if not (units == 'dimensionless') and \\\n (units is not None) and (not units == []):\n nice_label += ' (%s)'%self.tex_axis_label(units)\n return nice_label", "def create_label(self):\n\n self.pc_label = Label(self.form_box, text=\"Primary Current [A]:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.sc_label = Label(self.form_box, text=\"Secondary Current [A]:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.avg_t_label = Label(self.form_box, text=\"Average Time [s]: \", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nwt_label = Label(self.form_box, text=\"network type (static/dynamic):\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nw_ip_label = Label(self.form_box, text=\"IpAddress:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nw_gw_label = Label(self.form_box, text=\"Gateway:\", anchor='nw', width=32, bg=self.design.color.secondary,\n font=('Arial', 15))\n self.nw_sm_label = Label(self.form_box, text=\"subnet mask:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nw_mca_label = Label(self.form_box, text=\"Mac Address:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))", "def __call__(self, *args, **kwargs) -> L:\n label = self._label_adapter.create_label(*args,\n document=self._document,\n **kwargs)\n self._current_labels.append(label)\n return 
label", "def label_maker(string, size, font='Courier'):\n label = GLabel(string)\n label.font = str(font) + '-' + str(size)\n return label", "def create_label(self, name):\n payload = self._build_params(name=name)\n return Label.deserialize(self._post('labels', None, payload))", "def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. (default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]", "def test_delete_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.DeleteMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n args = [myid]\r\n self._test_delete_resource(resource, cmd, myid, args)", "def create_metric(self) -> EvalMetric:\n pass", "def label(self, color):\r\n return Label(self, color)", "def create_label(image_name,number):\r\n\r\n target=[]\r\n for i in range(0,number):\r\n target.append(0)\r\n target[image_name]=1\r\n\r\n return target", "def Label(self) -> str:", "def get_T_label(\n self,\n x_val: float,\n graph: ParametricFunction,\n label: float | str | Mobject | None = None,\n label_color: ParsableManimColor | None = None,\n triangle_size: float = MED_SMALL_BUFF,\n triangle_color: ParsableManimColor | None = WHITE,\n line_func: Line = Line,\n line_color: ParsableManimColor = YELLOW,\n ) -> VGroup:\n\n T_label_group = VGroup()\n triangle = RegularPolygon(n=3, start_angle=np.pi / 2, stroke_width=0).set_fill(\n color=triangle_color,\n opacity=1,\n )\n triangle.height = triangle_size\n triangle.move_to(self.coords_to_point(x_val, 0), UP)\n if label is not None:\n t_label = self.x_axis._create_label_tex(label, color=label_color)\n t_label.next_to(triangle, DOWN)\n T_label_group.add(t_label)\n\n v_line = self.get_vertical_line(\n self.i2gp(x_val, graph),\n color=line_color,\n line_func=line_func,\n )\n\n T_label_group.add(triangle, v_line)\n\n return T_label_group", "def _createLabel(element, a, state):\n # len(e.symbol) is 1 or 2 => a % (either 1000 or 100)\n # => gives exact a, or last two digits.\n # the division by 10 removes the last digit.\n firstTwoDigits = (a % (10 ** (4 - len(element.symbol)))) // 10\n # the last digit is either 0-9 if state=0, or A-J if state=1, or K-T if state=2, or U-d if state=3\n lastDigit = (\n \"0123456789\" \"ABCDEFGHIJ\" \"KLMNOPQRST\" \"UVWXYZabcd\"[(a % 10) + state * 10]\n )\n return \"{}{}{}\".format(element.symbol, firstTwoDigits, lastDigit)", "def _create_signal_label(self, master, row, column, signame, minimum, maximum, unit):\n lbl_name = Label(master, text=signame, bg=COLUMN_COLOR_LIST[column], font=(\"Helvetica\", 12))\n lbl_min = Label(master, text=minimum, bg=COLUMN_COLOR_LIST[column], font=(\"Helvetica\", 9))\n lbl_max = Label(master, text=maximum, bg=COLUMN_COLOR_LIST[column + 1], font=(\"Helvetica\", 9))\n lbl_unit = Label(master, text=unit, bg=COLUMN_COLOR_LIST[column + 2], font=(\"Helvetica\", 9))\n lbl_name.grid(row=row, column=column, columnspan=3, sticky=W+E)\n lbl_min.grid(row=row+1, column=column, sticky=W+E)\n lbl_max.grid(row=row+1, column=column+1, sticky=W+E)\n lbl_unit.grid(row=row+1, column=column+2, sticky=W+E)", "def _create_gauge(self, name: str, attributes: Attributes = None):\n otel_safe_name = _get_otel_safe_name(name)\n key = _generate_key_name(name, attributes)\n\n gauge = self.meter.create_observable_gauge(\n name=otel_safe_name,\n callbacks=[partial(self.read_gauge, 
_generate_key_name(name, attributes))],\n )\n self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)\n\n return gauge", "def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }", "def _create_label(self, label: str, ent_id: Union[str, None]) -> str:\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label", "def label(self):\r\n if isinstance(self.Lbeta, str):\r\n result = self.Lbeta\r\n else:\r\n result = 'T%.2d' % int(round(self.Lbeta))\r\n result += 'E%.2d' % int(round(self.E))\r\n result += 'G%.2d' % int(round(self.minTauG))\r\n result += self.insulation\r\n return result", "def __init__(self, text, separator_line_thickness, label_type, dpi=(600, 600)):\n \n def get_text_on_label(text, label_type):\n \"\"\"Format how the text will look on the label.\n \n text - Text to be placed on the label.\n label_type - One of the types specifying the label layout.\n \"\"\"\n text_on_label = \"\".join([c for c in text if c in string.ascii_letters + string.digits])\n if label_type == 0:\n text_on_label = \"\"\n elif label_type == 1 or label_type == 2 or label_type == 4:\n text_on_label = \"\\n\".join([text_on_label[:4],\n text_on_label[4:8],\n text_on_label[8:12],\n text_on_label[12:]])\n elif label_type == 3:\n text_on_label = \"\\n\".join([\"-\".join([text_on_label[:4],\n text_on_label[4:8]]),\n \"-\".join([text_on_label[8:12],\n text_on_label[12:]])])\n else:\n text_on_label = \"\"\n return text_on_label\n \n self.label_image = None\n self.text_on_label = get_text_on_label(text, label_type)\n self.label_type = label_type\n self.separator_line_thickness = separator_line_thickness\n self.dpi = dpi", "def __init__(self, label):\n self.label = label", "def _assign_label(self, format):\n cht_tmpl = self.out_label_tmpl\n return cht_tmpl.substitute(format)" ]
[ "0.79385513", "0.69731164", "0.6796265", "0.6574569", "0.6507201", "0.64291316", "0.63145757", "0.61448723", "0.607408", "0.59968865", "0.59874487", "0.5945679", "0.5923062", "0.59200543", "0.58582246", "0.576239", "0.5743417", "0.57324505", "0.57050675", "0.5694959", "0.5686932", "0.567331", "0.5668676", "0.56523937", "0.56200093", "0.5607177", "0.5605984", "0.56043416", "0.558822", "0.55717874" ]
0.7930899
1
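Illustrative aside (editorial, not a dataset row): the positive document above POSTs to metering_labels_path, so a plausible call follows the usual OpenStack Neutron body convention; the `client` object and field values are assumptions.

    def example_create_metering_label(client):
        # `client` is assumed to be an instance of the class defining create_metering_label.
        body = {'metering_label': {'name': 'web-traffic',
                                   'description': 'meter web tier traffic',
                                   'shared': False}}  # Neutron-style body, assumed
        return client.create_metering_label(body=body)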
Deletes the specified metering label.
def delete_metering_label(self, label): return self.delete(self.metering_label_path % (label))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.DeleteMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n args = [myid]\r\n self._test_delete_resource(resource, cmd, myid, args)", "def delete_metering_label_rule(self, rule):\r\n return self.delete(self.metering_label_rule_path % (rule))", "def remove(self, label):\n\n\t\t\tself[label].remove()", "def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)", "def delData(self, label):\n\n return self._data.pop(label, None)", "def _del_label(self):\n label = self.combobox.currentText()\n if label:\n button = QMessageBox.warning(self, \"Delete label\", \n \"Are you sure that you want to delete label %s ?\" % label,\n QMessageBox.Yes,\n QMessageBox.No)\n if button == QMessageBox.Yes:\n self._label_config.remove_label(str(label))\n self._update_combobox()", "def delete_label(self, repository, name, **kwargs):\n response = self.session.delete(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, name\n )\n )\n if response.status_code != 204:\n raise GitHubError(response)", "def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )", "def remove_cluster(self, label):\n del self._clusters[label]", "async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)", "def delete(self, label):\n if label in self.bindings:\n if not self.locked:\n i = self.bindings[label]\n del self.bindings[label]\n return i\n else:\n if self.parent:\n return self.parent.delete(label)\n else:\n raise SnekEvaluationError('attempting to delete non-existing name {}'.format(label))", "def delete_label(id):\n dao.delete_label(id)\n return jsonify(dao.get_label(id))", "def RemoveLabel(self, label):\n if self.labels is None:\n self.labels = set()\n else:\n try:\n self.labels.remove(label)\n except KeyError:\n pass", "def remove_label(self, key: str):\n del self.labels[key]", "def delInfo(label: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n del self.__identity_info[label]\r\n else:\r\n raise HDDOPermissionException('Tried to delete non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to delete identity information from a closed HealthDominoDataObject.')", "def removeLabelFromPage(self, label, page):\n return self.pm_getSpaceManager().removeLabelFromPage(self._unbox(label), self._unbox(page))", "def __delitem__(self, doc_label):\n if doc_label not in self.docs:\n raise KeyError('document `%s` not found in corpus' % doc_label)\n del self.docs[doc_label]", "def delete_manifest_label(label_uuid, tag_manifest):\n\n # Find the label itself.\n label = get_manifest_label(label_uuid, tag_manifest)\n if label is None:\n return None\n\n if not label.source_type.mutable:\n raise DataModelException(\"Cannot delete immutable label\")\n\n # Delete the mapping records and label.\n (TagManifestLabelMap.delete().where(TagManifestLabelMap.label == label).execute())\n\n deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n deleted_count = ManifestLabel.delete().where(ManifestLabel.label == 
label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n label.delete_instance(recursive=False)\n return label", "def delete_label(self, label_key):\n # type: (str) -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response_result = self.connection.api_call(\n \"DELETE\",\n [\"v1\", \"datasets\", self.dataset_id, \"resources\", self.id, \"labels\", label_key],\n headers=headers,\n )\n\n if response_result:\n # Sync the latest data from API to prevent inconsistency\n self.refresh()\n\n return True", "def remove_device_label(self, device_id: str, label_id: str):\n return remove_device_label(self.api_key, device_id, label_id)", "def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n self.category.remove(category)", "def removeLabelFromSpace(self, label, space):\n return self.pm_getSpaceManager().removeLabelFromSpace(self._unbox(label), self._unbox(space))", "def delete_gauge(self, slug):\n key = self._gauge_key(slug)\n self.r.delete(key) # Remove the Gauge\n self.r.srem(self._gauge_slugs_key, slug) # Remove from the set of keys", "def delete_inV(self, *labels):\r\n self._simple_deletion('inV', labels)", "def delete(self, name):\n self.backend.delete(name)", "def remove_metric(self, metric):\n self.metrics.remove(metric)\n self.estimate()", "def delete_inE(self, *labels):\r\n self._simple_deletion('inE', labels)", "def post_delete_metrics(sender, **kwargs):\r\n tags = _database_tags('deleted', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def del_value (self, label):\n return wrapped (win32api.RegDeleteValue, self.pyobject (), label)", "def del_med_by_id(self):\n return \"DELETE FROM medic WHERE id = %s\"" ]
[ "0.8053136", "0.7480608", "0.6949647", "0.68498945", "0.64869463", "0.6456922", "0.6299056", "0.6226198", "0.6221507", "0.61977166", "0.6192721", "0.61167806", "0.6057763", "0.59570384", "0.595597", "0.59176546", "0.5915135", "0.5912779", "0.58964753", "0.5878231", "0.5839052", "0.57916826", "0.5594559", "0.55104405", "0.5490545", "0.54893076", "0.5487818", "0.54855", "0.54837286", "0.54421073" ]
0.9023373
0
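Illustrative aside (editorial, not a dataset row): the positive document above takes the label identifier and issues a DELETE; the `client` object and UUID below are placeholders, not taken from this row.

    def example_delete_metering_label(client):
        # `client` is assumed to be an instance of the class defining delete_metering_label.
        client.delete_metering_label('3b1fc3d8-0000-0000-0000-000000000000')  # placeholder UUID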
Fetches a list of all metering labels for a tenant.
def list_metering_labels(self, retrieve_all=True, **_params): return self.list('metering_labels', self.metering_labels_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_labels(self):\n return get_labels(self.api_key)", "def get_labels():\n\n logging.info(\"Getting metadata about labels\")\n\n labels = []\n\n if len(args.labels) == 0:\n logging.warning(\"No labels specified, assuming all labels. If you have a lot of labels in your inbox you could hit API limits quickly.\")\n results = GMAIL_CLIENT.users().labels().list(userId='me').execute()\n\n labels = results.get('labels', [])\n else:\n logging.info('Using labels: %s ', args.labels)\n\n for label in args.labels:\n labels.append({'id': label})\n\n if not labels:\n logging.info('No labels found.')\n sys.exit()\n\n return labels", "def get_labels(self, uuid=None):\n return self._get_query('labels', self._build_params(uuid=uuid), Label)", "def getLabels(self) -> List[str]:\n\n results = self.service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n return labels", "def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]", "async def getTiers(self, ctx):\n server_dict = self.get_server_dict(ctx)\n tierList = server_dict.setdefault(\"Tiers\", [])\n\n if(len(tierList) > 0):\n await self.bot.say(\"Tiers:\")\n for tier in tierList:\n await self.bot.say(tier)\n else:\n await self.bot.say(\":x: No tiers in tier list\")", "def get_labels(self, uuids=None, name=None, pager=None):\n params = self._build_params(uuid=uuids, name=name)\n return Label.deserialize_list(self._get_multiple('labels', params, pager))", "def labels(self, number=-1, etag=None):\n url = self._build_url(\"labels\", base_url=self._api)\n return self._iter(int(number), url, label.ShortLabel, etag=etag)", "def get_all(self):\n data = {\n 'readByQuery': {\n 'object': 'EEACCOUNTLABEL',\n 'fields': '*',\n 'query': None,\n 'pagesize': '1000'\n }\n }\n\n return self.format_and_send_request(data)['data']['eeaccountlabel']", "def get_labels():\n return if_found(dao.get_labels())", "def list_metering_label_rules(self, retrieve_all=True, **_params):\r\n return self.list('metering_label_rules',\r\n self.metering_label_rules_path, retrieve_all,\r\n **_params)", "def get_labels(self) -> List[str]:\n return self.labels", "def get_labels(self):\n return [token.label for token in self.tokens]", "def labels_all(self):\n return self._labels_all", "def get_labels():\n json_request = request.json # get the json from the server\n keys = sort_keys(json_request.keys()) # sort the keys (i.e. 
the token ids)\n labels = []\n for k in keys:\n # get the labels that the user input to the UI\n val = (json_request[k]['text'], json_request[k]['value'])\n labels.append(val)\n return labels", "def get_labels(self):\n return []", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def get_labels(self):\n return self.labels", "def ListLabels(service, user_id):\n try:\n response = service.users().labels().list(userId=user_id).execute()\n labels = response['labels']\n for label in labels:\n print ('Label id: %s - Label name: %s' % (label['id'], label['name']))\n return labels\n except errors.HttpError as error:\n print ('An error occurred: %s' % error)", "def get_all_labels(self):\n labels = self.wls_board.get_labels\n return labels", "def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])", "def get_labels(self) -> Set[str]:", "def get_metric_list(self) -> List[str]:\n ...", "def get_labels():\n session = get_session()\n try:\n query = session.query(Album.label).distinct()\n final_query = query\n labels = query.all()\n label_list = []\n for label in labels:\n label_list.append(label[0])\n count = final_query.count()\n return jsonify({\n 'items': label_list,\n 'count': count\n })\n finally:\n session.close()", "def labels(self) -> List[str]:\n\n return list(self.t0.keys())", "def get_label_list(\n self,\n project_id: int\n ) -> requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/labels'.format(\n project_id=project_id\n )\n )", "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def _GetLabels(self, directory, scan_subdirs, label, predicate):\n\n labels = []\n\n # Go through all of the files (and subdirectories) in that\n # directory.\n for entry in dircache.listdir(directory):\n entry_label = self._GetLabelFromBasename(entry)\n # If the label is not valid then pretend it\n # does not exist. It would not be valid to create an entity\n # with such an id.\n if not self.IsValidLabel(entry_label):\n continue\n # Compute the full path to 'entry'.\n entry_path = os.path.join(directory, entry)\n # If it satisfies the 'predicate', add it to the list.\n if predicate(entry_path):\n labels.append(self.JoinLabels(label, entry_label))\n # If it is a subdirectory, recurse.\n if (scan_subdirs and os.path.isdir(entry_path)\n and self._IsSuiteFile(entry_path)):\n labels.extend(self._GetLabels(entry_path,\n scan_subdirs,\n self.JoinLabels(label, \n entry_label),\n predicate))\n\n return labels", "def get_labels(self):\n raise NotImplementedError", "def get_labels(self):\n\t\traise NotImplementedError()" ]
[ "0.6457788", "0.6242445", "0.6082684", "0.59228736", "0.59053916", "0.58714134", "0.5795048", "0.5738527", "0.56544113", "0.5619567", "0.5604264", "0.5585014", "0.55731374", "0.5507561", "0.5451812", "0.544326", "0.5435576", "0.5429042", "0.5415038", "0.54110825", "0.53918517", "0.53801924", "0.5348882", "0.5332649", "0.5322551", "0.52779275", "0.5277113", "0.5253381", "0.5232218", "0.52049017" ]
0.7327838
0
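Illustrative aside (editorial, not a dataset row): the positive document above forwards extra keyword arguments as query parameters; the `client` object and the 'shared' filter are assumptions.

    def example_list_metering_labels(client):
        # `client` is assumed to be an instance of the class defining list_metering_labels.
        return client.list_metering_labels(retrieve_all=True, shared=False)  # filter kwarg assumed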
Fetches information of a certain metering label.
def show_metering_label(self, metering_label, **_params): return self.get(self.metering_label_path % (metering_label), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_meter(self, name):\n if name not in self.meters:\n return None\n return self.meters[name]", "def _get_meter(self, meter_name):\n meter = self.METRIC_MAP.get(meter_name)\n if meter is None:\n raise exception.MetricNotAvailable(metric=meter_name)\n return meter", "def getLabelInfo(self, label): # real signature unknown; restored from __doc__\n pass", "def show_metering_label_rule(self, metering_label_rule, **_params):\r\n return self.get(self.metering_label_rule_path %\r\n (metering_label_rule), params=_params)", "def get_label(cls, obj, query):\n\n if hasattr(obj, 'config'):\n for item in obj.config.hardware.device:\n if query == item.key:\n label = item.deviceInfo.label\n\n return label", "def get_data(self, label):\n self.application.get_data(label)", "def get_label_info(label):\n label_info = str(label)[2:-1].split(\"-\")\n return label_info", "def get_label(id):\n return if_found(dao.get_label(id))", "def _get_label(self):\n return self.label", "def _getData(self, label):\n\n try:\n return self._data[label]\n except KeyError:\n try:\n field = ATOMIC_FIELDS[label]\n except KeyError:\n return None\n else:\n return getattr(self, '_get' + field.meth_pl)()", "def getLabel(device):\n cmd = \"/sbin/blkid -s LABEL -o value %s\" % device\n proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n return proc.communicate()[0].strip()", "def get_label(self, offset):\n self.ret = idc.GetDisasm(offset).replace(\"extrn \", \"\").split(\":\")[0]\n return self.ret", "def get_label(self):\n return self.job[self.label_key]", "def load_meter(self):\n return self._load_meter", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def get_label(client, label):\n image_name = get_image_name()\n image = client.images.get(image_name)\n try:\n return image.labels[label]\n except KeyError:\n raise Exception(f\"Image should have a label '{label}'\")", "def create_metering_label(self, body=None):\r\n return self.post(self.metering_labels_path, body=body)", "def get_meter_info(apt_no):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n payload = (\"select uuid, Metadata/Instrument/SupplyType \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=payload)\n # logger.debug (\"%s\",r)\n payload_body = r.json()\n # logger.debug (\"Payload:\\n%s\", payload_body)\n\n meters = []\n for i in range(0, len(payload_body)):\n meter = payload_body[i]\n\n meters.append({'uuid': meter['uuid'], 'type': meter[\n 'Metadata']['Instrument']['SupplyType']})\n\n return meters", "def get_metric(self, obj):\n if self.conn is None:\n return 0\n\n key = \"{}_metric\".format(obj)\n resp = self.conn.get(key)\n if resp is None:\n resp = 0\n else:\n resp = int(resp.decode('utf8'))\n return resp", "def list_metering_labels(self, retrieve_all=True, **_params):\r\n return self.list('metering_labels', self.metering_labels_path,\r\n retrieve_all, **_params)", "def get_info(self, name):\n return self.info[name]", "def test_delete_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.DeleteMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n args = [myid]\r\n self._test_delete_resource(resource, cmd, myid, args)", "def getLabel(self):\n return self.content[:12]", "def delete_metering_label(self, label):\r\n return self.delete(self.metering_label_path % (label))", "def get_label_detail(\n self,\n project_id: int,\n label_id: int\n ) -> requests.models.Response:\n return self.get(\n 
'v1/projects/{project_id}/labels/{label_id}'.format(\n project_id=project_id,\n label_id=label_id\n )\n )", "def readmeter(self):\n # Read until /\n line = \"\"\n while not line.startswith(\"/\"):\n line = self.readline().decode()\n\n # Populate header\n header = line[1:].strip()\n\n # Skip empty line after header\n self.readline()\n\n # Read lines and populate dictionary until !\n data = {}\n line = self.readline().decode()\n while not line.startswith(\"!\"):\n # Get OBIS\n next_obis = line[:line.index(\"(\")]\n if next_obis:\n obis = next_obis\n data[obis] = []\n # Get and loop over the arguments\n args = re.findall(\"\\(([^()]*)\\)\", line)\n for arg in args:\n # Do some basic conversions\n valwithunit = re.match(\"^([0-9.]+)\\*([a-zA-Z]+)$\", arg)\n if valwithunit:\n arg = float(valwithunit[1]), valwithunit[2]\n # Save argument with corresponding OBIS\n data[obis].append(arg)\n line = self.readline().decode()\n return header, data", "def get_label ( self ):\n return self.label", "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def get_label(settings):" ]
[ "0.6109366", "0.60958403", "0.6088569", "0.597872", "0.5956597", "0.5829163", "0.56682867", "0.56606156", "0.56462276", "0.55531365", "0.5501862", "0.54974216", "0.54537135", "0.5451163", "0.5431368", "0.5431368", "0.54304945", "0.53986526", "0.53809875", "0.53541225", "0.5329916", "0.53297853", "0.53162974", "0.5295582", "0.528167", "0.52687967", "0.52604985", "0.5251179", "0.52435076", "0.52186024" ]
0.73447627
0
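Illustrative aside (editorial, not a dataset row): the positive document above GETs a single label by identifier, with extra kwargs passed through as params; the `client` object, UUID, and 'fields' parameter are assumptions.

    def example_show_metering_label(client):
        # `client` is assumed to be an instance of the class defining show_metering_label.
        return client.show_metering_label('label-uuid', fields='name')  # 'fields' param assumed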
Creates a metering label rule.
def create_metering_label_rule(self, body=None): return self.post(self.metering_label_rules_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_metering_label(self, body=None):\r\n return self.post(self.metering_labels_path, body=body)", "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def create_metric(self) -> EvalMetric:\n pass", "def create_rule_labels(master: Widget) -> None:\r\n\r\n rule_label = Label(master, text='Rule:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n rule_label.pack(side=LEFT, padx=(50,0))\r\n self.rule_name = Label(master, text=self.INITIAL_RULE, font=self.FONT_NORMAL,\r\n bg=self.MAIN_BG)\r\n self.rule_name.pack(side=LEFT)", "def show_metering_label_rule(self, metering_label_rule, **_params):\r\n return self.get(self.metering_label_rule_path %\r\n (metering_label_rule), params=_params)", "def make_metric(self, name, metadata=None, **kwargs):\n return make_metric(name, metadata=metadata, accessor=self.accessor, **kwargs)", "def make_metric(self, name, metadata=None, **kwargs):\n return make_metric(name, metadata=metadata, accessor=self.accessor, **kwargs)", "def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()", "def delete_metering_label_rule(self, rule):\r\n return self.delete(self.metering_label_rule_path % (rule))", "def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }", "def create_metric(self) -> 'LossMetric':\n return PerplexityMetric(prefix=self._metric_prefix)", "def create_metric(self) -> 'LossMetric':\n return PerplexityMetric(prefix=self._metric_prefix)", "def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])", "def create_labeling_job(LabelingJobName=None, LabelAttributeName=None, InputConfig=None, OutputConfig=None, RoleArn=None, LabelCategoryConfigS3Uri=None, StoppingConditions=None, LabelingJobAlgorithmsConfig=None, HumanTaskConfig=None, Tags=None):\n pass", "def create_label(self, org, name):\n pass", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def __call__(self, *args, **kwargs) -> L:\n label = self._label_adapter.create_label(*args,\n document=self._document,\n **kwargs)\n self._current_labels.append(label)\n return label", "def make_bleu_metric_spec():\n def bleu_wrapper(predictions, labels, **kwargs):\n \"\"\"Remove the SEQUENCE_START token from the labels before\n feeding it into the bleu metric.\"\"\"\n return streaming_bleu(predictions, labels[:, 1:], **kwargs)\n\n return metric_spec.MetricSpec(\n metric_fn=bleu_wrapper,\n label_key=\"target_tokens\",\n prediction_key=\"predicted_tokens\")", "def create_labels(node, function_call, 
function_call_line):\n \n label_node = node.get_attributes()[\"label\"]\n label_node_from_function_line = label_node[function_call_line:]\n regex = re.escape(function_call) + r';\\\\l\\\\\\n'\n match_function = re.search(regex, label_node_from_function_line, re.S)\n \n bb = int(re.search(r'\\d+', re.search(r'<bb\\\\\\ \\d+\\\\>', label_node).group()).group())\n \n label_node1 = label_node[0:function_call_line] + '|call\\ ' + function_call + ';\\l\\\\\\n}\"'\n \n match_variable = re.search(r'\\w+\\\\ =\\\\ ' + re.escape(function_call), label_node_from_function_line)\n\n label_node2 = '\"{ FREQ:0 |\\<bb\\ ' + str(bb) + '\\>:\\l\\\\\\n|'\n \n if match_variable is not None:\n variable = re.search(r'\\w+', match_variable.group()).group()\n label_node2 += variable + '\\ =\\ '\n label_node2 += 'return\\ ' + function_call + ';\\\\l\\\\\\n' + label_node_from_function_line[match_function.end():]\n \n return (label_node1, label_node2, int(bb))", "def list_metering_label_rules(self, retrieve_all=True, **_params):\r\n return self.list('metering_label_rules',\r\n self.metering_label_rules_path, retrieve_all,\r\n **_params)", "def label_for(self, *pp, unit=True, description=True):\n if len(pp) > 1 and np.all([re.match(r\"k\\d+l\", p) for p in pp]):\n label = \"$k_nl$\"\n if unit:\n label += \" / $m^{-n}$\"\n return label\n return super().label_for(*pp, unit=unit, description=description)", "def genLabel(self):\n self._nextlabelid += 1\n return CLABEL(self._nextlabelid)", "def make_label_event(self, line):\n ret = MLFLabelEvent()\n tokens = line.split()\n n = len(tokens)\n if n == 1:\n ret.word_label = tokens[0]\n ret.start = None\n ret.end = None\n ret.phone_label = None\n ret.score = None\n return ret\n if 4 <= n <= 5:\n ret.start = int(tokens[0])\n ret.end = int(tokens[1])\n ret.phone_label = tokens[2]\n ret.score = float(tokens[3])\n if n == 5:\n ret.word_label = tokens[4]\n else:\n ret.word_label = None\n return ret\n msg = (\"Unexpected number of tokens (%d) in stream (file %s, line %d): %s\" % (\n n, self._filename, self._line_count, line))\n raise MLFProcessorError(msg)", "def Create(cls, group_key, machine_id, timestamp, payload):\n sort_key = util.CreateSortKeyPrefix(timestamp, randomness=False) + machine_id\n metric = Metric(group_key, sort_key)\n metric.machine_id = machine_id\n metric.timestamp = timestamp\n metric.payload = payload\n return metric", "def __init__(self):\n super().__init__()\n self.metric = 'TN'", "def rule110_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 1],\n [0, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [0, 0, 0],\n ])\n # fmt: on\n return Network(tpm, node_labels=LABELS[:tpm.shape[1]])", "def create_labels(base, map_layer_element, label_dict):\n labeling_element = base.xml_document.createElement(\"labeling\")\n labeling_element.setAttribute(\"type\", label_dict['labelValues']['type'])\n map_layer_element.appendChild(labeling_element)\n\n settings_element = base.xml_document.createElement(\"settings\")\n\n if label_dict['labelValues']['type'] == 'rule-based':\n\n rules_element = base.xml_document.createElement(\"rules\")\n rules_element.setAttribute(\"key\", \"{a5f41347-c300-4ee5-b\" + str(random.randint(0, 9)) + \"fb-08d965baec1e}\")\n labeling_element.appendChild(rules_element)\n\n rule_element = base.xml_document.createElement(\"rule\")\n rule_element.setAttribute(\"key\", \"{feb232af-8b6c-4\" + str(random.randint(10, 99)) + \"c-b3e9-f27523dbd2f7}\")\n rule_element.setAttribute(\"filter\", '\"SymbolID\" = ' + 
label_dict[\"labelValues\"][\"classId\"])\n rules_element.appendChild(rule_element)\n rule_element.appendChild(settings_element)\n\n elif label_dict['labelValues']['type'] == 'simple':\n labeling_element.appendChild(settings_element)\n\n LabelRenderer.create_text_style_element(settings_element, base.xml_document, label_dict)", "def create_label(image_name,number):\r\n\r\n target=[]\r\n for i in range(0,number):\r\n target.append(0)\r\n target[image_name]=1\r\n\r\n return target", "def _monomer_pattern_label(mp):\n site_strs = []\n for site, cond in mp.site_conditions.items():\n if isinstance(cond, tuple) or isinstance(cond, list):\n assert len(cond) == 2\n if cond[1] == WILD:\n site_str = '%s_%s' % (site, cond[0])\n else:\n site_str = '%s_%s%s' % (site, cond[0], cond[1])\n elif isinstance(cond, numbers.Real):\n continue\n else:\n site_str = '%s_%s' % (site, cond)\n site_strs.append(site_str)\n return '%s_%s' % (mp.monomer.name, '_'.join(site_strs))", "def test__create_label_w_no_ent_id(ruler: SpaczzRuler) -> None:\n assert ruler._create_label(\"TEST\", None) == \"TEST\"" ]
[ "0.6513352", "0.6434864", "0.6325153", "0.58857507", "0.56241167", "0.55774724", "0.55774724", "0.5566286", "0.54233354", "0.5421766", "0.53225523", "0.53225523", "0.52733827", "0.52690357", "0.5225439", "0.51690835", "0.51054645", "0.5063008", "0.50395566", "0.503811", "0.5009828", "0.49837306", "0.49819812", "0.49812743", "0.49359187", "0.49354744", "0.48520616", "0.48484284", "0.48472422", "0.4835523" ]
0.82270217
0
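Illustrative aside (editorial, not a dataset row): the positive document above POSTs a rule body; the sketch uses the Neutron metering-rule body convention as an assumption, and `client` and all field values are placeholders.

    def example_create_metering_label_rule(client):
        # `client` is assumed to be an instance of the class defining create_metering_label_rule.
        body = {'metering_label_rule': {'metering_label_id': 'label-uuid',
                                        'direction': 'ingress',
                                        'remote_ip_prefix': '10.0.0.0/24',
                                        'excluded': False}}  # body schema is an assumption
        return client.create_metering_label_rule(body=body)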
Deletes the specified metering label rule.
def delete_metering_label_rule(self, rule): return self.delete(self.metering_label_rule_path % (rule))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_metering_label(self, label):\r\n return self.delete(self.metering_label_path % (label))", "def test_delete_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.DeleteMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n args = [myid]\r\n self._test_delete_resource(resource, cmd, myid, args)", "def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )", "def delete_rule(self, index):\n del self.rules[index]", "def dscp_marking_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_dscp_marking_rule(rule_id, policy_id)", "def DeleteRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def remove(self, label):\n\n\t\t\tself[label].remove()", "def _delete_rule(cls, rule_suffix: str) -> None:\n delete_rule = cls._build_rule_string(IpTableCommandOption.DELETE, rule_suffix)\n log.info('Delete rule \"%s\"', delete_rule)\n utils.run_command(delete_rule, shell=True)", "def delete(self,\n section_id,\n rule_id,\n ):\n return self._invoke('delete',\n {\n 'section_id': section_id,\n 'rule_id': rule_id,\n })", "def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)", "def delete(self, package=\"\", uid=\"\", params={}):\n return self.__post('delete-nat-rule', package, uid, params)", "def delete_snat_rule(self, rule, ignore_missing=True):\n return self._delete(_snat.Rule, rule, ignore_missing=ignore_missing)", "def bandwidth_limit_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_bandwidth_limit_rule(rule_id, policy_id)", "def minimum_bandwidth_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_minimum_bandwidth_rule(rule_id, policy_id)", "def delete_resolver_rule(ResolverRuleId=None):\n pass", "def removeRule(self, *args):\n return _libsbml.Model_removeRule(self, *args)", "def delete_rule(self, rule_name):\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n\n del self.rules[rule_name]\n del self.rule_source[rule_name]\n del self.rule_str[rule_name]\n del self.rule_ft[rule_name]\n\n return True", "def delete_rule(self, value):\n\n if value >= 0:\n if sublime.ok_cancel_dialog('Are you sure you want to delete the rule: \\'%s\\'?' 
% self.keys[value]):\n del self.regex_rules[self.keys[value]]\n sublime.load_settings('reg_replace_rules.sublime-settings').set('replacements', self.regex_rules)\n sublime.save_settings('reg_replace_rules.sublime-settings')", "def delete(self, label):\n if label in self.bindings:\n if not self.locked:\n i = self.bindings[label]\n del self.bindings[label]\n return i\n else:\n if self.parent:\n return self.parent.delete(label)\n else:\n raise SnekEvaluationError('attempting to delete non-existing name {}'.format(label))", "def minimum_packet_rate_rule_delete(request, policy_id, rule_id):\n neutronclient(request).delete_minimum_packet_rate_rule(rule_id, policy_id)", "def delete_rule(self, id: str, rule_id: str) -> dict:\n r = requests.delete(self.url + '/{}/rules/{}'.format(id, rule_id), headers=self.headers)\n\n return r.json()", "def delete_rule(rule, table=None):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-D\"] + rule\n return call(cmdline)", "def delete_rules(self, cr, uid, context=None):\r\n action_ids = self.base_action_rule.search(cr, uid, [('model', '=', self.model._name)], context=context)\r\n return self.base_action_rule.unlink(cr, uid, action_ids, context=context)", "def removeLabelFromSpace(self, label, space):\n return self.pm_getSpaceManager().removeLabelFromSpace(self._unbox(label), self._unbox(space))", "def remove_metric(self, metric):\n self.metrics.remove(metric)\n self.estimate()", "def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n self.category.remove(category)", "def delData(self, label):\n\n return self._data.pop(label, None)", "def delete(self, request, l7_rule_id, l7_policy_id):\n conn = get_sdk_connection(request)\n retry_on_conflict(\n conn, conn.load_balancer.delete_l7_rule,\n l7_rule_id, l7_policy_id,\n load_balancer_getter=l7_policy_get_load_balancer_id,\n resource_id=l7_policy_id)", "def delete_nat_rule(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n nat_id = kwargs['objectname']\n tier1_id = kwargs['tier1_id']\n\n result = remove_sddc_nat_json(proxy, sessiontoken, nat_id, tier1_id)\n if result is not None:\n print(\"\\n\")\n params = {'proxy':proxy, 'sessiontoken':sessiontoken, 'objectname':nat_id, 'tier1_id':tier1_id}\n get_nat_rules(**params)\n else:\n print('Something went wrong. Please check your syntax and try again.')\n sys.exit(1)", "def test_delete_rule(self):\n pass" ]
[ "0.7421562", "0.6761197", "0.66971546", "0.64418876", "0.63509923", "0.63017774", "0.61208904", "0.60836786", "0.60674757", "0.60439885", "0.58794063", "0.58783823", "0.58492035", "0.5841357", "0.5773799", "0.575916", "0.5721856", "0.5683262", "0.5640768", "0.55792165", "0.5531356", "0.5513476", "0.5481472", "0.5461477", "0.5446291", "0.5427629", "0.5418401", "0.54151213", "0.5404518", "0.53972596" ]
0.87295634
0
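Illustrative aside (editorial, not a dataset row): the positive document above deletes by rule identifier; `client` and the UUID are placeholders.

    def example_delete_metering_label_rule(client):
        # `client` is assumed to be an instance of the class defining delete_metering_label_rule.
        client.delete_metering_label_rule('rule-uuid')  # placeholder identifier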
Fetches a list of all metering label rules for a label.
def list_metering_label_rules(self, retrieve_all=True, **_params): return self.list('metering_label_rules', self.metering_label_rules_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def getListOfRules(self):\n return self.model.getListOfRules()", "def get_label_list():\n f_name = os.path.join(FLAGS.labels_dir, FLAGS.labels_name)\n if os.path.exists(f_name):\n with open(f_name, 'rb') as f:\n try:\n label_list = [line.rstrip('\\n') for line in f]\n except:\n print(\"Could not read file:\" + f_name)\n sys.exit()\n return label_list", "def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)", "def getListRulesPerModel(path_model):\n with open(path_model) as data_file: \n data_json = json.load(data_file)\n #pprint(len(data_json['rules']))\n list_rules = data_json['rules']\n return list_rules", "def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]", "def explain_multiclass(self):\n res = list()\n if len(self._rule_map.items()) == 0:\n return DnfRuleSet([], self.target_label)\n for label, rules in self._rule_map.items():\n dnf_ruleset = self._rules_to_trxf_dnf_ruleset(rules, label)\n res.append(dnf_ruleset)\n default_rule = DnfRuleSet([], self.default_label)\n res.append(default_rule)\n return res", "def get(self, name):\n labels = self.list()\n return [\n label\n for label\n in labels\n if name == label.get('name')\n ]", "def _GetLabels(self, directory, scan_subdirs, label, predicate):\n\n labels = []\n\n # Go through all of the files (and subdirectories) in that\n # directory.\n for entry in dircache.listdir(directory):\n entry_label = self._GetLabelFromBasename(entry)\n # If the label is not valid then pretend it\n # does not exist. 
It would not be valid to create an entity\n # with such an id.\n if not self.IsValidLabel(entry_label):\n continue\n # Compute the full path to 'entry'.\n entry_path = os.path.join(directory, entry)\n # If it satisfies the 'predicate', add it to the list.\n if predicate(entry_path):\n labels.append(self.JoinLabels(label, entry_label))\n # If it is a subdirectory, recurse.\n if (scan_subdirs and os.path.isdir(entry_path)\n and self._IsSuiteFile(entry_path)):\n labels.extend(self._GetLabels(entry_path,\n scan_subdirs,\n self.JoinLabels(label, \n entry_label),\n predicate))\n\n return labels", "def DescribeRules(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeRules\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeRulesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create_metering_label_rule(self, body=None):\r\n return self.post(self.metering_label_rules_path, body=body)", "def get_rules_for_category(category):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE category_id = ?', (category,)).fetchall()\n\n return rules", "def get_labels():\n return if_found(dao.get_labels())", "def explain(self):\n assert (self.target_label is not None), 'Not fitted or not fitted for a specific pos value. Use export_rules ' \\\n 'in the latter case. '\n\n if len(self._rule_map.items()) == 0:\n return DnfRuleSet([], self.target_label)\n for label, rules in self._rule_map.items():\n if label == self.target_label:\n return self._rules_to_trxf_dnf_ruleset(rules, label)\n raise Exception('No rules found for label: ' + str(self.target_label))", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def normalize_label(label: str) -> list:\n global _LABELS\n if _LABELS is None:\n _LABELS = read_label_definitions(NORM_LABEL_FILE)\n if label in _LABELS['translate'].keys():\n return [_LABELS['translate'][label]]\n elif label in _LABELS['label_to_group'].keys():\n # Originally: return all (new) labels that came from splitting an old one\n #return _LABELS['label_to_group'][label]\n # Now: ignore labels that have been split, so as not to learn false positives\n return []\n else:\n print(\"Untranslated: \", label)\n return [label]", "def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels", "def list_definition(self):\n return self._get(path='metrics')", "def get_metric_list(self) -> List[str]:\n ...", "def list_resolver_rules(MaxResults=None, NextToken=None, Filters=None):\n pass", "def get_rules(cls):\n raise NotImplementedError()", "def read_labels_from_file(label_identifier) -> list:\n global _LABELS\n if _LABELS is None:\n _LABELS = read_label_definitions(NORM_LABEL_FILE)\n filename = _LABELS['label_files'][label_identifier]\n print(f\"Reading {filename}\")\n with open(filename, 'r') as f:\n labels = [l.strip() for l in f.readlines() if l.strip() != '']\n return labels", "def label_list(self, 
labnames=None):\n vallabs = self._vallabs\n if labnames is None:\n labnames = vallabs.keys()\n else:\n if isinstance(labnames, str):\n labnames = (labnames,)\n elif (not isinstance(labnames, collections.Iterable)\n or not all(isinstance(value, str) for value in labnames)):\n raise TypeError(\"labnames should be str or iterable of str\") \n labnames = set(name for value in labnames\n for name in value.split())\n if not labnames.issubset(vallabs.keys()):\n bad_names = \", \".join(str(lbl) for lbl in \n labnames.difference(vallabs.keys()))\n raise KeyError(bad_names + \" are not defined labels\")\n for name in labnames:\n print(name + \":\")\n lbldict = vallabs[name]\n for value in lbldict:\n print(\"{:>12} {}\".format(value, lbldict[value]))", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def get_list_of_rules(app_stack_name):\n\n cloudformation = boto3.client('cloudformation')\n response = cloudformation.describe_stack_resources(\n StackName=app_stack_name,\n LogicalResourceId='ALBListenerSSL'\n )\n alb_listener = response['StackResources'][0]['PhysicalResourceId']\n\n client = boto3.client('elbv2')\n response = client.describe_rules(ListenerArn=alb_listener)\n return response['Rules']", "def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]", "def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules", "def labels_all(self):\n return self._labels_all" ]
[ "0.57482624", "0.5516608", "0.5357591", "0.530031", "0.5222081", "0.5218295", "0.51783687", "0.5128793", "0.51048404", "0.5095553", "0.50337917", "0.50204724", "0.5004493", "0.4985553", "0.4963917", "0.49584508", "0.49472302", "0.49364412", "0.49233997", "0.49183097", "0.49097726", "0.4904089", "0.49020043", "0.48866034", "0.48762032", "0.48589623", "0.48233217", "0.48028216", "0.4801672", "0.47865263" ]
0.7437749
0
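The record closing above documents a recursive directory scan that accumulates dot-joined labels. A minimal runnable sketch of that pattern, where isidentifier() stands in for the original IsValidLabel and dotted joining stands in for JoinLabels (both assumptions, since those helpers are not shown in full):

import os

def get_labels(directory, predicate, label=""):
    # Collect joined labels for entries satisfying the predicate, recursing
    # into subdirectories, mirroring the _GetLabels structure above.
    labels = []
    for entry in sorted(os.listdir(directory)):
        entry_label = os.path.splitext(entry)[0]
        if not entry_label.isidentifier():  # stand-in for IsValidLabel
            continue
        entry_path = os.path.join(directory, entry)
        joined = "%s.%s" % (label, entry_label) if label else entry_label
        if predicate(entry_path):
            labels.append(joined)
        if os.path.isdir(entry_path):
            labels.extend(get_labels(entry_path, predicate, joined))
    return labels

print(get_labels(".", os.path.isfile))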
Fetches information of a certain metering label rule.
def show_metering_label_rule(self, metering_label_rule, **_params): return self.get(self.metering_label_rule_path % (metering_label_rule), params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_metering_label_rules(self, retrieve_all=True, **_params):\r\n return self.list('metering_label_rules',\r\n self.metering_label_rules_path, retrieve_all,\r\n **_params)", "def create_metering_label_rule(self, body=None):\r\n return self.post(self.metering_label_rules_path, body=body)", "def get(self, request):\n rule = self.get_object()\n return success(rule.summary())", "def get_label(cls, obj, query):\n\n if hasattr(obj, 'config'):\n for item in obj.config.hardware.device:\n if query == item.key:\n label = item.deviceInfo.label\n\n return label", "def show_metering_label(self, metering_label, **_params):\r\n return self.get(self.metering_label_path %\r\n (metering_label), params=_params)", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def getRule(self, *args):\n return _libsbml.Model_getRule(self, *args)", "def get_label(self, offset):\n self.ret = idc.GetDisasm(offset).replace(\"extrn \", \"\").split(\":\")[0]\n return self.ret", "def getLabelInfo(self, label): # real signature unknown; restored from __doc__\n pass", "def delete_metering_label_rule(self, rule):\r\n return self.delete(self.metering_label_rule_path % (rule))", "def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule", "def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule", "def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule", "def get_rule(rule_id):\n\n rule = get_db().execute('SELECT i.*, c.name as category_name FROM ruleset i JOIN categories c ON i.category_id = c.id WHERE i.id = ?', (rule_id, )).fetchone()\n\n return rule", "def get_label(self):\n return self.job[self.label_key]", "def request_measures(self):\n question = jbus.jbus_generator_read(self.node, 0x1060, 48)\n answer = self.send_request(question)\n #print(\"Question: [\", question, \"]\")\n #print(\"Answer: [\",answer,\"] LEN: \",len(answer))\n result = self.verify_response(question, answer)\n if (result == \"OK\"):\n return answer\n else:\n self.error=result\n return False", "def find(self, rule_name):\n return self.rules[rule_name]", "def manhuas_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=MANHUA_TYPE_URI,\n rdf_type_name=MANHUA_TYPE_NAME, \n kls=Manhua)", "def test_get_rule_details(self):\n pass", "def _get_lsp_config_metric(self):\n return self.__lsp_config_metric", "def describe_labeling_job(LabelingJobName=None):\n pass", "def _get_next_hop_metric(self):\n return self.__next_hop_metric", "def readmeter(self):\n # Read until /\n line = \"\"\n while not line.startswith(\"/\"):\n line = self.readline().decode()\n\n # Populate header\n header = line[1:].strip()\n\n # Skip empty line after header\n self.readline()\n\n # Read lines and populate dictionary until !\n data = {}\n line = self.readline().decode()\n while not line.startswith(\"!\"):\n # Get OBIS\n next_obis = line[:line.index(\"(\")]\n if next_obis:\n obis = next_obis\n data[obis] = []\n # Get and loop over the arguments\n args = re.findall(\"\\(([^()]*)\\)\", line)\n for arg in args:\n # Do some basic conversions\n valwithunit = re.match(\"^([0-9.]+)\\*([a-zA-Z]+)$\", arg)\n if valwithunit:\n arg = float(valwithunit[1]), valwithunit[2]\n # Save argument with corresponding OBIS\n data[obis].append(arg)\n line = self.readline().decode()\n return header, data", "def getRuleByVariable(self, *args):\n return 
_libsbml.Model_getRuleByVariable(self, *args)", "def get_metric(self, obj):\n if self.conn is None:\n return 0\n\n key = \"{}_metric\".format(obj)\n resp = self.conn.get(key)\n if resp is None:\n resp = 0\n else:\n resp = int(resp.decode('utf8'))\n return resp", "def _get_l2_label(self):\n return self.__l2_label", "def get(self,\n section_id,\n rule_id,\n ):\n return self._invoke('get',\n {\n 'section_id': section_id,\n 'rule_id': rule_id,\n })", "def parse(cls, label) -> Any:\n return label", "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text" ]
[ "0.5749725", "0.56896865", "0.53727543", "0.5339056", "0.52721786", "0.5223315", "0.5223315", "0.50914735", "0.507633", "0.50260407", "0.49995106", "0.49948278", "0.49948278", "0.49948278", "0.49808478", "0.49793372", "0.4972282", "0.49648386", "0.49490532", "0.4948544", "0.4924445", "0.49173084", "0.49139825", "0.49112943", "0.48880294", "0.48864567", "0.48853227", "0.4869367", "0.48503035", "0.48444837" ]
0.7088634
0
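The positive document above formats a resource id into a path template and issues a GET. A self-contained sketch of that show pattern; the class name, path constant, and stubbed HTTP layer are illustrative, not the real neutronclient internals:

class FakeClient:
    metering_label_rule_path = "/metering/metering-label-rules/%s"

    def get(self, path, params=None):
        # A real client would issue an HTTP GET here.
        return {"path": path, "params": params or {}}

    def show_metering_label_rule(self, metering_label_rule, **_params):
        return self.get(self.metering_label_rule_path % (metering_label_rule),
                        params=_params)

print(FakeClient().show_metering_label_rule("rule-123", fields="id"))
# -> {'path': '/metering/metering-label-rules/rule-123', 'params': {'fields': 'id'}}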
Fetch a list of all network partitions for a tenant.
def list_net_partitions(self, **params): return self.get(self.net_partitions_path, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_partitions(self):\n \n parts = client.Management.Partition.get_partition_list()\n partitions = []\n for part in parts:\n partitions.append(part['partition_name'])\n return partitions", "def list_partitions(self, partitioning):\n return []", "def get_partitions(self):\n return self.partitions", "def partitions(self):\n self._get_latest_content()\n return self._data.get('partitions', [])", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def partitions(self):\n return self._partitions", "def get_partitions(self, conditions):\n\n raise NotImplementedError('get_partitions')", "def get_partitions(disk):\n partitions = []\n script = [\n 'select disk {}'.format(disk['Number']),\n 'list partition']\n\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Append partition numbers\n output = result.stdout.decode().strip()\n regex = r'Partition\\s+(\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)\\s+'\n for tmp in re.findall(regex, output, re.IGNORECASE):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n partitions.append({'Number': num, 'Size': size})\n\n return partitions", "def getPartitionList(self, softwareProfileName):\n return self._sp_db_api.getPartitionList(softwareProfileName)", "def fetch_partitions(self, table_name):\n partition_result = self.query(\n sql.fetch_partition,\n (\n self._current_db,\n table_name,\n ),\n )\n # If a table doesn't have partition schema the \"PARTITION_NAME\"\n # will be string \"None\" instead of something considered as false\n # in python\n return [\n partition_entry[\"PARTITION_NAME\"]\n for partition_entry in partition_result\n if partition_entry[\"PARTITION_NAME\"] != \"None\"\n ]", "def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass", "def list_partitions(basename):\n partitions = []\n for name in os.listdir(os.path.join('/sys/block', basename)):\n if name.startswith(basename):\n partitions.append(name)\n return partitions", "def _get_partitions():\n partitions = []\n\n try:\n with files.FileReader('/proc/partitions') as f:\n lines = f.readlines()[2:]\n for line in lines:\n _, _, _, name = line.split()\n if name[-1].isdigit():\n partitions.append(name)\n # This will catch access denied and file not found errors, which is expected\n # on non-Linux/limited access systems. All other errors will raise as normal.\n except files.Error:\n pass\n\n return partitions", "def network_list_for_tenant(request, tenant_id, include_external=False,\n include_pre_auto_allocate=False, page_data=None,\n **params):\n\n # Pagination is implemented consistently with nova and cinder views,\n # which means it is a bit hacky:\n # - it requests X units but displays X-1 units\n # - it ignores the marker metadata from the API response and uses its own\n # Here we have extra hacks on top of that, because we have to merge the\n # results of 3 different queries, and decide which one of them we are\n # actually paginating.\n # The 3 queries consist of:\n # 1. Shared=True networks\n # 2. Project non-shared networks\n # 3. External non-shared non-project networks\n # The main reason behind that order is to maintain the current behavior\n # for how external networks are retrieved and displayed.\n # The include_external assumption of whether external networks should be\n # displayed is \"overridden\" whenever the external network is shared or is\n # the tenant's. 
Therefore it refers to only non-shared non-tenant external\n # networks.\n # To accomplish pagination, we check the type of network the provided\n # marker is, to determine which query we have last run and whether we\n # need to paginate it.\n\n LOG.debug(\"network_list_for_tenant(): tenant_id=%(tenant_id)s, \"\n \"params=%(params)s, page_data=%(page_data)s\", {\n 'tenant_id': tenant_id,\n 'params': params,\n 'page_data': page_data,\n })\n\n page_data, marker_net = _configure_pagination(\n request, params, page_data, tenant_id=tenant_id)\n\n query_kwargs = {\n 'request': request,\n 'include_external': include_external,\n 'tenant_id': tenant_id,\n 'page_data': page_data,\n **params,\n }\n\n return _perform_query(\n _query_nets_for_tenant, query_kwargs, marker_net,\n include_pre_auto_allocate)", "def test_get_node_partitions(self):\n pass", "def disk_partitions(all=False):\n phydevs = []\n f = open(\"/proc/filesystems\", \"r\")\n for line in f:\n if not line.startswith(\"nodev\"):\n phydevs.append(line.strip())\n\n retlist = []\n f = open('/etc/mtab', \"r\")\n for line in f:\n if not all and line.startswith('none'):\n continue\n fields = line.split()\n device = fields[0]\n mountpoint = fields[1]\n fstype = fields[2]\n if not all and fstype not in phydevs:\n continue\n if device == 'none':\n device = ''\n ntuple = disk_ntuple(device, mountpoint, fstype)\n retlist.append(ntuple)\n return retlist", "def convert_partitions_to_list(partition):\n parts = list()\n for part in partition:\n parts.append(part)\n return parts", "def iter_partitions(cls):\n for part_index in xrange(cls.number_of_partitions):\n yield cls.partition_indexed(part_index)", "def get_partitions_for_topic(self, topic):\n return self.client.cluster._partitions[topic]", "def partitions(self, topic):\n kc = KafkaCat(self)\n md = kc.metadata()\n topic = next(filter(lambda t: t[\"topic\"] == topic, md[\"topics\"]))\n\n def make_partition(p):\n index = p[\"partition\"]\n leader_id = p[\"leader\"]\n leader = None if leader_id == -1 else self.get_node(leader_id)\n replicas = [self.get_node(r[\"id\"]) for r in p[\"replicas\"]]\n return Partition(index, leader, replicas)\n\n return [make_partition(p) for p in topic[\"partitions\"]]", "def list_all_partitions():\n dev_part_list = {}\n for name in os.listdir('/sys/block'):\n # /dev/fd0 may hang http://tracker.ceph.com/issues/6827\n if re.match(r'^fd\\d$', name):\n continue\n if not os.path.exists(os.path.join('/sys/block', name, 'device')):\n continue\n dev_part_list[name] = list_partitions(name)\n return dev_part_list", "def get_partitions_hyperplanes(self, encodings, num_splits, margin, num_partitions):\n splits = self.get_splits_hyperplanes(encodings=encodings, num_splits=num_splits, margin=margin)\n bad_partitions = 0\n partitions = []\n for i in tqdm(range(num_partitions), desc='get_partitions_hyperplanes'):\n partition, num_failed = self.get_partition_from_splits(splits)\n partitions.append(partition)\n bad_partitions += num_failed\n if (i + 1) % (num_partitions // 10) == 0:\n tqdm.write('\\t good partitions: {}, bad partitions: {}'.format(i + 1, bad_partitions))\n print(\"Generated {} partitions respecting margin {}, with {} failed partitions.\".format(num_partitions, margin,\n bad_partitions))\n return partitions", "def get_all_nodes(self, partition: str, select: List[str] = None) -> Response:\n uri = build_uri(uri=url_node, partition=partition, select=select)\n return self._client.get(uri)", "def partitions(self) -> Optional[List[Dict]]:\n return [\n {\"search_name\": s[\"name\"], 
\"search_query\": s[\"query\"]}\n for s in self.config[\"searches\"]\n ]", "def load_partitions(partition_list, pickle_base_name=DEFAULT_REVIEWS_PICKLE + '.'):\n\n num_partition = 1\n result = []\n for partition in partition_list:\n print 'Reading partition %d of %d' % (num_partition, len(partition_list))\n with open(pickle_base_name + str(partition)) as file:\n loaded_element = pickle.load(file)\n result.extend(loaded_element)\n\n num_partition += 1\n\n print \"Read a total of %d partitions for a total of %d objects\" % (num_partition - 1, len(result))\n return result", "def iterate_partitions(self, spec=None, reverse=False):\n return self.partitions.iterate_partitions(spec=spec, reverse=reverse)", "def list(self, **params):\n # This is to ensure tenant_id key is not populated\n # if tenant_id=None is specified.\n tenant_id = params.pop('tenant_id', self.request.user.tenant_id)\n if tenant_id:\n params['tenant_id'] = tenant_id\n return self._list(**params)", "def partition_mnist():\n (x_train, y_train), testset = tf.keras.datasets.mnist.load_data()\n partitions = []\n # We keep all partitions equal-sized in this example\n partition_size = math.floor(len(x_train) / NUM_CLIENTS)\n for cid in range(NUM_CLIENTS):\n # Split dataset into non-overlapping NUM_CLIENT partitions\n idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size\n partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))\n return partitions, testset", "def get_tenant_resources(self):\n resources = self.context[\"tenant\"].get(\"resources\", [])\n if not resources:\n msg = (\"No resources found for tenant: %s\"\n % self.context[\"tenant\"].get(\"name\"))\n raise exceptions.NotFoundException(message=msg)\n for res_id in resources:\n self._get_resource(res_id)", "def show_net_partition(self, netpartition, **params):\r\n return self.get(self.net_partition_path % (netpartition),\r\n params=params)" ]
[ "0.67660326", "0.6758356", "0.6584987", "0.6551029", "0.63861006", "0.63013387", "0.6170014", "0.6149972", "0.60316294", "0.6015112", "0.58948547", "0.58694863", "0.5818245", "0.5794568", "0.57519025", "0.56942075", "0.56459546", "0.561672", "0.55776316", "0.5553374", "0.5541136", "0.552777", "0.5519986", "0.55010813", "0.54364276", "0.5433953", "0.5418495", "0.539902", "0.53928757", "0.53919905" ]
0.7341327
0
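list_net_partitions in the record above forwards keyword arguments as query parameters. A short sketch of how such filters typically land in the request URL; the path is illustrative and urlencode stands in for the client's own serialization:

from urllib.parse import urlencode

net_partitions_path = "/net-partitions"  # illustrative collection path
params = {"name": "part-a", "fields": "id"}
print("%s?%s" % (net_partitions_path, urlencode(params)))
# -> /net-partitions?name=part-a&fields=id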
Fetch a network partition.
def show_net_partition(self, netpartition, **params): return self.get(self.net_partition_path % (netpartition), params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_partition():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['partition']:\n warning(\"You need to partition the selection first.\")\n return\n res = askItems([['property',[1]]],\n caption='Partition property')\n if res:\n prop = res['property']\n getPartition(selection,prop)\n highlightPartitions(selection)", "def get_partition(self, partition_spec):\n return self.partitions[partition_spec]", "def getPartition(self):\n\t\treturn self.partition", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def get_partitioning(disk):\n\n #TODO\n return \"Unknown\"", "def get_partition(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_GetPartition', self.handle, nIndex))", "def get_partition(self):\n return self._partition", "def get(self):\n return self._partition", "def get_hyperpartition(self, hyperpartition_id):\n return self.session.query(self.Hyperpartition).get(hyperpartition_id)", "def _requestComputePartition(self, compute_node_id, compute_partition_id,\n software_release, software_type, partition_reference,\n shared_xml, partition_parameter_xml, filter_xml, state):\n if state:\n state = loads(state)\n if state is None:\n state = 'started'\n if shared_xml is not _MARKER:\n shared = loads(shared_xml)\n else:\n shared = False\n if partition_parameter_xml:\n partition_parameter_kw = loads(partition_parameter_xml)\n else:\n partition_parameter_kw = dict()\n if filter_xml:\n filter_kw = loads(filter_xml)\n if software_type == 'pull-backup' and not 'retention_delay' in filter_kw:\n filter_kw['retention_delay'] = 7.0\n else:\n filter_kw = dict()\n\n kw = dict(software_release=software_release,\n software_type=software_type,\n software_title=partition_reference,\n instance_xml=castToStr(partition_parameter_kw),\n shared=shared,\n sla_xml=castToStr(filter_kw),\n state=state)\n\n portal = self.getPortalObject()\n if compute_node_id and compute_partition_id:\n requester = self.\\\n _getSoftwareInstanceForComputePartition(compute_node_id,\n compute_partition_id)\n instance_tree = requester.getSpecialiseValue()\n if instance_tree is not None and instance_tree.getSlapState() == \"stop_requested\":\n kw['state'] = 'stopped'\n key = '_'.join([instance_tree.getRelativeUrl(), partition_reference])\n else:\n # requested as root, so done by human\n requester = portal.portal_membership.getAuthenticatedMember().getUserValue()\n key = '_'.join([requester.getRelativeUrl(), partition_reference])\n \n last_data = requester.getLastData(key)\n requested_software_instance = None\n value = dict(\n hash='_'.join([requester.getRelativeUrl(), str(kw)]),\n )\n\n if last_data is not None and isinstance(last_data, type(value)):\n requested_software_instance = self.restrictedTraverse(\n last_data.get('request_instance'), None)\n \n if last_data is None or not isinstance(last_data, type(value)) or \\\n last_data.get('hash') != value['hash'] or \\\n requested_software_instance is None:\n if compute_node_id and compute_partition_id:\n requester.requestInstance(**kw)\n else:\n # requester is a person so we use another method\n requester.requestSoftwareInstance(**kw)\n requested_software_instance = self.REQUEST.get('request_instance')\n if requested_software_instance is not None:\n value['request_instance'] = requested_software_instance\\\n .getRelativeUrl()\n requester.setLastData(value, key=key)\n\n if 
requested_software_instance is None:\n raise SoftwareInstanceNotReady\n else:\n if not requested_software_instance.getAggregate(portal_type=\"Compute Partition\"):\n raise SoftwareInstanceNotReady\n else:\n return dumps(SlapSoftwareInstance(\n **requested_software_instance._asSoftwareInstanceDict()))", "def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')", "def __get_partition__(self, account, container, obj, part_shift):\n \n key = hash_path(account, container, obj, raw_digest=True)\n part = unpack_from('>I', key)[0] >> part_shift\n return part", "def list_net_partitions(self, **params):\r\n return self.get(self.net_partitions_path, params=params)", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def partition(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"partition\")", "def get_mgmt_partition(adapter):\n\n # There will almost always be fewer VIOSes than LPARs. Since we're\n # querying without xags, it should be very quick.\n vio_wraps = vios.VIOS.search(adapter, is_mgmt_partition=True)\n if len(vio_wraps) == 1:\n return vio_wraps[0]\n\n # We delay the query to the LPARs because there could be hundreds of them.\n # So we don't want to query it unless we need to.\n lpar_wraps = lpar.LPAR.search(adapter, is_mgmt_partition=True)\n if len(lpar_wraps) == 1:\n return lpar_wraps[0]\n\n # If we made it here, something is wrong.\n raise ex.ManagementPartitionNotFoundException(\n count=len(vio_wraps + lpar_wraps))", "def get_partitions(disk):\n partitions = []\n script = [\n 'select disk {}'.format(disk['Number']),\n 'list partition']\n\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Append partition numbers\n output = result.stdout.decode().strip()\n regex = r'Partition\\s+(\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)\\s+'\n for tmp in re.findall(regex, output, re.IGNORECASE):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n partitions.append({'Number': num, 'Size': size})\n\n return partitions", "def get_this_partition(adapter):\n myid = u.my_partition_id()\n\n # There will almost always be fewer VIOSes than LPARs. 
Since we're\n # querying without xags, it should be very quick.\n vio_wraps = vios.VIOS.search(adapter, id=myid)\n if len(vio_wraps) == 1:\n return vio_wraps[0]\n\n # We delay the query to the LPARs because there could be hundreds of them.\n # So we don't want to query it unless we need to.\n lpar_wraps = lpar.LPAR.search(adapter, id=myid)\n if len(lpar_wraps) == 1:\n return lpar_wraps[0]\n\n # If we made it here, something is wrong.\n raise ex.ThisPartitionNotFoundException(\n count=len(vio_wraps + lpar_wraps), lpar_id=myid)", "def partition_path(self):\n return \"/kiel/groups/%s/partitions\" % self.group_name", "def partition(cls, key):\n return cls.partition_indexed(\n cls.hash_ring.select_bucket(key),\n )", "def partition(self):\n return self.tag(\"partition\")", "def get_partition_dev(dev, pnum):\n name = get_dev_name(os.path.realpath(dev))\n partname = None\n for f in os.listdir(os.path.join('/sys/block', name)):\n if f.startswith(name) and f.endswith(str(pnum)):\n # we want the shortest name that starts with the base name and ends with the partition number\n if not partname or len(f) < len(partname):\n partname = f\n if partname:\n return get_dev_path(partname)\n else:\n raise Error('partition %d for %s does not appear to exist' % (pnum, dev))", "def get_part(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgHdd_GetPart', self.handle, nIndex))", "def fetch(self, segment):\n pass", "def choose_partition():\n # Ask the user wether the partitions should be taken from the original partitions, or from the home-made partitions\n file_name = selector([\"The original partition given by the instructor\", \"The homemade partition file\"], [\"ORIGINAL\", \"HOMEMADE\"])\n\n # Open the corresponding file\n if file_name == \"1\" or file_name == \"ORIGINAL\":\n file = open(\"./assets/partitions.txt\", \"r\")\n elif file_name == \"2\" or file_name == \"HOMEMADE\":\n file = open(\"./assets/homemade_partitions.txt\", \"r\")\n\n skip_lines(-1)\n\n # Print all song's names in the partitions\n lines = file.readlines()\n file.close()\n for i in range(0, len(lines), 2):\n print(lines[i][:-1])\n\n # Ask the user to choose for a song\n song_index = choose_number(len(lines) / 2)\n\n # Get the corresponding song's partition and convert notes to Note instances\n partition = lines[song_index * 2 - 1][:-1].replace(' ', '')\n raw_notes = get_notes_from_line(partition)\n parsed_notes = [Note(note) for note in raw_notes]\n return parsed_notes", "def delete_net_partition(self, netpartition):\r\n return self.delete(self.net_partition_path % netpartition)", "def requestComputerPartition(self, computer_id=None,\n computer_partition_id=None, software_release=None, software_type=None,\n partition_reference=None, partition_parameter_xml=None,\n filter_xml=None, state=None, shared_xml=_MARKER):\n return self._requestComputePartition(computer_id, computer_partition_id,\n software_release, software_type, partition_reference,\n shared_xml, partition_parameter_xml, filter_xml, state)", "def get_partition_details(disk, partition):\n details = {}\n script = [\n 'select disk {}'.format(disk['Number']),\n 'select partition {}'.format(partition['Number']),\n 'detail partition']\n\n # Diskpart details\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Get volume letter or RAW status\n output = result.stdout.decode().strip()\n tmp = re.search(r'Volume\\s+\\d+\\s+(\\w|RAW)\\s+', output)\n if tmp:\n if tmp.group(1).upper() == 'RAW':\n 
details['FileSystem'] = RAW\n else:\n details['Letter'] = tmp.group(1)\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Get MBR type / GPT GUID for extra details on \"Unknown\" partitions\n guid = PARTITION_UIDS.get(details.get('Type').upper(), {})\n if guid:\n details.update({\n 'Description': guid.get('Description', '')[:29],\n 'OS': guid.get('OS', 'Unknown')[:27]})\n\n if 'Letter' in details:\n # Disk usage\n try:\n tmp = psutil.disk_usage('{}:\\\\'.format(details['Letter']))\n except OSError as err:\n details['FileSystem'] = 'Unknown'\n details['Error'] = err.strerror\n else:\n details['Used Space'] = human_readable_size(tmp.used)\n\n # fsutil details\n cmd = [\n 'fsutil',\n 'fsinfo',\n 'volumeinfo',\n '{}:'.format(details['Letter'])\n ]\n try:\n result = run_program(cmd)\n except subprocess.CalledProcessError:\n pass\n else:\n output = result.stdout.decode().strip()\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Add \"Feature\" lines\n details['File System Features'] = [s.strip() for s in tmp\n if ':' not in s]\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Set Volume Name\n details['Name'] = details.get('Volume Name', '')\n\n # Set FileSystem Type\n if details.get('FileSystem', '') not in ['RAW', 'Unknown']:\n details['FileSystem'] = details.get('File System Name', 'Unknown')\n\n return details", "def get_partition_cfg(partition_type: str, **kwargs) -> Dict:\n raise NotImplementedError" ]
[ "0.69014907", "0.6638482", "0.653303", "0.6299674", "0.6299674", "0.6172137", "0.6167602", "0.6078324", "0.60572803", "0.58988655", "0.57241666", "0.5673879", "0.5632247", "0.56248057", "0.55858135", "0.55768174", "0.5530593", "0.54860526", "0.5475339", "0.5435857", "0.5421893", "0.5408059", "0.54030955", "0.5398906", "0.53777814", "0.53664434", "0.5354651", "0.53513473", "0.533448", "0.53333974" ]
0.703554
0
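Every record in this dump declares the same objective in its metadata: a single triplet over (query, document, negatives). A minimal sketch of expanding one record into training triplets according to that field; the inline record below is abbreviated and the loss function is out of scope:

def to_triplets(record):
    # One (anchor, positive, negative) tuple per listed negative.
    triplets = []
    for anchor_key, pos_key, neg_key in record["metadata"]["objective"]["triplet"]:
        for neg in record[neg_key]:
            triplets.append((record[anchor_key], record[pos_key], neg))
    return triplets

record = {
    "query": "Fetch a network partition.",
    "document": "def show_net_partition(self, netpartition, **params): ...",
    "negatives": ["def get_partition(): ...", "def getPartition(self): ..."],
    "metadata": {"objective": {"triplet": [["query", "document", "negatives"]]}},
}
print(len(to_triplets(record)))  # -> 2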
Create a network partition.
def create_net_partition(self, body=None): return self.post(self.net_partitions_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def partition_network(self, *args):\n Blockade.blockade_create_partition(*args)", "def createPartition(self, mp, mtype, fs, size, vg, nr):\n startSector = 0\n endSector = 0\n\n # primary partition: calculate the space according instructions below\n if mtype == 'Pri':\n\n # calculate the start sector\n startSector = self.__primaryStartPoint\n\n # calculate the end sector\n sectorLen = startSector + int(size * MEGABYTE / float(self.__sectorSize))\n endSector = sectorLen - 1\n self.__primaryStartPoint = sectorLen\n\n # decrease disk size\n self.__diskSize -= size\n\n # extended partition: update primary and logical pointers\n # when a extended partition is given, its size is not taken into account\n elif mtype == 'Ext':\n\n # calculate the start sector\n startSector = self.__primaryStartPoint\n\n # calculate end sector pointer\n endSector = int(self.__diskSize * MEGABYTE / float(self.__sectorSize)) + startSector - 1\n if endSector > MAX_SECTOR_POSSIBLE:\n endSector = MAX_SECTOR_POSSIBLE\n\n self.__extEndSector = endSector\n\n # decrease disk size\n self.__diskSize -= EXTENT_SIZE - 1\n\n # logical partition: calculate the space according instructions below\n elif mtype == 'Log':\n\n # FIXME, need to improve\n # just for zkvm without extended partition\n self.__extEndSector = endSector\n # refresh start sector pointer\n startSector = self.__primaryStartPoint + self.__sectorOffset\n\n if size == ALL_AVAILABLE:\n endSector = self.__extEndSector\n size = self.__diskSize - 1\n self.__diskSize = 0\n\n else: \n # calculate end sector pointer\n sectorLen = startSector + int(size * MEGABYTE / float(self.__sectorSize))\n endSector = sectorLen - 1\n self.__primaryStartPoint = sectorLen\n\n # decrease disk size\n self.__diskSize -= size\n\n\n part = {}\n part['command'] = 'create:partition'\n part['id'] = \"%s-part%s\" % (self.__diskId, str(nr))\n part['name'] = self.__disk + str(nr)\n part['mount_point'] = mp\n part['type'] = mtype\n part['fs'] = fs\n part['multipath'] = self.__hasMultipath\n part['raid_name'] = None\n part['disk_name'] = '/dev/%s' % self.__disk\n part['size'] = size\n part['vg'] = vg\n part['nr'] = nr\n part['format'] = True\n part['start'] = startSector\n part['end'] = endSector\n\n if self.__hasMultipath:\n part['disk_name'] = '/dev/mapper/%s' % self.__disk\n\n # extended partition: do not format\n if mtype == 'Ext':\n part['format'] = False\n\n return part", "def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)", "def create_partition(self, partition_spec, if_not_exists=False, async_=False, hints=None):\n return self.partitions.create(\n partition_spec, if_not_exists=if_not_exists, hints=hints, async_=async_\n )", "def newpart(self, device, primary, ncyls, swap=False):\n # This is a simple partitioning tool, which only supports\n # adding partitions sequentially, with all primary partitions\n # being before the extended partition, so once a logical\n # partition has been added, it is not possible to add further\n # primary ones.\n di = DiskInfo(device)\n pmax = 0 # Record highest partition number\n lim = -1 # Used for seeking last used cylinder\n exp = 0 # Number of extended partition\n ex0, ex1 = 0, -1 # Extended partition start and end\n log0, log1 = 0, -1 # Start and end of area used by logical partitions\n for p in di.parts:\n pn = int(p[0][len(device):])\n scyl, ecyl = p[1:3]\n if pn <= 4:\n if 
exp:\n run_error(_(\"Not supported: primary partition (%s%d)\\n\"\n \"has higher partition number than extended \"\n \"partition\") % (device, pn))\n return \"\"\n if scyl <= lim:\n run_error(_(\"Partitions must be ordered on the device.\\n\"\n \"%s%d is out of order.\") % (device, pn))\n return \"\"\n if p[3] in (\"5\", \"f\"):\n # extended\n exp = pn\n ex0, ex1 = scyl, ecyl\n continue\n pmax = pn\n lim = ecyl\n\n startcyl = lim + 1\n endcyl = lim + ncyls\n if endcyl >= di.drvcyls:\n run_error(_(\"Too little space at end of drive for new partition\"))\n return \"\"\n if exp and (pmax <= 4):\n # Remove the extended partition, which is empty anyway\n if not self.rmpart(device, exp):\n return \"\"\n pmax = exp - 1\n if primary:\n if pmax >= 4:\n run_error(_(\"Cannot add primary partition to %s\") % device)\n return \"\"\n t = \"primary\"\n else:\n t = \"logical\"\n if pmax > 4:\n # resize extended partition\n if not self.xcheck(\"resizepart\", device, str(exp),\n str(ex0), str(endcyl),\n onfail=_(\"Couldn't resize extended partition %s%d\")\n % (device, exp)):\n return False\n else:\n # create extended partition\n if not self.xcheck(\"newpart\", device,\n str(startcyl), str(endcyl), \"extended\",\n onfail=_(\"Couldn't create extended partition on %s\")\n % device):\n return False\n if pmax < 4:\n pmax = 4\n\n if self.xcheck(\"newpart\", device, str(startcyl), str(endcyl),\n t, \"linux-swap\" if swap else \"ext2\"):\n return \"%s%d\" % (device, pmax + 1)\n else:\n run_error(_(\"Couldn't add new partition to %s\") % device)\n return \"\"", "def createPhysicalVolume(self, disk, size, partition):\n pv = {}\n pv['command'] = 'create:pv'\n\n # FIXME: partitioner module only accepts the single name of the disk to\n # create LVM partitions, but when it creates physical partitions, it\n # requires full paths. 
The right way is configure both cases using the\n # same string\n pv['disk'] = disk.split('/')[-1]\n\n pv['size'] = size\n pv['partition'] = partition\n\n return pv", "def create(self, directory):\n\n if not self.preallocated:\n if directory:\n self.filename = '%s/%s' % (directory, self.filename)\n logging.info('Creating disk image: %s' % self.filename)\n qemu_img_output = run_cmd(qemu_img_path(), 'create', '-f', 'raw', self.filename, '%dM' % self.size)\n if not os.path.exists(self.filename): \n logging.info(\"Problem while creating raw image: %s\" % qemu_img_output)\n raise Exception(\"Problem while creating raw image: %s\" % qemu_img_output)\n\n # From here, we assume that self.filename refers to whatever holds the disk image,\n # be it a file, a partition, logical volume, actual disk..\n\n logging.info('Adding partition table to disk image: %s' % self.filename)\n run_cmd('parted', '--script', self.filename, 'mklabel', 'msdos')\n\n # Partition the disk \n for part in self.partitions:\n part.create(self)\n\n logging.info('Creating loop devices corresponding to the created partitions')\n self.vm.add_clean_cb(lambda : self.unmap(ignore_fail=True))\n kpartx_output = run_cmd('kpartx', '-av', self.filename)\n parts = []\n for line in kpartx_output.split('\\n'):\n if line == \"\" or line.startswith(\"gpt:\") or line.startswith(\"dos:\"):\n continue\n if line.startswith(\"add\"):\n parts.append(line)\n continue\n logging.error('Skipping unknown line in kpartx output (%s)' % line)\n mapdevs = []\n for line in parts:\n mapdevs.append(line.split(' ')[2])\n for (part, mapdev) in zip(self.partitions, mapdevs):\n part.mapdev = '/dev/mapper/%s' % mapdev\n\n # At this point, all partitions are created and their mapping device has been\n # created and set as .mapdev\n\n # Adds a filesystem to the partition\n logging.info(\"Creating file systems\")\n for part in self.partitions:\n part.mkfs()", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def createDisk(self , name):\n return", "def _createOwnPartition(self, databaseCursor, uniqueItems):\n self.logger.debug(\"%s - in createOwnPartition for %s\",threading.currentThread().getName(),self.name)\n for x in uniqueItems:\n #self.logger.debug(\"DEBUG - item value is %s\",x)\n partitionCreationParameters = self.partitionCreationParameters(x)\n partitionName = self.partitionNameTemplate % partitionCreationParameters[\"partitionName\"]\n if partitionWasCreated(partitionName):\n #self.logger.debug(\"DEBUG - skipping creation of %s\",partitionName)\n continue\n partitionCreationSql = self.partitionCreationSqlTemplate % partitionCreationParameters\n #self.logger.debug(\"%s - Sql for %s is %s\",threading.currentThread().getName(),self.name,partitionCreationSql)\n aPartition = Table(name=partitionName, logger=self.logger, creationSql=partitionCreationSql)\n self.logger.debug(\"%s - savepoint createPartitions_%s\",threading.currentThread().getName(), partitionName)\n databaseCursor.execute(\"savepoint createPartitions_%s\" % partitionName)\n try:\n self.logger.debug(\"%s - creating %s\", threading.currentThread().getName(), partitionName)\n aPartition._createSelf(databaseCursor)\n markPartitionCreated(partitionName)\n self.logger.debug(\"%s - successful - releasing savepoint\", threading.currentThread().getName())\n databaseCursor.execute(\"release savepoint createPartitions_%s\" % partitionName)\n except pg.ProgrammingError, x:\n self.logger.debug(\"%s -- Rolling back and releasing savepoint: Creating %s failed in createPartitions: %s\", 
threading.currentThread().getName(), partitionName, str(x).strip())\n databaseCursor.execute(\"rollback to createPartitions_%s; release savepoint createPartitions_%s;\" % (partitionName, partitionName))\n databaseCursor.connection.commit()", "def add_part(self, begin, length, type, mntpnt):\n end = begin+length-1\n logging.debug(\"add_part - begin %d, length %d, end %d\" % (begin, length, end))\n for part in self.partitions:\n if (begin >= part.begin and begin <= part.end) or \\\n (end >= part.begin and end <= part.end):\n raise Exception('Partitions are overlapping')\n if begin > end:\n raise Exception('Partition\\'s last block is before its first')\n if begin < 0 or end > self.size:\n raise Exception('Partition is out of bounds. start=%d, end=%d, disksize=%d' % (begin,end,self.size))\n part = self.Partition(disk=self, begin=begin, end=end, type=str_to_type(type), mntpnt=mntpnt)\n self.partitions.append(part)\n\n # We always keep the partitions in order, so that the output from kpartx matches our understanding\n self.partitions.sort(cmp=lambda x,y: x.begin - y.begin)", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def add_partition(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_AddPartition', self.handle))", "def create_network_segments(self, tenant_id, network_id,\n network_name, segments):", "async def create_partition(\n self,\n request: Optional[Union[metadata_.CreatePartitionRequest, dict]] = None,\n *,\n parent: Optional[str] = None,\n partition: Optional[metadata_.Partition] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> metadata_.Partition:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent, partition])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = metadata_.CreatePartitionRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if parent is not None:\n request.parent = parent\n if partition is not None:\n request.partition = partition\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.create_partition,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "def _CreateNewDisk(self, idx, params, _):\n # add a new disk\n disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n disk = self._GenerateDiskTemplateWrapper(idx, disk_template,\n params)\n new_disks = CreateDisks(self, self.instance, disks=[disk])\n self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n if self.cluster.prealloc_wipe_disks:\n # Wipe new disk\n 
WipeOrCleanupDisks(self, self.instance,\n disks=[(idx, disk, 0)],\n cleanup=new_disks)\n\n changes = [\n (\"disk/%d\" % idx,\n \"add:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n if self.op.hotplug:\n result = self.rpc.call_blockdev_assemble(self.instance.primary_node,\n (disk, self.instance),\n self.instance, True, idx)\n if result.fail_msg:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n self.LogWarning(\"Can't assemble newly created disk %d: %s\",\n idx, result.fail_msg)\n else:\n _, link_name, uri = result.payload\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)", "def create_disk(auth_parms, prod_offer, disk_size, disk_name, vdc_uuid):\n\n # get product offer uuid for the disk in question\n prod_offer_uuid = get_prod_offer_uuid(auth_parms, prod_offer)\n\n disk_job = rest_create_disk(auth_parms, vdc_uuid, prod_offer_uuid, disk_name, disk_size)\n\n disk_uuid = disk_job['itemUUID']\n print(\"New disk UUID=\" + disk_uuid)\n\n # Check the job completes\n status = wait_for_job(auth_parms, disk_job['resourceUUID'], \"SUCCESSFUL\", 90)\n if (status != 0):\n raise Exception(\"Failed to add create disk (uuid=\" + disk_uuid + \")\")\n\n return disk_uuid", "def set_partition(self, partition=0):\n if not isinstance(partition, int):\n raise TypeError('partition must be an integer')\n if partition <= 0:\n raise ValueError('partition must be positive')\n if self.connected:\n self.producer.send(\"PART:\"+str(partition))", "def __create_partition(self,partition_dt):\n\n p_array = self.__partition_date_to_path_array(partition_dt)\n \n # For each component, fetch the group or create it\n # Year\n try:\n y_group = self.root_group._f_get_child(p_array[0])\n except tables.NoSuchNodeError:\n y_group = self.file.create_group(self.root_group,p_array[0])\n\n # Month\n try:\n m_group = y_group._f_get_child(p_array[1])\n except tables.NoSuchNodeError:\n m_group = self.file.create_group(y_group,p_array[1])\n\n # Day\n try:\n d_group = m_group._f_get_child(p_array[2])\n except tables.NoSuchNodeError:\n d_group = self.file.create_group(m_group,p_array[2])\n\n # We need to create the table in the day group\n ts_data = self.file.create_table(d_group,'ts_data',self.table_description,self.table_title,\n self.table_filters, self.table_expectedrows, self.table_chunkshape, self.table_byteorder)\n\n # Need to save this as an attribute because it doesn't seem to be saved anywhere\n ts_data.attrs._TS_TABLES_EXPECTEDROWS_PER_PARTITION = self.table_expectedrows\n\n return ts_data", "def partition(data, num_partitions=None, by=None, **kwargs):\n return Component(\n \"Partition\",\n arguments={\n 'data': Component.of(data),\n 'num_partitions': Component.of(num_partitions),\n 'by': Component.of(by)\n },\n options={\n \n },\n constraints=kwargs)", "def test_create_part(self):\n pass", "def create_platform_network(enode, category, config):\n # Check if this category has a defined netns\n netns = config.get('netns', None)\n if netns is None:\n return\n\n # Create the given network namespace\n enode._docker_exec('ip netns add {}'.format(netns))\n\n # lo should always be up\n enode._docker_exec('ip netns exec {} ip link set dev lo up'.format(netns))", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = 
neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def create_network(self, context, network):\n LOG.debug(_(\"NeutronRestProxyV2: create_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n with context.session.begin(subtransactions=True):\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context,\n network[\"network\"])\n\n # create network in DB\n new_net = super(NeutronRestProxyV2, self).create_network(context,\n network)\n self._process_l3_create(context, new_net, network['network'])\n mapped_network = self._get_mapped_network_with_subnets(new_net,\n context)\n\n # create network on the network controller\n self.servers.rest_create_network(tenant_id, mapped_network)\n\n # return created network\n return new_net", "def create_fs_on_disk(vm_name, disk_alias, executor=None):\n if ll_vms.get_vm_state(vm_name) == config.VM_DOWN:\n ll_vms.startVm(\n True, vm_name, wait_for_status=config.VM_UP,\n wait_for_ip=True\n )\n if not executor:\n executor = get_vm_executor(vm_name)\n\n logger.info(\n \"Find disk logical name for disk with alias %s on vm %s\",\n disk_alias, vm_name\n )\n disk_logical_volume_name = get_logical_name_by_vdsm_client(\n vm_name, disk_alias\n )\n if not disk_logical_volume_name:\n # This function is used to test whether logical volume was found,\n # raises an exception if it wasn't found\n message = \"Failed to get %s disk logical name\" % disk_alias\n logger.error(message)\n return False, message\n\n logger.info(\n \"The logical volume name for the requested disk is: '%s'\",\n disk_logical_volume_name\n )\n\n logger.info(\n \"Creating label: %s\", CREATE_DISK_LABEL_CMD % disk_logical_volume_name\n )\n rc, out, _ = executor.run_cmd(\n (CREATE_DISK_LABEL_CMD % disk_logical_volume_name).split()\n )\n logger.info(\"Output after creating disk label: %s\", out)\n if rc:\n return rc, out\n logger.info(\n \"Creating partition %s\",\n CREATE_DISK_PARTITION_CMD % disk_logical_volume_name\n )\n rc, out, _ = executor.run_cmd(\n (CREATE_DISK_PARTITION_CMD % disk_logical_volume_name).split()\n )\n logger.info(\"Output after creating partition: %s\", out)\n if rc:\n return rc, out\n # '1': create the fs as the first partition\n # '?': createFileSystem will return a random mount point\n logger.info(\"Creating a File-system on first partition\")\n mount_point = create_filesystem(\n vm_name=vm_name, device=disk_logical_volume_name, partition='1',\n fs=FILESYSTEM, executor=executor\n )\n return True, mount_point", "def test_partition_with_barely_sufficient_space(self):\n self.setup()\n flops = 1000\n (e, ne, n, w, sw, s) = range(6)\n\n processors = list()\n for i in range(18):\n processors.append(Processor(i, flops))\n\n links = list()\n 
links.append(Link(0, 0, 0, 0, 1, s, s))\n\n _sdram = SDRAM(2**12)\n\n links = list()\n\n links.append(Link(0, 0, 0, 1, 1, n, n))\n links.append(Link(0, 1, 1, 1, 0, s, s))\n links.append(Link(1, 1, 2, 0, 0, e, e))\n links.append(Link(1, 0, 3, 0, 1, w, w))\n r = Router(links, False, 100, 1024)\n\n ip = \"192.162.240.253\"\n chips = list()\n for x in range(5):\n for y in range(5):\n chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))\n\n self.machine = Machine(chips)\n singular_vertex = TestVertex(450, \"Large vertex\", max_atoms_per_core=1)\n self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)\n self.graph = ApplicationGraph(\n \"Graph with large vertex\", [singular_vertex], [])\n graph, mapper = self.bp.partition(self.graph, self.machine)\n self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)\n self.assertEqual(len(graph.vertices), 450)", "def defineTasks(self,partition):\n recv_slots = partition.recvSlices()\n strm_slots = partition.streamSlices()\n recvNodes = partition.recvNodesFromSlots()\n strmNodes = partition.streamNodesFromSlots()\n opt = '/'+self.manager.hostName()+'/'+partition.manager.name()+'/'+partition.name+'/'\n cl0 = '/Class0'+opt\n cl1 = '/Class1'+opt\n\n partition.setDataSources([])\n tasks = []\n pn = self.partitionName()\n print '---------------------- Partition name is:',pn\n for i in xrange(len(recv_slots)):\n slot = recv_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_SND' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageSend'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setRecvSenders(tasks)\n tasks = []\n for i in xrange(len(strm_slots)):\n slot = strm_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_HLT' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageRecv'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setStreamReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in recvNodes:\n for itm in self.rcvInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setRecvInfrastructure(infra)\n partition.setRecvReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in strmNodes:\n for itm in self.strInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setStreamInfrastructure(infra)\n partition.setStreamSenders(tasks)\n if partition.saveTasks():\n tasks = partition.collectTasks(tasks={},with_data_sources=0)\n return tasks\n return None", "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", 
ipam=ipam_config)" ]
[ "0.8267545", "0.73862374", "0.71127677", "0.69926906", "0.65314716", "0.63135135", "0.6260008", "0.621013", "0.6164522", "0.6133775", "0.6073639", "0.6063786", "0.60147226", "0.5996693", "0.5980926", "0.59421134", "0.5847319", "0.5832983", "0.57437587", "0.56981105", "0.5680569", "0.5585997", "0.55173236", "0.5515168", "0.5495967", "0.5495342", "0.5488073", "0.5487653", "0.54842764", "0.5469747" ]
0.81742126
1
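This is the only record in view with document_rank 1, and its first negative scores 0.8267545 against a document_score of 0.81742126; the rank looks consistent with counting negatives that outscore the positive. That relationship is inferred from the visible records, not documented by the dump itself:

negative_scores = [0.8267545, 0.73862374, 0.71127677, 0.69926906]  # truncated
document_score = 0.81742126
rank = sum(s > document_score for s in negative_scores)
print(rank)  # -> 1, matching this record's document_rank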
Delete the network partition.
def delete_net_partition(self, netpartition): return self.delete(self.net_partition_path % netpartition)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_partition(self, partition):\n raise NotImplementedError('delete_file')", "def delete_partition(self, partition_spec, if_exists=False, async_=False, hints=None):\n return self.partitions.delete(\n partition_spec, if_exists=if_exists, hints=hints, async_=async_\n )", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def delete_disks(self, storage_elems):\n raise NotImplementedError()", "def _wipe(self):\n log_method_call(self, self.name, status=self.status)\n\n start = self.partedPartition.geometry.start\n part_len = self.partedPartition.geometry.end - start\n bs = self.partedPartition.geometry.device.sectorSize\n device = self.partedPartition.geometry.device.path\n\n # Erase 1MiB or to end of partition\n count = int(Size(\"1 MiB\") / bs)\n count = min(count, part_len)\n\n cmd = [\"dd\", \"if=/dev/zero\", \"of=%s\" % device, \"bs=%s\" % bs,\n \"seek=%s\" % start, \"count=%s\" % count]\n try:\n util.run_program(cmd)\n except OSError as e:\n log.error(str(e))\n finally:\n # If a udev device is created with the watch option, then\n # a change uevent is synthesized and we need to wait for\n # things to settle.\n udev.settle()", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def delete_network(self, tenant_id, network_id, network_segments):\n self.delete_network_segments(tenant_id, network_segments)\n self.delete_network_bulk(tenant_id, [network_id])", "def Delete(self):\n\n if self.network_id:\n self.cs.delete_network(self.network_id)\n\n if self.is_vpc and self.vpc_id:\n self.cs.delete_vpc(self.vpc_id)", "def on_delete_clicked(self,button):\n\t\tself.list_partitions.delete_selected_partition()", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def delete_network_segments(self, tenant_id, network_segments):", "def delete_network_segment(context, segment_id):\n with db_api.context_manager.writer.using(context):\n network_obj.NetworkSegment.delete_objects(context, id=segment_id)", "async def delete_checkpoint_async(self, partition_id):", "def delete_cluster(self):", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = 
Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return", "def deprovision(project, node, network, nic):\n data = {constants.PROJECT_PARAMETER: project,\n constants.NODE_NAME_PARAMETER: node,\n constants.NETWORK_PARAMETER: network,\n constants.NIC_PARAMETER: nic}\n res = requests.delete(_url + \"deprovision/\", data=data, auth=(\n _username, _password))\n click.echo(res.content)", "def delete(self):\r\n #validate where clause\r\n partition_key = self.model._primary_keys.values()[0]\r\n if not any([c.field == partition_key.column_name for c in self._where]):\r\n raise QueryException(\"The partition key must be defined on delete queries\")\r\n\r\n dq = DeleteStatement(\r\n self.column_family_name,\r\n where=self._where,\r\n timestamp=self._timestamp\r\n )\r\n self._execute(dq)", "def delete_blockreplica(self, block_replica):\n raise NotImplementedError('delete_blockreplica')", "async def delete_partition(\n self,\n request: Optional[Union[metadata_.DeletePartitionRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> None:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = metadata_.DeletePartitionRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.delete_partition,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )", "def delete_segment(self, n):\n self.get_segment(n).delete()", "def rmpart(self, dev, partno):\n return self.xcheck(\"rmpart\", dev, str(partno),\n onfail=_(\"Couldn't remove partition %s%d\") % (dev, partno))", "def test_delete_hyperflex_cluster_network_policy(self):\n pass", "def test_delete_cluster_network(self):\n pass", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n 
self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def delete_network_postcommit(self, mech_context):\n\n LOG.debug(\"delete_network_postcommit: called\")\n network = mech_context.current\n network_id = network['id']\n vlan_id = network['provider:segmentation_id']\n tenant_id = network['tenant_id']\n\n for switch_ip in self._switch:\n try:\n system = self.client[switch_ip].system.list()\n system[0].remove_segment(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(_LE(\"SeaMicr driver: failed to delete network\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n raise Exception(\n _(\"Seamicro switch exception, delete_network_postcommit\"\n \" failed\"))\n\n LOG.info(_LI(\"delete network (postcommit): %(network_id)s\"\n \" with vlan = %(vlan_id)s\"\n \" for tenant %(tenant_id)s on switch %(switch_ip)s\"),\n {'network_id': network_id,\n 'vlan_id': vlan_id,\n 'tenant_id': tenant_id,\n 'switch_ip': switch_ip})", "def delete_node(self, node_cfg):\n with self.__connect_node(node_cfg) as conn:\n self._shutdown_node(conn)\n self._unprovision_node(conn)", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def delete_device(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.delete_device) ENABLED START #\n # PROTECTED REGION END # // SdpMasterLeafNode.delete_device" ]
[ "0.7445487", "0.6641203", "0.6510621", "0.63935685", "0.6205695", "0.612753", "0.60952127", "0.6081498", "0.60551745", "0.60271436", "0.59529066", "0.5949345", "0.5939962", "0.59164274", "0.58938175", "0.58485097", "0.5816137", "0.5796667", "0.57603586", "0.5741945", "0.5737243", "0.57299435", "0.57036537", "0.5701084", "0.56798255", "0.56724215", "0.5660031", "0.56485176", "0.56158257", "0.56114906" ]
0.81478804
0
Create a new packet filter.
def create_packet_filter(self, body=None): return self.post(self.packet_filters_path, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, filter,\n double_precision = False):\n filters.check_is_filter(filter)\n (f, i) = filter\n filt_len = f.shape[0]\n assert filt_len == i * 2 + 1 # check it's symmetric\n dtype = (torch.float64 if double_precision else torch.float32)\n # the shape is (out_channels, in_channels, width),\n # where out_channels and in_channels are both 1.\n self.filt = torch.tensor(f, dtype=dtype).view(1, 1, filt_len)\n self.padding = i", "def make_filter(name, schema):\n return HSMFilter(name, schema)", "def initialize_filter(self):\n shape = self.filter_size + (self.input_shape[-1], self.channels)\n self.filter = self.filter_initializer(shape)", "def create_one(self, valid_json):\n # Note that we pop out the protocol filter here, so not to confuse\n # the individual filter constructors.\n proto_to_create = valid_json.pop(HotDataFormat.TRANSPORT)\n for proto in proto_to_create:\n proto = proto.lower()\n\n # Check to ensure we're configured to use this protocol. If we're\n # not, just ignore it and carry on.\n if proto not in self.valid_proto:\n continue\n\n # Do the actual production, just passing on the json\n db_segment = DB_SEGMENTS[proto]\n FilterClass = db_segment.FILTER\n filter = FilterClass(valid_json)\n\n # Put the new filter into the structure. We use defaultdict, adding\n # TrafficFilterLists only as we need them, avoiding empty lists\n # which would imply a broad filter.\n self.output[db_segment].append(filter)", "def add_received_packet_filter(self, filter_func):\n self._received_packet_filters.append(filter_func)", "def create(self, filter_entry, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_with_http_info(filter_entry, **kwargs)\n else:\n (data) = self.create_with_http_info(filter_entry, **kwargs)\n return data", "def new(name):\r\n\r\n g_filter(name)", "def init(self, *args):\n return _ida_hexrays.udc_filter_t_init(self, *args)", "def attach_filter(node):\n ifname = node.name + \"-eth2\"\n\n logging.info(\"attaching filter to node %r\" % node)\n\n node.cmd(\"tc qdisc del dev %s clsact\" % ifname)\n\n node.cmd(\"tc qdisc add dev %s clsact\" % ifname )\n node.cmd(\"rm bpf_fifo; mkfifo bpf_fifo\")\n # this is when\n node.cmd(\"rm /tmp/bpf\")\n node.cmd('tc exec bpf import /tmp/bpf run sh read_map.sh > /tmp/out_bpf &')\n\n node.cmd('tc qdisc add dev %s clsact' % ifname)\n cmd = \"tc filter add dev {ifname} ingress bpf obj {bytecode} section action direct-action\".format(\n ifname=ifname,\n bytecode=EBPF_DROPPER_BYTECODE)\n\n # to see the filters you have to run\n # tc filter show dev gateway-eth2 ingress\n\n # print cmd\n out = node.cmd(cmd)\n if \"File exists\" in out:\n cmd = cmd.replace(\"add\", \"change\")\n print(\"exists\")\n out = node.cmd(cmd)\n\n if out:\n print(cmd)\n print(out)", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def __init__(self):\n if self.__class__ == ElementFilter:\n _self = None\n else:\n _self = self\n this = _libsbml.new_ElementFilter(_self, )\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, num_filters, filter_size, padding=0, stride=1):\n self.num_filters = num_filters\n self.filter_size = filter_size\n self.padding = padding\n self.stride = stride\n self.conv_filter = np.random.rand(num_filters, filter_size, filter_size)/(filter_size*filter_size)", "def __init__(self, image, filter_name, cutoff, order = 0):\n self.filter_name = filter_name\n self.image = image\n if filter_name == 'ideal_l':\n self.filter = 
self.get_ideal_low_pass_filter\n elif filter_name == 'ideal_h':\n self.filter = self.get_ideal_high_pass_filter\n elif filter_name == 'butterworth_l':\n self.filter = self.get_butterworth_low_pass_filter\n elif filter_name == 'butterworth_h':\n self.filter = self.get_butterworth_high_pass_filter\n elif filter_name == 'gaussian_l':\n self.filter = self.get_gaussian_low_pass_filter\n elif filter_name == 'gaussian_h':\n self.filter = self.get_gaussian_high_pass_filter\n\n self.cutoff = cutoff\n self.order = order", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def apply_filter(self, filt):\n new_timetraces = filt(self.timetraces)\n return self.__class__(\n new_timetraces,\n self.time,\n self.tx,\n self.rx,\n self.probe,\n self.examination_object,\n self.metadata,\n )", "def __init__(self, image, filter_name, cutoff, order = 0):\r\n self.image = image\r\n if filter_name == 'ideal_l':\r\n self.filter = self.get_ideal_low_pass_filter\r\n elif filter_name == 'ideal_h':\r\n self.filter = self.get_ideal_high_pass_filter\r\n elif filter_name == 'butterworth_l':\r\n self.filter = self.get_butterworth_low_pass_filter\r\n elif filter_name == 'butterworth_h':\r\n self.filter = self.get_butterworth_high_pass_filter\r\n elif filter_name == 'gaussian_l':\r\n self.filter = self.get_gaussian_low_pass_filter\r\n elif filter_name == 'gaussian_h':\r\n self.filter = self.get_gaussian_high_pass_filter\r\n\r\n self.cutoff = cutoff\r\n self.order = order\r\n self.filter_name = filter_name", "def cloudflare_waf_filter_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n expression = args['expression']\n zone_id = args.get('zone_id', client.zone_id)\n\n ref = args.get('ref')\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n\n response = client.cloudflare_waf_filter_create_request(\n expression, zone_id, description=description, paused=paused, ref=ref)\n\n cloned_response = copy.deepcopy(response)\n output = cloned_response['result']\n output.append({'zone_id': zone_id})\n\n readable_output = tableToMarkdown(\n name='Filter was successfully created.',\n t=output,\n headers=['id', 'expression', 'paused', 'description', 'ref'],\n headerTransform=pascalToSpace\n )\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='CloudflareWAF.Filter',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", "def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,\n strip_disallowed_elements=False, strip_html_comments=True,\n **kwargs):\n self.attr_filter = attribute_filter_factory(attributes)\n\n self.strip_disallowed_elements = strip_disallowed_elements\n self.strip_html_comments = strip_html_comments\n\n return super(BleachSanitizerFilter, self).__init__(source, **kwargs)", "def __init__(self, type: int, filter: int):\n ...", "def __init__(self, init=None, filter_table=None, filter_name=None,\n filter_type=None, **kwargs):\n super(MiriFilter, self).__init__(init=init, **kwargs)\n\n # Data type is filter.\n self.meta.filetype = 'FILTER'\n \n # Define the filter name and type, if given\n if filter_name is not None:\n self.meta.instrument.filter = filter_name\n if filter_type is not None:\n self.meta.instrument.filter_type = filter_type\n\n if filter_table is not 
None:\n try:\n self.filter_table = filter_table\n except (ValueError, TypeError) as e:\n strg = \"filter_table must be a numpy record array or list of records.\"\n strg += \"\\n %s\" % str(e)\n raise TypeError(strg)\n \n # Define the wavelength units.\n# units = self.get_data_units('filter_table')\n \n # Cached arrays\n self._wavelength = None\n self._transmission = None\n self._interptransmission = None", "def __init__(self, filterName, footprint, maskedImage, psf, psffwhm, avgNoise,\n maxNumberOfPeaks, debResult):\n self.filter = filterName\n self.fp = footprint\n self.maskedImage = maskedImage\n self.psf = psf\n self.psffwhm = psffwhm\n self.img = maskedImage.getImage()\n self.imbb = self.img.getBBox()\n self.varimg = maskedImage.getVariance()\n self.mask = maskedImage.getMask()\n self.avgNoise = avgNoise\n self.updateFootprintBbox()\n self.debResult = debResult\n self.peakCount = debResult.peakCount\n self.templateSum = None\n\n # avgNoise is an estiamte of the average noise level for the image in this filter\n if avgNoise is None:\n stats = afwMath.makeStatistics(self.varimg, self.mask, afwMath.MEDIAN)\n avgNoise = np.sqrt(stats.getValue(afwMath.MEDIAN))\n debResult.log.trace('Estimated avgNoise for filter %s = %f', self.filter, avgNoise)\n self.avgNoise = avgNoise\n\n # Store all of the peak information in a single object\n self.peaks = []\n peaks = self.fp.getPeaks()\n for idx in range(self.peakCount):\n deblendedPeak = DeblendedPeak(peaks[idx], idx, self)\n self.peaks.append(deblendedPeak)", "def cloudflare_waf_filter_create_request(self, expression: str, zone_id: str, ref: str = None, paused: bool = None,\n description: str = None) -> Dict[str, Any]:\n params = remove_empty_elements({\n 'expression': expression,\n 'ref': ref,\n 'paused': paused,\n 'description': description,\n 'zone_id': zone_id\n })\n return self._http_request(\n method='POST',\n url_suffix=f'zones/{zone_id}/filters',\n json_data=[params])", "def add_filter(self, f, **kwargs):\n if not isinstance(f, UnitFilter):\n msg = \"Argument of type Filter expected. 
Got type {0}\"\n raise TypeError(msg.format(type(f)))\n\n if f.wavelength_unit is None:\n msg = \"Filter wavelength must have units for storage.\"\n raise AttributeError(msg)\n\n append = kwargs.pop('append', True)\n\n f.write_to(\"{0:s}\".format(self.source),\n tablename='/filters/{0}'.format(f.name),\n createparents=True, append=append,\n **kwargs)", "def __init__(\n self,\n n_filter,\n filter_size=3,\n strides=1,\n act=None,\n padding='SAME',\n data_format='channels_first',\n dilation_rate=1,\n W_init='glorot_uniform',\n b_init=None,\n in_channels=None,\n name=None,\n ):\n super().__init__(name, act)\n self.n_filter = n_filter\n self.filter_size = conv_utils.normalize_2d_args('ksize', filter_size)\n self.strides = conv_utils.normalize_2d_args('strides', strides)\n self.dilation_rate = conv_utils.normalize_2d_args('dilations', dilation_rate)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.padding = padding\n self.W_init = W_init\n self.b_init = b_init\n self.in_channels = in_channels\n self.W = None\n self.b = None\n if self.in_channels:\n self.build(None)\n self._built = True", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def design_filter(interpolation, decimation, fractional_bw):\n\n if fractional_bw >= 0.5 or fractional_bw <= 0:\n raise ValueError('Invalid fractional bandwidth, must be in (0, 0.5)')\n\n if decimation < 1 or interpolation < 1:\n raise ValueError('Invalid interpolation or decimation rate. 
Must be a non-zero positive integer.')\n\n beta = 7.0\n halfband = 0.5\n rate = float(interpolation)/float(decimation)\n if(rate >= 1.0):\n trans_width = halfband - fractional_bw\n mid_transition_band = halfband - trans_width/2.0\n else:\n trans_width = rate*(halfband - fractional_bw)\n mid_transition_band = rate*halfband - trans_width/2.0\n\n taps = filter.firdes.low_pass(interpolation, # gain\n interpolation, # Fs\n mid_transition_band, # trans mid point\n trans_width, # transition width\n filter.firdes.WIN_KAISER,\n beta) # beta\n\n return taps", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 9, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def packetize(cls, source, raw_data):\n pkt = cls(source, raw_data)\n\n if pkt.type not in DGTL.descriptors.keys():\n raise Warning('Unsupported packet type! (%s)' % pkt.type)\n\n pkt.set_decoder(DGTL.descriptors[pkt.type][2])\n\n return pkt", "def add(self, DrilldownType=None, NumberOfResults=None, ProtocolStackFilterId=None, SortAscending=None, SortingStatistic=None):\n # type: (str, int, List[str], bool, str) -> Layer23ProtocolStackFilter\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))" ]
[ "0.6488138", "0.614059", "0.613992", "0.6001748", "0.584723", "0.58219844", "0.57764983", "0.57568866", "0.5745277", "0.574284", "0.5729884", "0.5645626", "0.56437176", "0.5626443", "0.557961", "0.5567407", "0.554649", "0.5511731", "0.54957", "0.547382", "0.5453739", "0.5443173", "0.5443113", "0.5435968", "0.5421427", "0.5387865", "0.5378894", "0.53601056", "0.5343543", "0.5336474" ]
0.79164827
0
Update a packet filter.
def update_packet_filter(self, packet_filter_id, body=None): return self.put(self.packet_filter_path % packet_filter_id, body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cloudflare_waf_filter_update_request(self, filter_id: str, expression: str, zone_id: str, ref: str = None,\n paused: bool = None, description: str = None) -> Dict[str, Any]:\n params = remove_empty_elements({\n 'id': filter_id,\n 'expression': expression,\n 'ref': ref,\n 'paused': paused,\n 'description': description,\n 'zone_id': zone_id\n })\n return self._http_request(\n method='PUT',\n url_suffix=f'zones/{zone_id}/filters',\n json_data=[params])", "def update(self, z):\n raise NotImplementedError('Must implement an update step for the filter.')", "def update_filter_params(self, fh):\n (self.data_timestamp, self.framerate,\n self.l, self.d, self.gamma,\n self.eps, self.alex, self.traceswitch) = (fh.attrs['data_timestamp'], fh.attrs['framerate'],\n fh.attrs['l'], fh.attrs['d'], fh.attrs['gamma'],\n fh.attrs['eps'], fh.attrs['alex'], fh.attrs['traceswitch'])", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def update(self, packet):\n raise NotImplementedError", "def filter(self, update):\n\n raise NotImplementedError", "def cloudflare_waf_filter_update_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n filter_id = args['id']\n expression = args.get('expression')\n zone_id = args.get('zone_id', client.zone_id)\n ref = args.get('ref')\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n\n response = client.cloudflare_waf_filter_update_request(\n filter_id, expression, zone_id, description=description, # type: ignore\n paused=paused, ref=ref)\n\n output = response['result']\n\n return CommandResults(\n readable_output=f'Filter {filter_id} was successfully updated.',\n outputs_prefix='CloudflareWAF.Filter',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def filter(self, filter):\n self._filter = filter", "def updateSyncDS(self, change, filter):\n # Merging existing syncDataSructure to accomodate\n if filter in self.syncDataStructure:\n temp = self.syncDataStructure[filter]\n for key, value in change.items():\n if key in temp:\n temp[key] = value\n else:\n temp[key] = value\n # no filter exists in the existing syncDataStructure\n else:\n self.syncDataStructure[filter] = change", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def update(self, DrilldownType=None, NumberOfResults=None, ProtocolStackFilterId=None, SortAscending=None, SortingStatistic=None):\n # type: (str, int, List[str], bool, str) -> Layer23ProtocolStackFilter\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))", "def update_filter(self, measurement, robot_pos):\n new_weights = []\n for p in self.particles:\n p = self.__move(p, self.MOTION)\n angle = self.__measurement(p, robot_pos)\n prob = self.__measurement_prob(angle, measurement,\n self.sense_noise)\n new_weights.append(prob)\n new_weights = np.array(new_weights)\n new_weights /= np.sum(new_weights) # normalized weights\n self.weights = new_weights\n\n # if self.__neff() > self.n / 2:\n self.particles = self.__resample()", "def update(self, filter_id, filter_entry, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.update_with_http_info(filter_id, filter_entry, 
**kwargs)\n else:\n (data) = self.update_with_http_info(filter_id, filter_entry, **kwargs)\n return data", "def update_filters(self, fromTime='', toTime='', language=''):\n if fromTime:\n self._FILTERS['fromTime'] = fromTime\n if fromTime:\n self._FILTERS['toTime'] = toTime\n if language:\n self._set_language(language)", "def add_received_packet_filter(self, filter_func):\n self._received_packet_filters.append(filter_func)", "def update_filters(self, filters: str) -> None:\r\n\r\n log.debug(f'Updating filters to {filters}')\r\n\r\n parts = filters.split('&')\r\n\r\n for part in parts:\r\n value, key = part.split('=')\r\n\r\n if value == 'iv':\r\n self.__payload['prevMinIV'] = self.__payload['minIV']\r\n self.__payload['minIV'] = key.strip()\r\n elif value == 'exiv':\r\n self.__payload['exMinIV'] = key.strip()\r\n else:\r\n log.debug(f'Dont know filter: \"{part}\", ignoring...')\r\n\r\n self.__filters_string = filters", "def update_search_filter(\n *,\n db_session: Session = Depends(get_db),\n search_filter_id: int,\n search_filter_in: SearchFilterUpdate,\n):\n search_filter = get(db_session=db_session, search_filter_id=search_filter_id)\n if not search_filter:\n raise HTTPException(status_code=404, detail=\"A search_filter with this id does not exist.\")\n search_filter = update(\n db_session=db_session, search_filter=search_filter, search_filter_in=search_filter_in\n )\n return search_filter", "def on_filter_changed(self, text):\n\n self._filter.setFilterWildcard(text)\n self.update_label()", "def update_with_http_info(self, filter_id, filter_entry, **kwargs):\n\n all_params = ['filter_id', 'filter_entry']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'filter_id' is set\n if ('filter_id' not in params) or (params['filter_id'] is None):\n raise ValueError(\"Missing the required parameter `filter_id` when calling `update`\")\n # verify the required parameter 'filter_entry' is set\n if ('filter_entry' not in params) or (params['filter_entry'] is None):\n raise ValueError(\"Missing the required parameter `filter_entry` when calling `update`\")\n\n resource_path = '/filters/blacklist/{filterId}'.replace('{format}', 'json')\n path_params = {}\n if 'filter_id' in params:\n path_params['filterId'] = params['filter_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'filter_entry' in params:\n body_params = params['filter_entry']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept([])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def update(self, other):\n for filter, value in other.items():\n self.__setitem__(filter, value)", "def set_ChangeFilter(self, iid, dv, dt, extrema = 
True):\n self.filters[iid] = ChangeFilter(self, dv, dt, extrema)", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def policy_filter(self, policy_filter):\n self._policy_filter = policy_filter", "def dbtrace_filter_change(filter_name_field):\n\n pass", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def filter(ctx, fil, filter_host, filter_port):\n if not fil:\n raise ValueError(\"Must specify at least one filtering operaion (of the form '<filter>=<value>'\")\n client = aceclient.FilterClient(host=filter_host, port=filter_port)\n filters = {}\n for f in fil:\n filters.update(parse_tag(f))\n client.update(**filters)" ]
[ "0.6520881", "0.63559145", "0.62711424", "0.6238306", "0.6197009", "0.60501987", "0.6043722", "0.597732", "0.5932158", "0.58276725", "0.5822518", "0.5703243", "0.56657267", "0.56615776", "0.56402576", "0.5638194", "0.5618479", "0.5612873", "0.55636066", "0.55454075", "0.5543468", "0.55381936", "0.5531149", "0.5499209", "0.5488349", "0.54872555", "0.54466254", "0.54398835", "0.5434854", "0.5419994" ]
0.80393976
0
Fetch a list of all packet filters for a tenant.
def list_packet_filters(self, retrieve_all=True, **_params): return self.list('packet_filters', self.packet_filters_path, retrieve_all, **_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetFilters(self, bulk=False):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n\n filters = self._SendRequest(HTTP_GET, \"/%s/filters\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return filters\n else:\n return [f[\"uuid\"] for f in filters]", "def get_tenant_filters(table, filters=None):\n filters = filters or {}\n\n current_tenant_value = get_current_tenant_value()\n\n if not current_tenant_value:\n return filters\n\n if isinstance(current_tenant_value, list):\n filters[f\"{get_tenant_column(table)}__in\"] = current_tenant_value\n else:\n filters[get_tenant_column(table)] = current_tenant_value\n\n return filters", "def filters(self, **kwargs):\n return config.filters(self._host, self._session, **kwargs)", "def get_filters(self, project, metric_name, callback=None):\n filters = sorted((yield gen.Task(self.client.smembers,\n self.get_filters_names_key(project, metric_name))))\n if callback:\n callback(filters)", "async def list(self, ctx):\r\n try:\r\n if ctx.message.server.id not in self.adkillr:\r\n await self.bot.say(\"There are no filters set for this server.\")\r\n else:\r\n await self.bot.say(\"The current filters are\\n{}.\".format(\", \".join(self.adkillr[ctx.message.server.id]['filters'])))\r\n except KeyError:\r\n await self.bot.say(\"There are no filters set for this server.\")", "def get_filters(self):", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def allbroadbandfilters(self):\n return [exposuretimeandbroadbandfilter[1] for exposuretimeandbroadbandfilter in self.__allexposuretimesandbroadbandfilters]", "def get_filters(self) -> dict:\n return self._filters", "def list(self, resource=None):\n url = '/resource_filters'\n if resource is not None:\n url += '?resource=%s' % resource\n return self._list(url, \"resource_filters\")", "def list_filters():\n with Database() as base:\n filters = {name: base.get_filter(name) for name in\n base.get_filter_names()}\n\n name = Column(data=[filters[f].name for f in filters], name='Name')\n description = Column(data=[filters[f].description for f in filters],\n name='Description')\n wl = Column(data=[filters[f].effective_wavelength for f in filters],\n name='Effective Wavelength', unit=u.nm, format='%d')\n filter_type = Column(data=[filters[f].trans_type for f in filters],\n name='Type')\n samples = Column(data=[filters[f].trans_table[0].size for f in filters],\n name=\"Points\")\n\n t = Table()\n t.add_columns([name, description, wl, filter_type, samples])\n t.sort(['Effective Wavelength'])\n t.pprint(max_lines=-1, max_width=-1)", "def getCbsdRecords(self, filters=[]):\n return self._getRecords('cbsd', filters)", "def filters(self):\n return self._filters", "def cloudflare_waf_filter_list_request(self, args: dict, page: int = None, page_size: int = None) -> Dict[str, Any]:\n params = remove_empty_elements({\n 'id': args.get('filter_id'),\n 'expression': args.get('expression'),\n 'ref': args.get('ref'),\n 'paused': args.get('paused'),\n 'description': args.get('description'),\n 'page': page,\n 'per_page': page_size\n })\n zone_id = args.get('zone_id')\n return self._http_request(\n method='GET',\n url_suffix=f'zones/{zone_id}/filters',\n params=params)", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def 
get_list_filters(self):\n # look in session for the saved search...\n filters = ListFilter()\n filters.get_list_filter(self.table)\n return filters", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def filters(self):\n return self.__filters", "def group_filters(self, per_page=None, page=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'filters')\r\n return http.Request('GET', url, params), parsers.parse_json", "def fetch_query_filters(self, dimension_alias):\n dimension_filters = []\n\n for filter_ in self._filters:\n filter_fields = [\n field\n for field in filter_.definition.fields_()\n if all(table == self.dataset.table for table in field.tables_)\n ]\n\n if not filter_fields:\n continue\n\n if all(field.name == dimension_alias for field in filter_fields):\n dimension_filters.append(filter_)\n\n return dimension_filters", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def list_devices(cls, filters={}):\n return cls.dbdriver.list_devices(filters)", "def get_devices(url, token, filter_by_lst=None, filter_by_comp=None):\n assert filter_by_lst is None or filter_by_comp is None\n\n res = []\n for dev in get(url + '/states', token):\n if filter_by_comp is not None:\n comp, _ = dev['entity_id'].split(\".\")\n if comp == filter_by_comp:\n res.append(dev)\n continue\n if filter_by_lst is not None:\n if dev['entity_id'] in filter_by_lst:\n res.append(dev)\n continue\n res.append(dev['entity_id'])\n return res", "def broadbandfilters(self):\n all = self.allbroadbandfilters\n return [all[layer-1] for layer in self.__layers]", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def getZoneRecords(self, filters=[]):\n return self._getRecords('zone', filters)", "async def getQueryFilters(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getQueryFilters()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/query-options/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", 
url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/query-options/\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]", "def get_filter_parameters(self):\n if not self.should_filter():\n return []\n\n fields = []\n for filter_backend in self.view.filter_backends:\n fields += self.get_filter_backend_parameters(filter_backend())\n\n return fields", "def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types" ]
[ "0.68841", "0.6719281", "0.66721904", "0.6203976", "0.62009436", "0.6171794", "0.6166032", "0.61062443", "0.59795624", "0.5976046", "0.5959277", "0.58664405", "0.5858418", "0.58559227", "0.5850228", "0.5850065", "0.58031726", "0.57973117", "0.5790999", "0.578245", "0.57679826", "0.5730899", "0.57275856", "0.57148576", "0.57116413", "0.56770426", "0.5637476", "0.56227976", "0.55957276", "0.5584208" ]
0.69572335
0
Fetch information of a certain packet filter.
def show_packet_filter(self, packet_filter_id, **_params): return self.get(self.packet_filter_path % packet_filter_id, params=_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter(self, pkt):\n return pkt", "def getFilter(self, channel, group, node, unitCode=0, resp=None):\n resp = self.XAPCommand('FILTER', channel, group, node, unitCode=unitCode, rtnCount=9)\n type = None\n freq = None\n gain = None\n bandwidth = None\n if int(resp[5]) is not 0:\n type = int(resp[5])\n freq = float(resp[6])\n if type is 4 or type is 5:\n gain = float(resp[7])\n elif type is 6:\n gain = float(resp[7])\n bandwidth = float(resp[8])\n elif type is 8 or type is 9 or type is 10:\n gain = int(resp[7])\n bandwidth = int(resp[8])\n elif type is 11:\n gain = float(resp[7])\n bandwidth = float(resp[8])\n return {\"type\": type,\n \"frequency\": freq,\n \"gain\": gain,\n \"bandwidth\": bandwidth,\n }", "def get_filter(image):\n logging.debug('Retrieving filter for {}'.format(image))\n try:\n filter = fits.getheader(image)['FILTER']\n except KeyError:\n logging.debug('No FILTER key found, trying FILTER1')\n #The 814 image has the filter information under the keyword FILTER2:\n filter = fits.getheader(image)['FILTER1']\n if filter[0].lower()!='f':\n logging.debug('FILTER1 does not match a filter designation, trying FILTER2')\n filter = fits.getheader(image)['FILTER2']\n if filter[0].lower()!='f':\n logging.critical('No valid filter could be found in {}'.format(image))\n filemanagement.shutdown('No valid filter found in the header on {}'.format(image))\n return filter", "def get_filters(self):", "async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name", "def getFilter(self, type: int) -> int:\n ...", "def askfilter(self):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"filter?\" + \"\\r\\n\")\n r = m.read(100)\n r = r[9:]\n result = string.strip(r)\n return result\n else:\n pass", "def _filter_read(self, timeout=None):\n p = self._read(timeout)\n self._read_counter += 1\n if self._debug:\n print \"_filter_read: got a packet(%d): %s\" % (self._read_counter, p)\n\n # Pass the received packet through the filter functions:\n if p is not None:\n for filter_func in self._received_packet_filters:\n p = filter_func(p)\n # Stop now if the packet doesn't need further processing:\n if p is None:\n break\n\n # Return the packet (if there was no timeout and it wasn't filtered)\n return p", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def terraform_output_filter(filter, payload):\n if filter in payload:\n return payload[filter]['value']\n else:\n return None", "def get_filter_pillar(filter_name, pillar_key=\"acl\", pillarenv=None, saltenv=None):\n pillar_cfg = _get_pillar_cfg(pillar_key, pillarenv=pillarenv, saltenv=saltenv)\n return _lookup_element(pillar_cfg, filter_name)", "def __getHeaderInfo(self, decoded_data):\n\t\tip = decoded_data.child()\n\t\ttcp = ip.child()\n\t\t#src = (ip.get_ip_src(), tcp.get_th_sport())\n\t\ttry:\tsrc = ip.get_ip_src()\n\t\texcept:\tsrc = '?'\n\t\t#dst = (ip.get_ip_dst(), tcp.get_th_dport())\n\t\ttry:\tdst = ip.get_ip_dst()\n\t\texcept:\tdst = '?'\n\t\t#data = tcp.get_data_as_string()\n\t\tdata = tcp.get_packet()\n\t\treturn (src, dst, data)", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')", "def getFilter(self):\n\n return self.filter", "def GetFilter(self, filter_uuid):\n query = []\n\n return self._SendRequest(HTTP_GET,\n \"/%s/filters/%s\" % (GANETI_RAPI_VERSION,\n filter_uuid),\n query, None)", "def retrieve(self, trace_filter={}, limit=0):\n\n if isinstance(limit, int) and limit > 0:\n max_r = limit\n else:\n max_r = 0\n\n try:\n packets = self.__db.find_packets(self._collection, trace_filter, max_r)\n except MemoryError:\n print(\"Warning: cannot allocate sufficient memory for packets, perhaps you are using Windows?\")\n return []\n except:\n return []\n\n # Attempt to decode base64 payloads.\n for packet in packets:\n if packet[\"tcp_info\"] is not None:\n if isinstance(packet[\"tcp_info\"][\"payload\"], bytes):\n try:\n packet[\"tcp_info\"][\"payload\"] = b64decode(packet[\"tcp_info\"][\"payload\"])\n except:\n continue\n\n if packet[\"tls_info\"] is not None:\n for i, data in enumerate(packet[\"tls_info\"][\"data\"]):\n if isinstance(data, bytes):\n try:\n packet[\"tls_info\"][\"data\"][i] = b64decode(data)\n except:\n continue\n\n return packets", "def query(self, filters, options):\n result = []\n for channel in self.channels():\n cp = run(['ipmitool', 'lan', 'print', f'{channel}'], capture_output=True)\n if cp.returncode != 0:\n raise CallError(f'Failed to get details from channel {channel}: {cp.stderr}')\n\n data = {'channel': channel, 'id': channel}\n for line in filter(lambda x: ':' in x, cp.stdout.decode().split('\\n')):\n name, value = line.split(':', 1)\n if not name:\n continue\n\n name = name.strip()\n value = value.strip()\n\n if name == 'IP Address':\n data['ipaddress'] = value\n elif name == 'Subnet Mask':\n data['netmask'] = value\n elif name == 'Default Gateway IP':\n data['gateway'] = value\n elif name == '802.1q VLAN ID':\n if value == 'Disabled':\n data['vlan'] = None\n else:\n data['vlan'] = value\n elif name == 'IP Address Source':\n data['dhcp'] = False if value == 'Static Address' else True\n\n result.append(data)\n\n return filter_list(result, filters, options)", "def filters(self):\n\t\treturn self.local_filter", "def current_filter(self):\n return self._proxy.get(\"current_filter\", \"filterwheel\")", "def extractFilterStatistics(image, mask, name, filter_type=\"prewitt\", sigma=1, binCount=8):\n image_filt = filterStatistical(image, filter_type, sigma)\n features = extractFeatures(image_filt, mask, name, binCount=binCount, features=[\"FO\"])\n return features", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def get_filter_name(self):\n pass", "def filter(self, filter_id):\r\n self.require_collection()\r\n url = '{0}/{1}'.format(self.get_url(), filter_id)\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def device_get(self, filters={}):\n return {}", "def get_hst_filter(header):\n if 'FILTER' in header:\n return header['FILTER'].upper()\n \n if header['INSTRUME'].strip() == 'ACS':\n for i in [1,2]:\n filter_i = header['FILTER{0:d}'.format(i)]\n if 'CLEAR' in filter_i:\n continue\n else:\n filter = filter_i\n \n elif header['INSTRUME'] == 'WFPC2':\n filter = header['FILTNAM1']\n else:\n raise KeyError ('Filter keyword not found for instrument {0}'.format(header['INSTRUME']))\n \n return filter.upper()", "def get(self,filt):\n indx = [i for i in 
xrange(len(self.header)) if filt == self.header[i]]\n return self.components[indx[0]].getSplines()", "def filter(self) -> Optional[str]:\n return pulumi.get(self, \"filter\")", "def get_rx_rf_filter(self, rx_rf_filter):\n\t\treturn self.rx_rf_filter", "def getIntFromFilter(cls, name):\n try:\n return cls.SUPPORTED_FILTERS.index(name)\n except ValueError:\n raise NotImplementedError(f\"band '{name}' not supported by this ID packer.\")", "def broadbandfilter(self):\n _, = self.broadbandfilters\n return _" ]
[ "0.6224088", "0.5915098", "0.57071084", "0.5666445", "0.5662054", "0.5644824", "0.55097526", "0.54657936", "0.5435283", "0.5399172", "0.5383127", "0.5336129", "0.53209823", "0.52840376", "0.527451", "0.52673495", "0.5262919", "0.5229014", "0.5219188", "0.52071005", "0.52012235", "0.5198318", "0.5171205", "0.513004", "0.5114295", "0.5105101", "0.5098877", "0.5088131", "0.50864244", "0.5081879" ]
0.6721544
0
Delete the specified packet filter.
def delete_packet_filter(self, packet_filter_id): return self.delete(self.packet_filter_path % packet_filter_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cloudflare_waf_filter_delete_request(self, filter_id: str, zone_id: str) -> Dict[str, Any]:\n return self._http_request(\n method='DELETE',\n url_suffix=f'zones/{zone_id}/filters',\n params={'id': filter_id})", "def cloudflare_waf_filter_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n\n filter_id = args['filter_id']\n zone_id = args.get('zone_id', client.zone_id)\n\n output = client.cloudflare_waf_filter_delete_request(filter_id, zone_id)\n return CommandResults(\n readable_output=f'Filter {filter_id} was successfully deleted.',\n raw_response=output\n )", "def delete_filter(*, db_session: Session = Depends(get_db), search_filter_id: int):\n search_filter = get(db_session=db_session, search_filter_id=search_filter_id)\n if not search_filter:\n raise HTTPException(status_code=404, detail=\"A search_filter with this id does not exist.\")\n\n delete(db_session=db_session, search_filter_id=search_filter_id)", "def remove_received_packet_filter(self, filter_func):\n self._received_packet_filters.remove(filter_func)", "def DeleteFilter(self, uuid):\n body = {}\n query = []\n\n return self._SendRequest(HTTP_DELETE,\n (\"/%s/filters/%s\" % (GANETI_RAPI_VERSION, uuid)),\n query, body)", "def delete(self, filter_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_with_http_info(filter_id, **kwargs)\n else:\n (data) = self.delete_with_http_info(filter_id, **kwargs)\n return data", "def remove_crds_filter(self, filter):\n if filter in self.filters:\n self.filters.remove(filter)", "def filter_del(name):\n\n\tweechat.command(weechat.buffer_search_main(), \"/mute filter del %s\" % name)", "def delete_with_http_info(self, filter_id, **kwargs):\n\n all_params = ['filter_id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'filter_id' is set\n if ('filter_id' not in params) or (params['filter_id'] is None):\n raise ValueError(\"Missing the required parameter `filter_id` when calling `delete`\")\n\n resource_path = '/filters/blacklist/{filterId}'.replace('{format}', 'json')\n path_params = {}\n if 'filter_id' in params:\n path_params['filterId'] = params['filter_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept([])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def delete_firewall_policy(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_policy = self.network.find_firewall_policy(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_policy(\n firewall_policy, ignore_missing=False\n )\n except 
exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall policy %s not found for deleting', name_or_id\n )\n return False\n return True", "def delete_cards(self, filterdict):\n\n self._collection.delete_many(self._constrain_keys(filterdict))", "async def delete_one(\n self,\n *,\n filter: Optional[Dict[str, Any]] = DEFAULT_FILTER,\n session: Optional[Any] = DEFAULT_SESSION,\n **kwargs: Any,\n ) -> DeleteResult:\n return await self._database.delete_one(\n self.name, filter=filter, session=session, **kwargs\n )", "def deleteUserSecurityFilter(self, name, _type, s=None, p=None, o=None, g=None):\n self._client.deleteUserSecurityFilter(name, _type, s, p, o, g)", "def removeAutoSaveDeleteFilter(filter):", "def delete_floatingip(self, floatingip):\r\n return self.delete(self.floatingip_path % (floatingip))", "def remove_filter():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n # reads the specific filter to add\n filter = request.json['filter']\n # reads all the filters\n all_filters = request.json['all_filters']\n\n new_handler = lh.get_handler_for_process_and_session(process, session).remove_filter(filter, all_filters)\n lh.set_handler_for_process_and_session(process, session, new_handler)\n\n return jsonify({\"status\": \"OK\"})\n\n return jsonify({\"status\": \"FAIL\"})", "def delete_firewall(self, firewall):\r\n return self.delete(self.firewall_path % (firewall))", "def delete(pav_cfg, id_dir: Path, filter_func: Callable[[Path], bool] = default_filter,\n transform: Callable[[Path], Any] = None,\n verbose: bool = False):\n\n count = 0\n msgs = []\n\n lock_path = id_dir.with_suffix('.lock')\n try:\n with lockfile.LockFile(lock_path, timeout=1):\n for path in select(pav_cfg, id_dir=id_dir, filter_func=filter_func,\n transform=transform).paths:\n try:\n shutil.rmtree(path.as_posix())\n except OSError as err:\n msgs.append(\"Could not remove {} {}: {}\"\n .format(id_dir.name, path.as_posix(), err))\n continue\n count += 1\n if verbose:\n msgs.append(\"Removed {} {}.\".format(id_dir.name, path.name))\n except TimeoutError:\n msgs.append(\"Could not delete in dir '{}', lock '{}' could not be acquired\"\n .format(id_dir, lock_path))\n\n reset_pkey(id_dir)\n return count, msgs", "def test_delete_saved_filter_success(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n _, token = AuthToken.objects.create(self.user_1)\n\n response = \\\n self.client.delete(url,\n HTTP_AUTHORIZATION='Token {}'.format(token),\n content_type=\"application/json\")\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(responseDict[\"message\"])", "def delete_firewall_group(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_group = self.network.find_firewall_group(\n name_or_id, ignore_missing=False, **filters\n )\n self.network.delete_firewall_group(\n firewall_group, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall group %s not found for deleting', name_or_id\n )\n return False\n return True", "def delete_firewall_rule(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n try:\n firewall_rule = self.network.find_firewall_rule(\n name_or_id, 
ignore_missing=False, **filters\n )\n self.network.delete_firewall_rule(\n firewall_rule, ignore_missing=False\n )\n except exceptions.ResourceNotFound:\n self.log.debug(\n 'Firewall rule %s not found for deleting', name_or_id\n )\n return False\n return True", "def delete_firewall_policy(self, firewall_policy):\r\n return self.delete(self.firewall_policy_path % (firewall_policy))", "def delete(self, ip): # pylint: disable=invalid-name\n return self.request(\"DELETE\", ip)", "def del_filters(fnames):\n with Database(writable=True) as base:\n names = base.get_filter_names()\n for fname in fnames:\n if fname in names:\n base.del_filter(fname)\n print(\"Removing filter {}\".format(fname))\n else:\n print(\"Filter {} not in the database\".format(fname))", "def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)", "def __delitem__(self, filter):\n dict.__delitem__(self, filter)\n del self.__filter_order[filter]\n self.__cached_count = None\n\n match = Query.FILTER_REGEX.match(filter)\n property = match.group(1)\n operator = match.group(3)\n\n if operator in self.INEQUALITY_OPERATORS:\n assert self.__inequality_count >= 1\n assert property == self.__inequality_prop\n self.__inequality_count -= 1\n if self.__inequality_count == 0:\n self.__inequality_prop = None", "def do_del(self, args):\n\t\targs = args.split()\n\t\tif not args:\n\t\t\tself.parent.printErr(\"Missing argument(s)\")\n\t\t\treturn False\n\t\tftype = args[0]\n\t\tvalues = args[1:]\n\n\t\ttry:\n\t\t\tif len(values) == 0:\n\t\t\t\tdel self.parent.filter[ftype]\n\t\t\telse:\n\t\t\t\tfor value in values:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.parent.filter[ftype].remove(value)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tself.parent.printErr(\"Unable to remove %s: No such item\" % (value))\n\t\t\t\tif len(self.parent.filter[ftype]) == 0:\n\t\t\t\t\tdel self.parent.filter[ftype]\n\t\texcept KeyError:\n\t\t\tself.parent.printErr(\"No such filter: '%s'\" % ftype)\n\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def test_delete_saved_filter_no_auth(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def minimum_bandwidth_rule_delete(request, policy_id, rule_id):\n\n neutronclient(request).delete_minimum_bandwidth_rule(rule_id, policy_id)", "async def remove(self, ctx, *, link):\r\n try:\r\n if link not in self.adkillr[ctx.message.server.id]['filters']:\r\n await self.bot.say(\"That link is not in the current filters.\")\r\n else:\r\n self.adkillr[ctx.message.server.id]['filters'].remove(link)\r\n await self.bot.say(\"Filter removed.\")\r\n except KeyError:\r\n await self.bot.say(\"There are no filters set.\")" ]
[ "0.7065296", "0.6998032", "0.694061", "0.68922025", "0.66605544", "0.64817053", "0.60303646", "0.5899795", "0.5713327", "0.56261694", "0.55948937", "0.5584926", "0.5552384", "0.55350125", "0.54980665", "0.5430551", "0.533297", "0.53167546", "0.52826464", "0.5256641", "0.5255871", "0.5249286", "0.5182059", "0.5178364", "0.51700604", "0.5136236", "0.5131732", "0.51168954", "0.5112528", "0.50937825" ]
0.82072103
0
Call do_request with the default retry configuration. Only idempotent requests should retry failed connection attempts.
def retry_request(self, method, action, body=None, headers=None, params=None): max_attempts = self.retries + 1 for i in range(max_attempts): try: return self.do_request(method, action, body=body, headers=headers, params=params) except exceptions.ConnectionFailed: # Exception has already been logged by do_request() if i < self.retries: _logger.debug(_('Retrying connection to Neutron service')) time.sleep(self.retry_interval) raise exceptions.ConnectionFailed(reason=_("Maximum attempts reached"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call(self, request=None, *args, **kwargs):\n if request is not None:\n self.request = request\n\n retry = self.request.configuration.retry\n if not isinstance(retry, SimpleRetry):\n raise Error('Currently only the fast retry strategy is supported')\n\n last_exception = None\n for i in range(0, retry.max_retry):\n try:\n if i > 0:\n retry.sleep_jitter()\n\n self.call_once()\n return self.response\n\n except Exception as ex:\n last_exception = RequestFailed(message='Request failed', cause=ex)\n logger.debug(\"Request %d failed, exception: %s\" % (i, ex))\n\n # Last exception - throw it here to have a stack\n if i+1 == retry.max_retry:\n raise last_exception\n\n raise last_exception", "def _mexe(self, request, override_num_retries=1,\n retry_handler=None):\n log.debug('Method: %s' % request.method)\n log.debug('Url: %s' % request.url)\n log.debug('Data: %s' % request.body)\n log.debug('Headers: %s' % request.headers)\n returnValue = None\n response = None\n body = None\n ex = None\n if override_num_retries is None:\n num_retries = config.getint('TxBoto', 'num_retries', self.num_retries)\n else:\n num_retries = override_num_retries\n i = 0\n while i <= num_retries:\n # Use binary exponential backoff to desynchronize client requests.\n next_sleep = min(random.random() * (2 ** i),\n config.get('TxBoto', 'max_retry_delay', 60))\n try:\n request.authorize(connection=self)\n log.debug('Final headers: %s' % request.headers)\n request.start_time = datetime.now()\n\n response = yield self.send_request(request)\n response_body = yield response.content()\n response.reason = code2status(response.code, 'N/A')\n log.debug('Response headers: %s' % response.headers)\n location = response.headers.getRawHeaders('location')\n if location:\n location = location[0]\n if callable(retry_handler):\n status = yield defer.maybeDeferred(retry_handler, response,\n response_body, i,\n next_sleep)\n if status:\n msg, i, next_sleep = status\n if msg:\n log.debug(msg)\n time.sleep(next_sleep)\n continue\n if response.code in [500, 502, 503, 504]:\n msg = 'Received %d response. ' % response.code\n msg += 'Retrying in %3.1f seconds' % next_sleep\n log.debug(msg)\n body = response_body\n if isinstance(body, bytes):\n body = body.decode('utf-8')\n elif response.code < 300 or response.code >= 400 or \\\n not location:\n # don't return connection to the pool if response contains\n # Connection:close header, because the connection has been\n # closed and default reconnect behavior may do something\n # different than new_http_connection. Also, it's probably\n # less efficient to try to reuse a closed connection.\n if self.request_hook is not None:\n yield defer.maybeDeferred(\n self.request_hook.handle_request_data,\n request, response)\n returnValue = (response, response_body,)\n break\n except PleaseRetryException as e:\n log.debug('encountered a retry exception: {}'.foramt(e))\n response = e.response\n ex = e\n except self.http_exceptions as e:\n if isinstance(e, self.http_unretryable_exceptions):\n log.debug('encountered unretryable {} exception, re-raising'\n .format(e.__class__.__name__))\n raise\n log.debug('encountered {} exception, reconnecting'\n .format(e.__class__.__name__))\n ex = e\n time.sleep(next_sleep)\n i += 1\n\n if isinstance(returnValue, tuple):\n defer.returnValue(returnValue)\n # If we made it here, it's because we have exhausted our retries\n # and stil haven't succeeded. 
So, if we have a response object,\n # use it to raise an exception.\n # Otherwise, raise the exception that must have already happened.\n if self.request_hook is not None:\n yield defer.maybeDeferred(self.request_hook.handle_request_data,\n request, response, error=True)\n if response:\n raise BotoServerError(response.status, response.reason, body)\n elif ex:\n raise ex\n else:\n msg = 'Please report this exception as a TxBoto Issue!'\n raise BotoClientError(msg)", "def request(self, *args, **kwargs):\n\n ratelimit_retries, temporary_error_retries, ident_retries = 0, 0, {}\n\n while True:\n try:\n try:\n return self._request(*args, **kwargs)\n except Exception as exc:\n self.error_processor(exc)\n raise\n\n except Retry as exc:\n ident_retries.setdefault(exc.retry_ident, 0)\n ident_retries[exc.retry_ident] += 1\n if ident_retries[exc.retry_ident] <= exc.retry_count:\n self.logger.warning('Retry(%s) after calls(%s/%s) since(%s) on: %s',\n ident_retries[exc.retry_ident], self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time,\n exc.retry_ident)\n if exc.wait_seconds:\n self.sleep(exc.wait_seconds,\n log_reason='retry request: {}'.format(exc.retry_ident))\n else:\n raise self.RetryExceeded(\n exc.result, retry_ident=exc.retry_ident, retry_count=exc.retry_count)\n\n except RatelimitError as exc:\n ratelimit_retries += 1\n if ratelimit_retries <= self.ratelimit_retries:\n self.logger.warning('Retry(%s) after calls(%s/%s) since(%s) on error: %r',\n ratelimit_retries, self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time, exc)\n self.sleep(exc.wait_seconds is not None and exc.wait_seconds\n or self.ratelimit_wait_seconds,\n log_reason='ratelimit wait')\n else:\n if ratelimit_retries - 1:\n raise self.RetryExceeded(exc, retry_count=ratelimit_retries - 1)\n raise\n\n except TemporaryError as exc:\n temporary_error_retries += 1\n if temporary_error_retries <= self.temporary_error_retries:\n self.logger.debug('Retry(%s) after calls(%s/%s) since(%s) on error: %r',\n temporary_error_retries, self.calls_count,\n self.calls_elapsed_seconds, self.first_call_time, exc)\n self.sleep(exc.wait_seconds is not None and exc.wait_seconds\n or self.temporary_error_wait_seconds,\n log_reason='temporary error wait')\n else:\n if temporary_error_retries - 1:\n raise self.RetryExceeded(exc, retry_count=temporary_error_retries - 1)\n raise", "def _retry_request(self, request, timeout=2, attempts=3):\n import googleapiclient\n\n try:\n return request.execute()\n except BrokenPipeError as ex:\n if attempts > 0:\n time.sleep(timeout)\n return self._retry_request(request, timeout * 2, attempts - 1)\n raise ex\n except googleapiclient.errors.HttpError as ex:\n log_verbose_traceback(ex)\n raise ex\n except Exception as ex:\n log_verbose_traceback(ex)\n raise ex", "def make_request(method, url, headers=None, data=None, retries=3):\n no_retry_status = [404, 401, 403]\n may_retry_status = [408, 500, 502, 503]\n\n if not retries:\n return requests.request(method=method,\n url=url,\n headers=headers,\n data=data)\n\n while retries:\n r = requests.request(method=method,\n url=url,\n headers=headers,\n data=data)\n if r.status_code in no_retry_status:\n return r\n\n elif r.status_code in may_retry_status:\n retries -= 1\n time.sleep(1)\n\n if retries == 0:\n return r\n continue\n\n else:\n return r", "def do_request(method, url, data=None, headers=None):\n try:\n if method == 'GET':\n resp = requests.get(url, headers=headers)\n return resp\n elif method == 'POST':\n resp = requests.post(url, json=data, 
headers=headers)\n return resp\n elif method == 'PATCH':\n resp = requests.patch(url, json=data, headers=headers)\n return resp\n except Exception, e:\n print \"Retry {} with {}, {}\".format(str(e), url, data)\n raise e", "def request(self, request_method, url, json_data=None):\n for i in range(int(str(self.retry_n))):\n LOG.debug(\n \"JovianDSS: Sending request of type %(type)s to %(url)s \\\n Attempt: %(num)s.\",\n {'type': request_method,\n 'url': url,\n 'num': i})\n\n if json_data is not None:\n LOG.debug(\n \"JovianDSS: Sending data: %s.\", str(json_data))\n try:\n\n ret = self.request_routine(url, request_method, json_data)\n\n # Work aroud for case when we have backend internal Fail.\n # OS Fail\n if ret[\"code\"] == 500:\n if ret[\"error\"] is not None:\n if (\"errno\" in ret[\"error\"]) and \\\n (\"class\" in ret[\"error\"]):\n if (ret[\"error\"][\"errno\"] is 2) and\\\n (ret[\"error\"][\"class\"] ==\n \"exceptions.OSError\"):\n LOG.debug(\n \"JovianDSS: Facing exceptions.OSError!\")\n continue\n\n return ret\n except requests.HTTPError as err:\n LOG.debug(\"Unable to execute: %s\", err)\n continue\n except requests.ConnectionError as err:\n LOG.debug(\"Unable to execute: %s\", err)\n\n msg = (_('%(times) faild in a row') % {'times': i})\n\n raise exception.JDSSRESTProxyException(host=url, reason=msg)", "def _request(self, method, url, retries=None, **kwargs):\n if retries is None:\n retries = self.retries\n\n try:\n LOG.debug(\"Attempting: %s %s\", method, kwargs)\n if 'SSL_CA' in os.environ:\n return method(url, verify=os.environ['SSL_CA'], **kwargs)\n else:\n return method(url, **kwargs)\n except (requests.exceptions.SSLError, OpenSSL.SSL.Error):\n if 'SSL_CA' in os.environ:\n LOG.info(\"SSL verification failed, trying default certs.\")\n return method(url, **kwargs)\n else:\n LOG.error(\"SSL verification failed.\")\n raise\n except Exception:\n if retries > 0:\n self._request(method, url, retries=retries-1, **kwargs)\n else:\n raise", "def retry_request(\n self,\n tapi_exception,\n error_message,\n repeat_number,\n response,\n request_kwargs,\n api_params,\n **kwargs\n ):\n return False", "def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. 
+ random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')", "def test_retry(self):\n self.response.raise_for_status.side_effect = \\\n [requests.HTTPError(), None]\n\n wsgi._retryable('get', 'http://some.thing')\n\n assert self.session.get.call_count == 2", "def _mexe(self, request, sender=None, override_num_retries=None):\r\n boto.log.debug('Method: %s' % request.method)\r\n boto.log.debug('Path: %s' % request.path)\r\n boto.log.debug('Data: %s' % request.body)\r\n boto.log.debug('Headers: %s' % request.headers)\r\n boto.log.debug('Host: %s' % request.host)\r\n response = None\r\n body = None\r\n e = None\r\n if override_num_retries is None:\r\n num_retries = config.getint('Boto', 'num_retries', self.num_retries)\r\n else:\r\n num_retries = override_num_retries\r\n i = 0\r\n connection = self.get_http_connection(request.host, self.is_secure)\r\n while i <= num_retries:\r\n # Use binary exponential backoff to desynchronize client requests\r\n next_sleep = random.random() * (2 ** i)\r\n try:\r\n # we now re-sign each request before it is retried\r\n request.authorize(connection=self)\r\n if callable(sender):\r\n response = sender(connection, request.method, request.path,\r\n request.body, request.headers)\r\n else:\r\n connection.request(request.method, request.path, request.body,\r\n request.headers)\r\n response = connection.getresponse()\r\n location = response.getheader('location')\r\n # -- gross hack --\r\n # httplib gets confused with chunked responses to HEAD requests\r\n # so I have to fake it out\r\n if request.method == 'HEAD' and getattr(response, 'chunked', False):\r\n response.chunked = 0\r\n if response.status == 500 or response.status == 503:\r\n boto.log.debug('received %d response, retrying in %3.1f seconds' %\r\n (response.status, next_sleep))\r\n body = response.read()\r\n elif response.status < 300 or response.status >= 400 or \\\r\n not location:\r\n self.put_http_connection(request.host, self.is_secure, connection)\r\n return response\r\n else:\r\n scheme, request.host, request.path, params, query, fragment = \\\r\n urlparse.urlparse(location)\r\n if query:\r\n request.path += '?' + query\r\n boto.log.debug('Redirecting: %s' % scheme + '://' + request.host + request.path)\r\n connection = self.get_http_connection(request.host, scheme == 'https')\r\n continue\r\n except self.http_exceptions, e:\r\n for unretryable in self.http_unretryable_exceptions:\r\n if isinstance(e, unretryable):\r\n boto.log.debug(\r\n 'encountered unretryable %s exception, re-raising' %\r\n e.__class__.__name__)\r\n raise e\r\n boto.log.debug('encountered %s exception, reconnecting' % \\\r\n e.__class__.__name__)\r\n connection = self.new_http_connection(request.host, self.is_secure)\r\n time.sleep(next_sleep)\r\n i += 1\r\n # If we made it here, it's because we have exhausted our retries and stil haven't\r\n # succeeded. 
So, if we have a response object, use it to raise an exception.\r\n # Otherwise, raise the exception that must have already happened.\r\n if response:\r\n raise BotoServerError(response.status, response.reason, body)\r\n elif e:\r\n raise e\r\n else:\r\n raise BotoClientError('Please report this exception as a Boto Issue!')", "def retry(num=5):\n s = requests.Session()\n retries = Retry(total=num, backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n s.mount('http://', HTTPAdapter(max_retries=retries))\n\n return s", "def _retry(self, result, method, url, params_dict, **kwargs):\n return result", "def _get_http_request(url, retry=0):\n try:\n return requests.get(url).json()\n except Exception:\n if retry > 0:\n retry = retry - 1\n # FIXME use invenio-logging?\n return _get_http_request(url=url, retry=retry)", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 金额\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.1.2',\n '_prod_code': 'DEFINCASHIER',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n 
_request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def request( self, method, location, parameters, headers, secure ):\n\t\tif self.__current_proxy != self.proxy:\n\t\t\tself.reset()\n\t\t\tprint \"proxy changed: %r\" % (self,)\n\t\t\n\t\tif self.proxy_must_match:\n\t\t\tif ( self.proxy is None ) or ( not self.proxy_must_match.search(self.proxy) ):\n\t\t\t\traise ValueError(\"Invalid proxy %r!!! Conflicts with proxy_must_match value!\" % (self.proxy,))\n\t\t\n\t\tif self.print_requests:\n\t\t\tprint \"%s %s %r %r\" % (secure and 'HTTPS' or 'HTTP', method, location, self.__use_this_proxy,)\n\t\t\n\t\tif self.requests_before_reconnect > 0:\n\t\t\tif self.requests_count > self.requests_before_reconnect:\n\t\t\t\t#open new connection\n\t\t\t\tself.requests_count = 1\n\t\t\t\tself.reset()\n\t\t\tself.requests_count += 1\n\n\t\tif secure:\n\t\t\tconn = self.https\n\t\telse:\n\t\t\tconn = self.http\n\n\t\tif self.debug:\n\t\t\tprint conn\n\n\t\tif headers and 'Referrer' in headers:\n\t\t\traise ValueError(\"Incorrect spelling - use referer not referrer\")\n\n\t\t# This strips out the :443 of https connections from the Host header by setting it manually.\n\t\tif not 'Host' in headers:\n\t\t\theaders['Host'] = self.site\n\t\t\n\t\ttry:\n\t\t\ttry:\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\texcept socket.error:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\texcept httplib.CannotSendRequest:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\t\n\t\t\ttry:\n\t\t\t\tresp = conn.getresponse()\n\t\t\texcept httplib.BadStatusLine:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\t\tresp = conn.getresponse()\n\t\t\texcept httplib.CannotSendRequest:\n\t\t\t\tconn.close()\n\t\t\t\tconn.request( method, location, parameters, headers )\n\t\t\t\tresp = conn.getresponse()\n\t\texcept Exception, e:\n\t\t\tprint \"Reset browser.py %r because error %r\" % (self, e,)\n\t\t\tself.reset()\n\t\t\traise\n\t\t\n\t\tcookie = resp.getheader( 'set-cookie' )\n\t\tif cookie:\n\t\t\tself.cookies.add( cookie )\n\t\t\n\t\tprotocol = 'http'\n\t\tif secure:\n\t\t\tprotocol = 'https'\n\t\tself.last_visited = '%s://%s%s' % (protocol, self.site, location)\n\t\t\n\t\t# if this is a redirect:\n\t\tif resp.status >= 300 and resp.status < 400:\n\t\t\t# check if the site was specified and it differs from\n\t\t\t# the current one\n\t\t\tconn.close()\n\t\t\tlocation = resp.getheader('location')\n\t\t\t#print \"redirecting to \", location\n\t\t\tparsed_location = urlparse.urlparse(location)\n\t\t\thttp_or_https = protocol\n\t\t\tcls = LocalRedirect\n\t\t\tif parsed_location[1]:\n\t\t\t\tif parsed_location[1] != self.site:\n\t\t\t\t\tcls = ExternalRedirect\n\t\t\t\telse:\n\t\t\t\t\t# ignore the beginning bit\n\t\t\t\t\thttp_or_https = 
parsed_location[0]\n\t\t\t\t\tparsed_location = list(parsed_location)\n\t\t\t\t\tparsed_location[0] = ''\n\t\t\t\t\tparsed_location[1] = ''\n\t\t\t\t\tlocation = urlparse.urlunparse(parsed_location)\n\t\t\t# raise an exception for the redirection\n\t\t\traise cls(location, resp.status, resp.reason, resp, http_or_https)\n\t\t\n\t\t# set the location that was visited, in case it differs from that which\n\t\t# was specified (i.e because of a redirect)\n\t\tresp.location = location\n\t\treturn resp", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 身份\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.0.212',\n '_prod_code': 'DEMO',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'centre-openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = 
UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def retry_task(func):\n\n @wraps(func)\n def wrapper(task, *args, **kwargs):\n retries = task.request.retries\n exponential = 2 ** retries\n exponential_backoff = random.randint(exponential, exponential * 2)\n try:\n result = func(task, *args, **kwargs)\n except Exception as e:\n logger.error(\n f\"Retriying {task.request.id} after {exponential_backoff} seconds\"\n )\n raise task.retry(countdown=exponential_backoff, exc=e, max_retries=5)\n\n return result\n\n return wrapper", "def request(self, *args, **kwargs):\n try:\n return self._http.request(*args, timeout=TIMEOUT, **kwargs)\n except Exception as exc:\n raise RequestException(exc, args, kwargs)", "def retry_strategy(self, retry_strat):\n self.retry_strategy = retry_strat\n return self", "def __send_request(self, url, params=None, headers=None):\n\n if self.rate_limit is not None and self.rate_limit <= self.min_rate_to_sleep:\n seconds_to_reset = self.rate_limit_reset_ts - int(time.time()) + 1\n cause = \"GitHub rate limit exhausted.\"\n if self.sleep_for_rate:\n logger.info(\"%s Waiting %i secs for rate limit reset.\", cause, seconds_to_reset)\n time.sleep(seconds_to_reset)\n else:\n raise RateLimitError(cause=cause, seconds_to_reset=seconds_to_reset)\n\n r = requests.get(url, params=params, headers=headers)\n r.raise_for_status()\n self.rate_limit = int(r.headers['X-RateLimit-Remaining'])\n self.rate_limit_reset_ts = int(r.headers['X-RateLimit-Reset'])\n logger.debug(\"Rate limit: %s\" % (self.rate_limit))\n return r", "def retry_mechanism(self):\n # If future is done we can close the transport\n if self.on_response_received.done():\n self.transport.close()\n elif self._retries < self._max_retries:\n self._retries += 1\n logger.debug(f'Retry #{self._retries} of {self._max_retries}')\n self._send_request()\n else:\n logger.debug(f'Max number of retries ({self._max_retries}) reached, closing socket')\n self.on_response_received.set_exception(MaxRetriesException)\n self.transport.close()", "def requests_retry_session(\n retries=0, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None\n):\n session = session or requests.Session()\n retry = Retry(\n total=retries,\n read=retries,\n connect=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist,\n )\n adapter = HTTPAdapter(max_retries=retry)\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n return session", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': 
UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDurationMillis': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 链上交易中的事件\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.3.1',\n '_prod_code': 'BAASDATAGW',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def retry(\n self,\n retries: int = 3,\n backoff_factor: float = 0.3,\n status_forcelist: list | None = None,\n **kwargs,\n ):\n retry_object = Retry(\n total=retries,\n read=retries,\n connect=retries,\n backoff_factor=backoff_factor,\n status_forcelist=status_forcelist or [500, 502, 504],\n )\n urls = kwargs.get('urls') or ['https://']\n\n if self._custom_adapter:\n self._custom_adapter.max_retries = retry_object\n else:\n # TODO: @cblades - max_retries is typed as int and Retry is being passed\n self._custom_adapter = CustomAdapter(\n rate_limit_handler=self.rate_limit_handler, max_retries=retry_object # type: ignore\n )\n\n # mount the custom adapter\n for url in urls:\n self.log.info(\n f'feature=external-session, action=applying-retry, retries={retries}, '\n f'backoff-factor={backoff_factor}, 
status-forcelist={status_forcelist}, url={url}'\n )\n self.mount(url, self._custom_adapter)", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDurationMillis': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 无分组设备\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.0.45'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n 
pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 创建凭证One\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.6.10',\n '_prod_code': 'SHUZIWULIU',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n raise UnretryableException(_last_request, _last_exception)", "async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. 
Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. URL can not be navigated.\")", "def do_request(\n self,\n version: str,\n action: str,\n protocol: str,\n method: str,\n pathname: str,\n request: dict,\n headers: Dict[str, str],\n runtime: util_models.RuntimeOptions,\n ) -> dict:\n runtime.validate()\n _runtime = {\n 'timeouted': 'retry',\n 'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),\n 'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),\n 'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),\n 'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),\n 'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),\n 'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),\n 'maxIdleTimeMillis': self._max_idle_time_millis,\n 'keepAliveDuration': self._keep_alive_duration_millis,\n 'maxRequests': self._max_requests,\n 'maxRequestsPerHost': self._max_requests_per_host,\n 'retry': {\n 'retryable': runtime.autoretry,\n 'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)\n },\n 'backoff': {\n 'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),\n 'period': UtilClient.default_number(runtime.backoff_period, 1)\n },\n 'ignoreSSL': runtime.ignore_ssl,\n # 资源定位信息\n }\n _last_request = None\n _last_exception = None\n _now = time.time()\n _retry_times = 0\n while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):\n if _retry_times > 0:\n _backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)\n if _backoff_time > 0:\n TeaCore.sleep(_backoff_time)\n _retry_times = _retry_times + 1\n try:\n _request = TeaRequest()\n _request.protocol = UtilClient.default_string(self._protocol, protocol)\n _request.method = method\n _request.pathname = pathname\n _request.query = {\n 'method': action,\n 'version': version,\n 'sign_type': 'HmacSHA1',\n 'req_time': AntchainUtils.get_timestamp(),\n 'req_msg_id': AntchainUtils.get_nonce(),\n 'access_key': self._access_key_id,\n 'base_sdk_version': 'TeaSDK-2.0',\n 'sdk_version': '1.8.95',\n '_prod_code': 'BOT',\n '_prod_channel': 'undefined'\n }\n if not UtilClient.empty(self._security_token):\n _request.query['security_token'] = self._security_token\n _request.headers = TeaCore.merge({\n 'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),\n 'user-agent': UtilClient.get_user_agent(self._user_agent)\n }, headers)\n tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))\n _request.body = UtilClient.to_form_string(tmp)\n _request.headers['content-type'] = 'application/x-www-form-urlencoded'\n signed_param = TeaCore.merge(_request.query,\n RPCUtilClient.query(request))\n _request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)\n _last_request = _request\n _response = TeaCore.do_action(_request, _runtime)\n raw = UtilClient.read_as_string(_response.body)\n obj = UtilClient.parse_json(raw)\n res = UtilClient.assert_as_map(obj)\n resp = UtilClient.assert_as_map(res.get('response'))\n if AntchainUtils.has_error(raw, self._access_key_secret):\n raise TeaException({\n 'message': resp.get('result_msg'),\n 'data': resp,\n 'code': resp.get('result_code')\n })\n return resp\n except Exception as e:\n if TeaCore.is_retryable(e):\n _last_exception = e\n continue\n raise e\n 
raise UnretryableException(_last_request, _last_exception)" ]
[ "0.6729656", "0.637971", "0.6109874", "0.60819364", "0.6021752", "0.60116726", "0.59798336", "0.5907625", "0.58662295", "0.583994", "0.5785021", "0.575224", "0.57102096", "0.5658372", "0.56307805", "0.5607868", "0.5588081", "0.5575883", "0.55712205", "0.55710757", "0.5570575", "0.5570352", "0.5566671", "0.5541451", "0.55233896", "0.55128044", "0.55097324", "0.5492836", "0.5470831", "0.5466929" ]
0.6629842
1
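The retry_request document above wraps do_request in a fixed-interval retry loop: it retries only on ConnectionFailed, sleeps retry_interval seconds between attempts, and re-raises once the attempt budget is spent. A minimal self-contained sketch of the same pattern follows; retry_call, the ConnectionFailed stand-in, and the default values are illustrative assumptions, not the client library's actual API.

import time

class ConnectionFailed(Exception):
    """Illustrative stand-in for the client's connection error."""

def retry_call(func, retries=3, retry_interval=2):
    # One initial attempt plus `retries` retries, mirroring
    # max_attempts = self.retries + 1 in the document above.
    for attempt in range(retries + 1):
        try:
            return func()
        except ConnectionFailed:
            # Sleep before each retry, but not after the final failure.
            if attempt < retries:
                time.sleep(retry_interval)
    raise ConnectionFailed("Maximum attempts reached")

For example, retry_call(lambda: client.do_request('GET', '/ports'), retries=3) issues at most four requests before giving up; client here is hypothetical.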
Analyse a confusion matrix by printing the True Positive (TP), False Positive (FP), ... and the specificity and sensitivity for each class.
def analyze_confusion_matrix(confusion_matrix):
    n_classes = len(confusion_matrix)
    # True positive : correct prediction, ie the diagonal of the confusion matrix
    TP = confusion_matrix.diag()
    for c in range(n_classes):
        idx = torch.ones(n_classes).byte()
        idx[c] = 0
        TN = confusion_matrix[idx.nonzero()[:, None], idx.nonzero()].sum()
        FP = confusion_matrix[c, idx].sum()
        FN = confusion_matrix[idx, c].sum()
        sensitivity = (TP[c] / (TP[c] + FN))
        specificity = (TN / (TN + FP))
        # Display the analysis in the console
        print('Class {}\nTP {}, TN {}, FP {}, FN {}'.format(c, TP[c], TN, FP, FN))
        print("Sensitivity :", sensitivity)
        print("Specificity : {0}\n------".format(specificity))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confusionMatrix(testDataPredictions, testDataOriginal):\n matrix = {\"predicted >50K correctly as >50K\": 0, \"predicted >50K incorrectly as <=50K\": 0,\n \"predicted <=50K correctly as <=50K\": 0, \"predicted <=50K incorrectly as >50K\": 0}\n\n for instance in range(len(testDataPredictions)):\n prediction = testDataPredictions[instance]\n original = testDataOriginal[14].iloc[instance]\n\n #calculating total number of TP,TN,FP and FN\n\n if prediction == 1.0 and original == 1.0:\n matrix[\"predicted >50K correctly as >50K\"] += 1.00\n elif prediction == 0.0 and original == 1.0:\n matrix[\"predicted >50K incorrectly as <=50K\"] += 1.00\n elif prediction == 0.0 and original == 0.0:\n matrix[\"predicted <=50K correctly as <=50K\"] += 1.00\n elif prediction == 1.0 and original == 0.0:\n matrix[\"predicted <=50K incorrectly as >50K\"] += 1.00\n\n #Making the confusion matrix look readable on console printing\n print('----------------')\n print('CONFUSION MATRIX')\n print( 'TP: ', matrix[\"predicted >50K correctly as >50K\"], '||', 'FP: ', matrix[\"predicted >50K incorrectly as <=50K\"])\n print('----------------')\n print('FN: ', matrix[\"predicted <=50K incorrectly as >50K\"], '||', 'TN: ', matrix[\"predicted <=50K correctly as <=50K\"])\n\n # definition of sensitivity, precision and specificity formulas\n sensitivity = matrix[\"predicted >50K correctly as >50K\"] / (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted <=50K incorrectly as >50K\"])\n\n precision = matrix[\"predicted >50K correctly as >50K\"]/ (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n specificity = matrix[\"predicted <=50K correctly as <=50K\"] / (\n matrix[\"predicted <=50K correctly as <=50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n print('Precision: ' + str(precision*100) + '%')\n print('Sensitivity: '+ str(sensitivity*100)+ '%')\n print('Specificity: '+ str(specificity*100) +'%')\n\n return matrix, precision, sensitivity, specificity", "def confusionMatrix(self):\n \n cn_matrix = confusion_matrix(self.y_Actual, self.y_Predicted, labels=[\"Positive\", \"Neutral\", \"Negative\"])\n print(cn_matrix)\n print(\"Accuracy Score:\", accuracy_score(self.y_Actual, self.y_Predicted))\n print(classification_report(self.y_Actual, self.y_Predicted))", "def printConfusion(self,conf):\n print \"y\\pred\\t \" + \"\\t\".join(map(str,range(self.param['numClasses'])))\n i = 0\n for row in conf:\n print str(i) + \"\\t\" + \"\\t\".join(map(str,row))\n i += 1", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = 
[len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def print_confusion_matrix(self) -> None:\n self.metrics_calculator.print_confusion_metrics(\n predicted_probs=self.output_analytics[\"all_pred_probs\"],\n labels=self.output_analytics[\"true_labels_indices\"].unsqueeze(1),\n )", "def confusion_metrics(experiment, plot_matrix = False):\n\n # y_train = experiment['y_train']\n # y_train_prediction = experiment['y_train_prediction']\n y_test = experiment['y_test']\n y_test_prediction = experiment['y_test_prediction']\n\n # precision1, recall1, fbeta1, support1 = precision_recall_fscore_support(y_train, y_train_prediction)\n precision2, recall2, fbeta2, support2 = precision_recall_fscore_support(y_test, y_test_prediction)\n\n # accuracy1 = accuracy_score(y_train, y_train_prediction)\n accuracy2 = accuracy_score(y_test, y_test_prediction)\n\n # TPR and TNR (TPR should equal to recall)\n # TPR1 = np.mean(y_train[y_train_prediction == 1])\n # NPR1 = 1. - np.mean(y_train[y_train_prediction == 0])\n TPR2 = np.mean(y_test[y_test_prediction == 1])\n NPR2 = 1. - np.mean(y_test[y_test_prediction == 0])\n\n # experiment['SCORES_train'] = (precision1, recall1, fbeta1, support1, accuracy1, TPR1, NPR1)\n experiment['SCORES_test'] = (precision2, recall2, fbeta2, support2, accuracy2, TPR2, NPR2)\n\n print('')\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print('Testing Results:')\n print(' Class 0')\n print(' Precision = {:,.2f}%'.format(precision2[0] * 100))\n print(' Recall = {:,.2f}%'.format(recall2[0] * 100))\n print(' F1-Score = {:,.2f}%'.format(fbeta2[0] * 100))\n print(' Support = {:,.0f}'.format(support2[0]))\n print(' Class 1')\n print(' Precision = {:,.2f}%'.format(precision2[1] * 100))\n print(' Recall = {:,.2f}%'.format(recall2[1] * 100))\n print(' F1-Score = {:,.2f}%'.format(fbeta2[1] * 100))\n print(' Support = {:,.0f}'.format(support2[1]))\n print('True positive rate = {:,.2f}'.format(TPR2 * 100))\n print('True negative rate = {:,.2f}'.format(NPR2 * 100))\n print('Accuracy = {:,.2f}%'.format(accuracy2 * 100))\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print('')\n\n if plot_matrix:\n cnf_matrix = confusion_matrix(y_test, y_test_prediction)\n plot_confusion_matrix(cnf_matrix, ['$H_0$', '$H_1$'])\n\n return experiment", "def confusion(model, x_test, y_test, show=True, mType=\"\", n_cases=2):\n\n def convert_y_if_needed(y_vals, y_lbl):\n print(\"{} raw shape: {}\".format(y_vals.shape, y_lbl))\n if (len(y_vals.shape) == 2):\n y_vals = argmax(y_vals, axis=1)\n print(\" - converted to: {}\".format(y_vals.shape))\n return y_vals\n\n y_pred = model.predict(x_test)\n y_pred = convert_y_if_needed(y_pred, \"Y Pred\")\n y_test = convert_y_if_needed(y_test, \"Y Test\")\n\n cases = range(n_cases)\n cmNN = confusion_matrix(y_test, y_pred)\n\n acc = sklearn.metrics.accuracy_score(y_test, y_pred) # (TP + TN) / Total\n if n_cases==2:\n rec = sklearn.metrics.recall_score(y_test, y_pred) # TP / (TP + FN)\n pre = sklearn.metrics.precision_score(y_test, y_pred) # TP / (TP + FP)\n\n accLbl = \"Proportion of classifications that are correct = (TP + TN) / Total\"\n recLbl = \"Proportion of relevant cases that were selected = TP / (TP + FN)\"\n preLbl = \"Proportion of selected cases that are relevant = TP / (TP + FP)\"\n print()\n print(\"Confusion Matrix for \" + mType)\n print(\"Accuracy = {:.2%} = {}\".format(acc, 
accLbl))\n if n_cases == 2:\n print(\"Recall = {:.2%} = {}\".format(rec, recLbl))\n print(\"Precision = {:.2%} = {}\".format(pre, preLbl))\n print()\n if show: myPlot.plot_confusion_matrix(cmNN,cases, show=show, title=mType+\" Confusion Matrix\")\n stats = {\"Accuracy\":acc, \"Precision\":pre, \"Recall\":rec, \"matrix\":cmNN}\n return stats", "def confusion_matrix(self,predictions,labels):\n TP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == True))\n FP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == False))\n FN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == True))\n TN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == False))\n\n return np.array([[TP,FP],[FN,TN]])", "def main():\n conf_matrix1 = one_vs_all()\n conf_matrix2 = all_vs_all()\n results = my_info() + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix1)) + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix2))\n print results + '\\t\\t'\n\n # sum = 0\n #\n # for i in range(len(conf_matrix1)):\n # sum += conf_matrix1[i][i]\n #\n # print \"One-vs-All corecct classifications: \", sum\n #\n # sum = 0\n #\n # for i in range(len(conf_matrix2)):\n # sum += conf_matrix2[i][i]\n #\n # print \"All-vs-All correct classificatinos: \", sum\n\n #print(\"onevsall\")\n #print_latex_table(conf_matrix1)\n #print(\"allvsall\")\n #print_latex_table(conf_matrix2)", "def multiclass_classification_metrics(confmat):\n n_labels, _ = confmat.shape\n\n # In order to compute the classification metrics we first compute the true\n # positives and negatives, and the false positives and negatives. These are\n # computed by summing the relevand subparts of the confusion matrix,\n # explained below case by case.\n # Then, the classification metrics are computed from the TP, TN, FP, FN.\n\n # True positives are found in the diagonal of the matrix\n tp = np.diag(confmat)\n\n # True negatives are the sums of the submatrices, complementary to the\n # diagonal elements\n ix_args = [[[i for i in range(n_labels) if i != j]] * 2 for j in range(n_labels)]\n tn = np.array([confmat[np.ix_(*args)].sum() for args in ix_args])\n\n # For false negatives we sum every row omitting the diagonal elements\n fn_idcs = [\n ([i] * (n_labels - 1), [j for j in range(n_labels) if j != i])\n for i in range(n_labels)\n ]\n fn = np.array([confmat[idx] for idx in fn_idcs]).sum(axis=1)\n\n # For false positives we sum every column omitting the diagonal elements, hence\n # we need to swap fn indices\n fp_idcs = [(lambda a, b: (b, a))(*idx) for idx in fn_idcs]\n fp = np.array([confmat[idx] for idx in fp_idcs]).sum(axis=1)\n\n # Divisions by zero raise warnings but we replace NaNs later anyway\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n accuracy = (tp + tn) / (tp + tn + fp + fn)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n fscore = 2 * (precision * recall) / (precision + recall)\n\n # Replace NaNs with 0s\n accuracy = np.nan_to_num(accuracy)\n precision = np.nan_to_num(precision)\n recall = np.nan_to_num(recall)\n fscore = np.nan_to_num(fscore)\n\n return {\n \"accuracy\": accuracy,\n \"precision\": precision,\n \"recall\": recall,\n \"fscore\": fscore,\n }", "def printData(confusion_matrix):\n numberofClasses = 4\n confusion_matrix = np.array(confusion_matrix)\n value = 5\n \n print(\"MATRIX \", \"*\\t\",\n padding(\"Class 1 \", value), \"\\t|\\t\",\n padding(\"Class 2 \", value), \"\\t|\\t\",\n 
padding(\"Class 3 \", value), \"\\t|\\t\",\n padding(\"Class 4 \", value), \"\\t*\\tTotal\")\n for i in range(numberofClasses):\n print(\"Class \" + str(i + 1), \" *\\t\",\n padding(confusion_matrix[i][0], value), \"\\t\\t|\\t\",\n padding(confusion_matrix[i][1], value), \"\\t\\t|\\t\",\n padding(confusion_matrix[i][2], value), \"\\t\\t|\\t\",\n padding(confusion_matrix[i][3], value), \"\\t\\t*\\t\",\n padding(np.sum(confusion_matrix[i]), value))\n print(\"--------------------------------------------------------------------------------------------------------------\\n\")\n print(\"Total -->\\t\",\n padding(np.sum(confusion_matrix[:, 0]), value), \"\\t\\t+\\t\",\n padding(np.sum(confusion_matrix[:, 1]), value), \"\\t\\t+\\t\",\n padding(np.sum(confusion_matrix[:, 2]), value), \"\\t\\t+\\t\",\n padding(np.sum(confusion_matrix[:, 3]), value), \"\\t\\t=\\t\",\n padding(np.sum(confusion_matrix[:, 0:4]), value))\n print(\"--------------------------------------------------------------------------------------------------------------\\n\")", "def analyze_confusion(confusion, true_labels):\n print(\"Confusion matrix:\")\n printer(\"Pre\\Tru\", true_labels)\n\n for line, label in zip(confusion, true_labels):\n printer(f\"{label}\", line)\n\n TP = confusion.diagonal()\n FN = np.sum(confusion, axis=0) - TP\n FP = np.sum(confusion, axis=1) - TP\n\n print()\n printer(\"TP\", TP)\n printer(\"FP\", FP)\n printer(\"FN\", FN)\n\n # https://stackoverflow.com/a/37977222\n # P = TP / ( TP + FP)\n # R = TP / ( TP + FN)\n dP = TP + FP\n P = np.divide(TP, dP, out=np.zeros_like(TP, dtype=float), where=dP != 0)\n dR = TP + FN\n R = np.divide(TP, dR, out=np.zeros_like(TP, dtype=float), where=dR != 0)\n\n print(\"\\nPrecision = TP / ( TP + FP)\\tRecall = TP / ( TP + FN)\")\n printer(\"Prec\", P, \":.4f\")\n printer(\"Recall\", R, \":.4f\")\n\n avgP = np.sum(P) / len(true_labels)\n avgR = np.sum(R) / len(true_labels)\n print(f\"Average P: {avgP:.4f}\\tR: {avgR:.4f}\")\n\n print(\"F-score = 2 (PxR) / (P+R)\")\n # F = 2 (PxR) / (P+R)\n PdR = 2 * P * R\n PpR = P + R\n F = np.divide(PdR, PpR, out=np.zeros_like(TP, dtype=float), where=PpR != 0)\n printer(\"F-score\", F, \":.4f\")\n\n avgF = np.sum(F) / len(true_labels)\n print(f\"Average F-score {avgF}\")\n\n return avgF", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def confusion_matrix(self):\n print('Confusion Matrix ...')\n cm = confusion_matrix(self.y_test, self.y_pred, normalize='true')\n df = pd.DataFrame(cm)\n df.to_csv('csv/cm/' + self.model_name + '_' + self.label + '_cm.csv')\n print(df)", "def build_conf_matrix(self):\n pred_vals = []\n for prediction in self.predicted_values:\n pred_vals.append(np.argmax(prediction))\n\n print confusion_matrix(self.true_values, pred_vals, labels=[1, 2, 3, 4])\n\n self.logger.info(\"Confusion Matrix : {}\".format(confusion_matrix(self.true_values, pred_vals,\n labels=[1, 2, 3, 4])))", "def print_model_analysis(predictions, targets, print_conf_matrix=False):\n accuracy = accuracy_per_shift(predictions, targets)\n built_in_accuracy = accuracy_score(targets, predictions)\n\n conf_matrix = custom_confusion_matrix(predictions, targets)\n\n precision, recall = precision_recall(conf_matrix)\n\n print('Accuracy:', 
accuracy)\n print('Built-in accuracy:', built_in_accuracy)\n if print_conf_matrix:\n print('Confusion Matrix:\\n', conf_matrix)\n print('Precision:', precision)\n print('Recall:', recall)\n\n print()", "def custom_confusion_matrix(predictions, targets):\n tp, fp, fn, tn = [], [], [], []\n\n for pred, targ in zip(predictions, targets):\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1: # True positive\n tp.append(1)\n elif shift_pred == 1 and shift_targ == 0: # False positive\n fp.append(1)\n elif shift_pred == 0 and shift_targ == 1: # False negative\n fn.append(1)\n elif shift_pred == 0 and shift_targ == 0: # True negative:\n tn.append(1)\n\n tp_count = len(tp)\n fp_count = len(fp)\n fn_count = len(fn)\n tn_count = len(tn)\n\n conf_matrix = np.array([\n [tp_count, fp_count],\n [fn_count, tn_count]\n ])\n\n return conf_matrix", "def confusion_matrix(y_true, y_pred, table_show=True):\n\tFIRST_CLASS = 1\n\tSECOND_CLASS = 0\n\n\tzipped = np.array(list(zip(y_true, y_pred)))\n\ttp, fn, fp, tn = 0, 0, 0, 0\n\n\tfor y_true, y_pred in zipped:\n\t\tif y_true == y_pred and y_true == FIRST_CLASS:\n\t\t\ttp += 1\n\t\telif y_true == y_pred and y_true == SECOND_CLASS:\n\t\t\ttn += 1\n\t\telif y_true != y_pred and y_true == SECOND_CLASS:\n\t\t\tfp += 1\n\t\telse:\n\t\t\tfn += 1\n\n\tif table_show:\n\t\treturn np.array([tp, fn, fp, tn]).reshape([2,2])\n\n\treturn tp, fn, fp, tn", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def full_multiclass_report(y_true, y_pred, classes, png_output=None, show=True):\n\n # Print accuracy score\n print(\"Accuracy : \"+ str(accuracy_score(y_true,y_pred)))\n \n print(\"\")\n \n # Print classification report\n print(\"Classification Report\")\n Eval.classification_report(y_true,y_pred,digits=5)\n \n # Plot confusion matrix\n cnf_matrix = confusion_matrix(y_true,y_pred)\n print(cnf_matrix)\n\n Eval.plot_confusion_matrix(cnf_matrix,classes=classes, png_output=png_output, show=show)", "def compute_classification_metrics(confmat):\n tn, fp, fn, tp = confmat.ravel()\n tn, fp, fn, tp = map(int, (tn, fp, fn, tp)) # cast from numpy.int64\n return (\n accuracy(tp, tn, fp, fn),\n precision(tp, fp),\n recall(tp, fn),\n fscore(tp, fp, fn),\n )", "def conf_matrix(model, X_train, y_train, X_test, 
y_test, train=True):\n from sklearn.metrics import confusion_matrix\n import itertools\n if train==True: \n ypredTrain = model.predict(X_train)\n cm = confusion_matrix(y_train, ypredTrain)\n def plot_conf_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Reds):\n plt.figure(figsize = (5, 5))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, size = 14)\n plt.colorbar(aspect=4)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0, size = 10)\n plt.yticks(tick_marks, classes, size = 10)\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), fontsize = 14,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.grid(b=None)\n plt.tight_layout()\n plt.ylabel('True label', size = 12)\n plt.xlabel('Predicted label', size = 12)\n plot_conf_matrix(cm, classes = ['Covid-', 'Covid+'], \n title = 'Confusion Matrix\\n\\n(Train)\\n')\n elif train==False:\n ypredTest = model.predict(X_test)\n cm = confusion_matrix(y_test, ypredTest)\n def plot_conf_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Blues):\n plt.figure(figsize = (5, 5))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, size = 14)\n plt.colorbar(aspect=4)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0, size = 10)\n plt.yticks(tick_marks, classes, size = 10)\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), fontsize = 14,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.grid(b=None)\n plt.tight_layout()\n plt.ylabel('True label', size = 12)\n plt.xlabel('Predicted label', size = 12)\n plot_conf_matrix(cm, classes = ['Covid-', 'Covid+'], \n title = 'Confusion Matrix\\n\\n(Test)\\n')", "def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3", "def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()", "def performance_matrix(df):\r\n total = df.count()\r\n nP = df.filter((F.col('prediction') == 1)).count()\r\n nN = df.filter((F.col('prediction') == 0)).count()\r\n TP = df.filter((F.col('prediction') == 1) & (F.col('label') == 1)).count()\r\n FP 
= df.filter((F.col('prediction') == 1) & (F.col('label') == 0)).count()\r\n FN = df.filter((F.col('prediction') == 0) & (F.col('label') == 1)).count()\r\n TN = df.filter((F.col('prediction') == 0) & (F.col('label') == 0)).count()\r\n \r\n print('num positive: {}'.format(nP))\r\n print('num negative: {}'.format(nN))\r\n print(\"True Positives:\", TP)\r\n print(\"True Negatives:\", TN)\r\n print(\"False Positives:\", FP)\r\n print(\"False Negatives:\", FN)\r\n print('accuracy: {}'.format((TP + TN) / total))\r\n \r\n if TP == 0:\r\n print(\"Precision: 0\")\r\n print(\"Recall: 0\")\r\n \r\n else:\r\n print('recall: {}'.format(TP / (TP + FN)))\r\n print('precision: {}'.format(TP / (TP + FP)))", "def accuracy_measures(predictions, trues):\n\n tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()\n print \"\\t(tn, fp, fn, tp) =\", (tn, fp, fn, tp)\n\n # how often is classifier correct?\n print \"\\tAccuracy = {:.2%}\".format(float(tp + tn) / len(trues))\n\n # how often is it wrong?\n print \"\\tMisclassification Rate = {:.2%}\".format(float(fp + fn) / len(trues))\n\n # when actually yes, how often does it predict yes?\n print \"\\tTrue Positive Rate = {:.2%}\".format(float(tp) / trues.count(True))\n\n # when actually no, how often does it predict yes?\n print \"\\tFalse Positive Rate = {:.2%}\".format(float(fp) / trues.count(False))\n\n # when actually no, how often does it predict no?\n print \"\\tSpecificity = {:.2%}\".format(float(tn) / trues.count(False))\n\n # when it predicts yes, how often is it correct?\n print \"\\tPrecision = {:.2%}\".format(float(tp) / predictions.count(True))\n\n # how often does yes condition occur in our sample?\n print \"\\tPrevalence = {:.2%}\\n\".format(float(trues.count(True)) / len(trues))\n\n # return accuracy, precision, and recall score\n return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(\n trues, predictions, average='binary')", "def print_classification_info(clf, x, y):\n x_tr, x_ts, y_tr, y_ts = train_test_split(x, y, train_size=0.8, test_size=0.2)\n clf.fit(x_tr, y_tr)\n p = clf.predict(x_ts)\n print(classification_report(y_ts, p))\n print(confusion_matrix(y_ts, p))", "def confusionMetric( self, classTest, classPred):\n # accuracy of the model - in one number\n accuracy = average_precision_score( classTest, classPred )\n # confusion matrix 2x2 matric\n matConf = confusion_matrix(classTest, classPred)\n # cohen Kappa is applicable for unbalanced data\n matCohenKappa = cohen_kappa_score(classTest, classPred)\n # classification report\n strClassificationReport = classification_report(classTest, classPred)\n \n return accuracy, matConf, matCohenKappa, strClassificationReport", "def main():\n\n # confusion matrix model ensemble\n df = pd.read_csv('pred_test_ensemble.csv')\n print('Real test accuracy:', accuracy_score(df.labels.values, df.class_preds.values))\n conf_matrix = confusion_matrix(df.labels.values, df.class_preds.values, labels=[0, 1, 2, 3])\n\n dct = {'': [0, 90, 180, 270]}\n for i in range(4):\n dct[str(i*90)] = conf_matrix[:, i]\n \n conf_matrix = pd.DataFrame(dct)\n print(conf_matrix)\n conf_matrix.to_csv('confusion_matrix_ensemble.csv', index=False)\n\n\n\n # # Statistical gama\n # df = pd.read_csv('pred_test.csv')\n # print('Statistical... 
')\n # statistical = gama_statistic(df)\n # statistical.to_csv('gama_statistic.csv', index=False)\n # print(statistical)", "def getConfusionMatrix(pred, real):\n # print pd.crosstab(pred, real) \n \n total = float(real.shape[0])\n \n tp = 0 # true positive\n tn = 0 # true negitive\n fp = 0 # false positive\n fn = 0 # false negitive\n for predicted, actual in zip(pred, real):\n if predicted == actual:\n if predicted == 1:\n tp += 1\n else:\n tn += 1\n else:\n if predicted == 1:\n fp += 1\n else:\n fn += 1\n \n\n print \"(tp, tn, fp, fn):\" , tp, tn, fp, fn\n print \"accuracy is :\", (tp+tn)/total" ]
[ "0.74491894", "0.7377686", "0.73206645", "0.7228996", "0.71899956", "0.7060822", "0.7049538", "0.685456", "0.6830191", "0.68265086", "0.68125165", "0.6800462", "0.67516446", "0.6725235", "0.67091966", "0.6700737", "0.6692203", "0.6686371", "0.66814786", "0.6649996", "0.6647551", "0.6610041", "0.66030234", "0.66001785", "0.6596758", "0.65871054", "0.65685564", "0.64982283", "0.6494195", "0.6478992" ]
0.75660235
0
Constructs a new ReplicatorValue object
def __init__(self, replicator_id=''): self.replicator_id = replicator_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _new_rep(self, rep):\n return self._new(rep, self.shape, self.domain)", "def copy(self):\n return self.__class__(\n group_list=self.group_list, bits_except_last=self.bits_except_last,\n max_value=self.max_value\n )", "def new_value(self, value=0):\n return data_value_factory(self.type, value)", "def new_value(self):\n return data_value_factory(self.type, self.name)", "def new_value(self):\n return data_value_factory(self.type, self.name)", "def new_value(self):\n return data_value_factory(self.type)", "def __copy__(self):\n return type(self)(self.value)", "def new_value(self, value=0.0):\n return data_value_factory(self.type, value)", "def new_value(self, values=None):\n return data_value_factory(self.type, values)", "def copy(self):\n return self.__class__(self.value, self.is_cloud)", "def clone(self) -> \"set_default\":\n return type(self)(self.value)", "def _from_value(cls, value):\n self = object.__new__(cls)\n self.name = value\n self.value = value\n self.metadata_type = IntegrationMetadataSubscription\n return self", "def __new__(cls, value: int):\n new_object = super(CalibrationStatus, cls).__new__(cls, value)\n new_object._value = value # type: ignore[attr-defined]\n new_object._binary = new_object._to_binary() # type: ignore[attr-defined]\n return new_object", "def new_value(self, value=b''):\n return data_value_factory(self.type, value)", "def __copy__(self):\n return Quantity(copy.copy(self._value), self.unit)", "def create(self, other=None):\n answer = type(self)(other)\n answer.__dict__ = self.__dict__\n answer.preferred_rep()\n return answer", "def apply_replicator(self, replicator, template_values, template_index=-1,\n template_value=None, update_input_references=False):\n return {}", "def copy(self):\n new = self.__class__()\n new.values = self.values.copy()\n return new", "def new_value(self, value=\"\"):\n return data_value_factory(self.type, value)", "def new_value(self, value=\"\"):\n return data_value_factory(self.type, value)", "def _new(cls, rep, shape, domain):\n cls._check(rep, shape, domain)\n obj = object.__new__(cls)\n obj.rep = rep\n obj.shape = obj.rows, obj.cols = shape\n obj.domain = domain\n return obj", "def __init__(self, value):\n self.value = value", "def new_value(self, value=False):\n return data_value_factory(self.type, value)", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\r\n self.value = value" ]
[ "0.6084624", "0.6004562", "0.59728104", "0.5943229", "0.5943229", "0.591257", "0.59099877", "0.5903905", "0.59019524", "0.5872284", "0.5861993", "0.58383065", "0.5753103", "0.5664996", "0.5662879", "0.56450135", "0.56154025", "0.56136125", "0.55858725", "0.55858725", "0.55662155", "0.55628276", "0.5559925", "0.5555795", "0.5555795", "0.5555795", "0.5555795", "0.5555795", "0.5555795", "0.5552928" ]
0.6295505
0
Sets this component's path from individual components.
def _from_components(self, property_name, *protocol_ids): assert property_name is not None and isinstance(property_name, str) assert property_name.find(ProtocolPath.path_separator) < 0 for protocol_id in protocol_ids: assert protocol_id is not None and isinstance(protocol_id, str) assert protocol_id.find(ProtocolPath.property_separator) < 0 and \ protocol_id.find(ProtocolPath.path_separator) < 0 protocol_path = ProtocolPath.path_separator.join(protocol_ids) if len(protocol_ids) == 0: protocol_path = '' self._full_path = '{}{}{}'.format(protocol_path, ProtocolPath.property_separator, property_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetPath(self, path):\n\n\t\tfor point in path:\n\t\t\t# Check if correct type\n\t\t\tif type(point) != Point:\n\t\t\t\traise TypeError(\"Not of type Core.Types.Point\")\n\n\t\tself.path = path", "def set_path(self, key, value):\n return set_path(self, key, self.from_obj(value))", "def set(self, new_path):\n\n for i in range(self.depth):\n self.path[i] = new_path[self.max_input*i:self.max_input*(i + 1)]", "def path(self, path: List[Path]):\n\n self._path = path", "def alternate_path(self, path):\n cls = self.__class__\n instance = cls(path)\n for slot in cls.__slots__:\n if slot != \"build_path\":\n setattr(instance, slot, getattr(self, slot))\n return instance", "def setPath(*args):", "def _set_path(self):\n self.path = self._get_path()\n self.depth = self.get_depth()\n\n self.save()", "def __init__(self, property_name='', *protocol_ids):\n\n self._full_path = ''\n\n if len(property_name) > 0 or len(protocol_ids) > 0:\n self._from_components(property_name, *protocol_ids)\n\n else:\n self._full_path = '{}'.format(ProtocolPath.property_separator)", "def __init__(self, path):\n for key, value in path.items():\n setattr(self, \"_%s\" % key, value)", "def set_paths(self, paths):\n self.paths = paths", "def setPath(self, name, value):\n response = self.extendPath(name, value, True, True)\n return response", "def setByPath(self, keys, value):\n self.getByPath(keys[:-1])[keys[-1]] = value", "def _construct_path(self, sep, with_drive_letter=True):\n result = sep.join(self._components)\n if self._absolute:\n result = \"{}{}\".format(sep, result)\n if with_drive_letter and self._drive_letter:\n result = \"{}:{}\".format(self._drive_letter, result)\n return result", "def __init__(self, path, **kw):\n\n if not path:\n raise ValueError(\"Empty path\")\n\n if isinstance(path, list):\n ipath = path[:]\n self._drive_letter = ipath.pop(0)[0] if RX_LETTER.match(ipath[0]) else None\n self._components = _normalize_dots(ipath)\n self._absolute = True\n else:\n context = kw.get(\"context\")\n if context:\n path = _expand_context(path, context)\n\n if not kw.get(\"no_expand\", False):\n path = os.path.expanduser(os.path.expandvars(path))\n\n match = RX_LETTER.match(path)\n self._drive_letter = match.group(1) if match else None\n remainder = re.sub(RX_LETTER, \"\", path)\n\n self._absolute = remainder[0] in [\"/\", \"\\\\\"]\n\n if \":\" in remainder:\n raise ValueError(\"Bad characters in path '{}'\".format(remainder))\n\n self._components = _normalize_dots(\n [s for s in re.split(\"/|\\\\\\\\\", remainder) if s]\n )\n\n self._depth = len(self._components)", "def path(self, path):\n self._path = path", "def set_paths(self, paths):\n self._paths = paths\n self._paths_set = True", "def paths(self, paths):\n\n self._paths = paths", "def paths(self, paths):\r\n self._paths = paths\r\n self._extract()", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def path(self, path):\n\n self._path = path", "def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value", "def __init__(self, newPath):\n self.newPath = newPath", "def new_path(path):\n return Path(path[0], path[1], path[2])", "def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()", "def set(self, 
v):\n self.components = v.components", "def __set__(self, obj, val):\n try:\n self._resolve(val)\n except IOError, e:\n Parameterized(name=\"%s.%s\"%(obj.name,self._attrib_name)).warning('%s'%(e.args[0]))\n\n super(Path,self).__set__(obj,val)", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)" ]
[ "0.61862373", "0.6101257", "0.6078512", "0.5973134", "0.5934757", "0.5862571", "0.5811863", "0.5741532", "0.5681197", "0.5680933", "0.567429", "0.5666033", "0.56404704", "0.56123084", "0.56061804", "0.56000787", "0.5571635", "0.552776", "0.5526396", "0.5526396", "0.5526396", "0.5526396", "0.5526396", "0.5469893", "0.5462737", "0.54400414", "0.54373795", "0.54213613", "0.53968614", "0.5385627" ]
0.67479634
0
Splits a protocol path string into the property name and the individual protocol ids.
def to_components(path_string): property_name_index = path_string.find(ProtocolPath.property_separator) property_name = path_string[property_name_index + 1:] protocol_id_path = path_string[:property_name_index] protocol_ids = protocol_id_path.split(ProtocolPath.path_separator) if len(protocol_id_path) == 0: protocol_ids = [] return property_name, protocol_ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _from_components(self, property_name, *protocol_ids):\n\n assert property_name is not None and isinstance(property_name, str)\n\n assert property_name.find(ProtocolPath.path_separator) < 0\n\n for protocol_id in protocol_ids:\n\n assert protocol_id is not None and isinstance(protocol_id, str)\n\n assert protocol_id.find(ProtocolPath.property_separator) < 0 and \\\n protocol_id.find(ProtocolPath.path_separator) < 0\n\n protocol_path = ProtocolPath.path_separator.join(protocol_ids)\n\n if len(protocol_ids) == 0:\n protocol_path = ''\n\n self._full_path = '{}{}{}'.format(protocol_path,\n ProtocolPath.property_separator,\n property_name)", "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def splitparams(path):\n if '/' in path:\n i = path.find(';', path.rfind('/'))\n else:\n i = path.find(';')\n if i < 0:\n return path, ''\n return path[:i], path[i + 1:]", "def parse_managed_path(path):\n fields = path.split(':', 1)\n return fields[0], fields[1]", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def get_url_components(self, url):\n if 'http://' not in url and 'https://' not in url:\n print(\"Protocol not found, skipping: \" + url)\n return False\n if url[:7] == 'http://':\n protocol = url[:7]\n file_path = url[7:]\n elif url[:8] == 'https://':\n protocol = url[:8]\n file_path = url[8:]\n else:\n print(\"Error when parsing protocol. 
Skipping: \" + url)\n return False\n # Split the string from the last '/'.\n # To do this, we reverse the string, split from the first '/' and\n # then reverse them both back.\n filename, root_and_directory = [x[::-1] for x in file_path[::-1].split('/', 1)]\n # Replace the lost '/'\n root_and_directory = root_and_directory + '/'\n root, directory = root_and_directory.split('/', 1)\n directory = '/' + directory\n return [protocol, root, directory, filename]", "def parse_path(path):\n assert path is not None and len(path) > 0, \"Invalid path: %s.\" % str(path)\n if not isinstance(path, tuple):\n path = str(path).split('.')\n return path", "def split(self, path):\n if not self.is_managed_path(path):\n return os.path.split(path)\n client, _ = self._get_storage(path)\n prefix, rel_path = self.parse_managed_path(path)\n return (\"%s:\" % prefix,) + client.split(rel_path)", "def split_datastore_path(datastore_path):\n spl = datastore_path.split('[', 1)[1].split(']', 1)\n path = \"\"\n if len(spl) == 1:\n datastore_name = spl[0]\n else:\n datastore_name, path = spl\n return datastore_name, path.strip()", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def _split_varpath(self, path):\n try:\n compname, varname = path.split('.', 1)\n except ValueError:\n return (None, self, path)\n\n t = self.get_trait(compname)\n if t and t.iotype:\n return (None, self, path)\n return (compname, getattr(self, compname), varname)", "def _get_path_infomation(self):\n long_identifier = self._device_path.split('/')[4]\n protocol, remainder = long_identifier.split('-', 1)\n identifier, _, device_type = remainder.rsplit('-', 2)\n return (protocol, identifier, device_type)", "def _split_key(cls, logical_key):\n if isinstance(logical_key, str):\n path = logical_key.split('/')\n elif isinstance(logical_key, (tuple, list)):\n path = logical_key\n else:\n raise TypeError('Invalid logical_key: %r' % logical_key)\n return path", "def split(path):\r\n if path.lower().startswith(\"smb://\"):\r\n if '/' not in path[6:]:\r\n path = path.replace(\"smb://\", \"smb:///\", 1)\r\n return path.rsplit('/', 1)\r\n else:\r\n return os.path.split(path)", "def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n return (None, None)\n tableName, primKey = util.save_split(path.strip(\"/\"), \"/\", 1)\n # _logger.debug(\"'%s' -> ('%s', '%s')\" % (path, tableName, primKey))\n return (tableName, primKey)", "def _urlparse_splitscheme(url):\r\n # The scheme is valid only if it contains these characters.\r\n scheme_chars = \\\r\n \"abcdefghijklmnopqrstuvwxyz0123456789+-.\"\r\n\r\n scheme = \"\"\r\n rest = url\r\n\r\n spart = url.split(\":\", 1)\r\n if len(spart) == 2:\r\n\r\n # Normalize the scheme.\r\n spart[0] = spart[0].lower()\r\n\r\n # A scheme is valid only if it starts with an alpha character.\r\n if spart[0] and spart[0][0].isalpha():\r\n for char in spart[0]:\r\n if char not in scheme_chars:\r\n break\r\n (scheme, rest) = spart\r\n\r\n return scheme, rest", "def split_path(self, path):\n path = path.strip(\"/\")\n return path.split(\"/\") if len(path) > 0 else []", "def _split_url(url):\n return url[1:].split('/')", "def splitPath(path):\n return tuple(\n element for element in os.path.split(path.rstrip(os.path.sep)) if element\n )", "def tokenize_path(path):\n # form a list of tuples that mark the start and end positions of steps\n separators 
= []\n last_position = 0\n i = -1\n in_string = False\n while i < len(path) - 1:\n i = i + 1\n if path[i] == \"'\":\n in_string = not in_string\n if in_string:\n # slashes within strings are not step separators\n continue\n if path[i] == '/':\n if i > 0:\n separators.append((last_position, i))\n if (path[i+1] == '/'):\n last_position = i\n i = i + 1\n else:\n last_position = i + 1\n separators.append((last_position, len(path)))\n\n steps = []\n for start, end in separators:\n steps.append(path[start:end])\n return steps", "def _GetPathParamsFromPath(path: str) -> List[str]:\n path_params = []\n\n components = path.split(\"/\")\n for component in components:\n if _IsPathParameter(component):\n normalized_component = _NormalizePathComponent(component)\n normalized_component = normalized_component[1:-1]\n path_params.append(normalized_component)\n\n return path_params", "def url_split(url):\n scheme, netloc = urllib.splittype(url)\n host, document = urllib.splithost(netloc)\n port = default_ports.get(scheme, 0)\n if host:\n host = host.lower()\n host, port = splitport(host, port=port)\n return scheme, host, port, document", "def splitPath(self, path):\n return os.path.split(path)", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. \"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def split_network_line(line):\n fields = line.strip().split()\n receive_field = fields[1]\n transmit_field = fields[2]\n (recv_calls, rx_octets) = receive_field.split('/')\n (send_calls, tx_octets) = transmit_field.split('/')\n return (int(recv_calls), int(rx_octets), int(send_calls), int(tx_octets))", "def _parse_path(p_names):\n gnmi_elems = []\n for word in p_names:\n word_search = _RE_PATH_COMPONENT.search(word)\n if not word_search: # Invalid path specified.\n raise XpathError('xpath component parse error: %s' % word)\n if word_search.group('key') is not None: # A path key was provided.\n tmp_key = {}\n for x in re.findall(r'\\[([^]]*)\\]', word):\n tmp_key[x.split(\"=\")[0]] = x.split(\"=\")[-1]\n gnmi_elems.append(gnmi_pb2.PathElem(name=word_search.group(\n 'pname'), key=tmp_key))\n else:\n gnmi_elems.append(gnmi_pb2.PathElem(name=word, key={}))\n return gnmi_pb2.Path(elem=gnmi_elems)", "def split_by_hash_sign(path: str) -> List[str]:\n if \"#\" in path:\n split_path = path.split(\"#\")\n if len(split_path) > 2:\n raise Exception(f\"There should be maximum one '#' in the path {path}\")\n return split_path\n return [path]", "def parse_path(path: str) -> list:\n arr = path.split(\"/\")\n if arr[0] == \"m\":\n arr = arr[1:]\n if len(arr) == 0:\n return []\n if arr[-1] == \"\":\n # trailing slash\n arr = arr[:-1]\n for i, e in enumerate(arr):\n if e[-1] == \"h\" or e[-1] == \"'\":\n arr[i] = int(e[:-1]) + 0x80000000\n else:\n arr[i] = int(e)\n return arr", "def split_grammar_rule_name(name):\n\n names = name.split(':', maxsplit=1)\n if len(names) < 2:\n names.insert(0, '')\n return names[0], names[1]" ]
[ "0.61609274", "0.6133861", "0.60456616", "0.6042714", "0.6003168", "0.5955503", "0.5950088", "0.59124535", "0.5872106", "0.5835307", "0.5825878", "0.5824099", "0.57710177", "0.57510704", "0.5652095", "0.562677", "0.5614178", "0.55737364", "0.55539405", "0.5530218", "0.5516221", "0.5478423", "0.54538625", "0.5449326", "0.54394484", "0.54298437", "0.5413835", "0.5400716", "0.53992623", "0.53989834" ]
0.79196525
0
Prepend a new protocol id onto the front of the path.
def prepend_protocol_id(self, id_to_prepend): property_name, protocol_ids = ProtocolPath.to_components(self._full_path) if len(protocol_ids) == 0 or (len(protocol_ids) > 0 and protocol_ids[0] != id_to_prepend): protocol_ids.insert(0, id_to_prepend) self._from_components(property_name, *protocol_ids)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepend_path(path, paths):\n\n if path in paths: paths.remove(path)\n paths.insert(0, path)", "def replace_protocol(self, old_id, new_id):\n self._full_path = self._full_path.replace(old_id, new_id)", "def append_to_path(path, name):\n if path[-1] == '/' or path[-1] == ':':\n return path + name\n else:\n return str(path) + str('/') + str(name)", "def test_PrependPathPreserveOld(self) -> None:\n p1 = r'C:\\dir\\num\\one;C:\\dir\\num\\two'\n # have to include the pathsep here so that the test will work on UNIX too.\n p1 = PrependPath(p1, r'C:\\dir\\num\\two', sep=';', delete_existing=0)\n p1 = PrependPath(p1, r'C:\\dir\\num\\three', sep=';')\n assert p1 == r'C:\\dir\\num\\three;C:\\dir\\num\\one;C:\\dir\\num\\two', p1", "def test_PrependPath(self) -> None:\n p1 = r'C:\\dir\\num\\one;C:\\dir\\num\\two'\n p2 = r'C:\\mydir\\num\\one;C:\\mydir\\num\\two'\n # have to include the pathsep here so that the test will work on UNIX too.\n p1 = PrependPath(p1, r'C:\\dir\\num\\two', sep=';')\n p1 = PrependPath(p1, r'C:\\dir\\num\\three', sep=';')\n assert p1 == r'C:\\dir\\num\\three;C:\\dir\\num\\two;C:\\dir\\num\\one', p1\n\n p2 = PrependPath(p2, r'C:\\mydir\\num\\three', sep=';')\n p2 = PrependPath(p2, r'C:\\mydir\\num\\one', sep=';')\n assert p2 == r'C:\\mydir\\num\\one;C:\\mydir\\num\\three;C:\\mydir\\num\\two', p2\n\n # check (only) first one is kept if there are dupes in new\n p3 = r'C:\\dir\\num\\one'\n p3 = PrependPath(p3, r'C:\\dir\\num\\two;C:\\dir\\num\\three;C:\\dir\\num\\two', sep=';')\n assert p3 == r'C:\\dir\\num\\two;C:\\dir\\num\\three;C:\\dir\\num\\one', p3", "def prepend_to(self, key, entry):\n try:\n tail = os.path.pathsep + self[key]\n except KeyError:\n tail = \"\"\n self[key] = entry + tail", "def _path_insert(previous, value):\n prefix = (value,)\n suffix = filter(None, previous.split(os.pathsep))\n return os.pathsep.join(_unique(itertools.chain(prefix, suffix)))", "def make_id_path(base_path, id_) -> Path:\n\n return base_path / (ID_FMT.format(id=id_))", "def ssl_pathprefix(self, code) -> str:\n # Override this ONLY if the wiki family requires a path prefix\n return ''", "def insertIntoPath(original, insertion='rest'):\n slashIndex = original.index('/',1)\n newString = '%s/%s%s' % (original[0:slashIndex], insertion, original[slashIndex:len(original)])\n return newString", "def prefix_tilde_with_slash(path):\n if begins_with_tilde(path):\n return \"/\" + path\n else:\n return path", "def addPathPrefix(prefix, pathname):\n new_pathname = os.path.join(os.path.dirname(pathname), prefix + os.path.basename(pathname))\n return new_pathname", "def prefixed_id(self, id):\n #The reason why we don't just use the external id and put the model as the prefix\n #is to avoid unique ir_model_data#name per module constraint violation.\n return self._name.replace('.', '_') + '/' + str(id)", "def path_to_htid(path):\n\n # Drop the leading path.\n filename = os.path.split(path)[-1]\n\n # Split the library code and record id.\n lib_code, rec_id = filename.split('.', maxsplit=1)\n\n # Drop any file extensions.\n rec_id = rec_id.split('.', 1)[0]\n\n # Undo the remaining character substitutions.\n rec_id = rec_id.replace('+', ':').replace('=', '/').replace(',', '.')\n\n # Reunite the library code and record id.\n return f'{lib_code}.{rec_id}'", "def protocol_id(self, protocol_id):\n self._protocol_id = protocol_id", "def replace_protocol(self, old_id, new_id):\n\n for input_path in self.required_inputs:\n\n input_path.replace_protocol(old_id, new_id)\n\n if input_path.start_protocol is not None or 
(input_path.start_protocol != input_path.last_protocol and\n input_path.start_protocol != self.id):\n continue\n\n value_references = self.get_value_references(input_path)\n\n for value_reference in value_references.values():\n value_reference.replace_protocol(old_id, new_id)\n\n for output_path in self.provided_outputs:\n output_path.replace_protocol(old_id, new_id)\n\n if self._id == old_id:\n self._id = new_id", "def leadingSlash(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n if path[0] == '/':\n return path\n else:\n return '/' + path", "def add_trailing_slash(path):\n if len(path) > 0:\n if path[len(path) - 1] == \"/\":\n return path\n else:\n return path + \"/\"\n else:\n return path + \"/\"", "def append_to_path(existing, addition):\n if existing == Path.rootPath():\n return Path.rootPath() + addition\n return \"{}.{}\".format(existing, addition)", "def update_url(self, path):\n self._url += \"/%s\" % path.lstrip(\"/\")\n return self", "def prepend(self, value):\n pass", "def _get_prepend(compiler, osname):\n if compiler in [\"gfortran\", \"gcc\", \"g++\", \"clang\"]:\n prepend = \"-\"\n else:\n if osname in [\"linux\", \"darwin\"]:\n prepend = \"-\"\n else:\n prepend = \"/\"\n return prepend", "def url_add(path, pk=None):\n path = url_clean(path).replace(settings.ACTION_ADMIN_LIST_SUFFIX, '')\n\n if pk is not None:\n path += '%s/' % pk\n\n return path", "def translate_path(self, path):\n # abandon query parameters\n path = path.split('?',1)[0]\n path = path.split('#',1)[0]\n # Don't forget explicit trailing slash when normalizing. Issue17324\n trailing_slash = path.rstrip().endswith('/')\n path = posixpath.normpath(urllib.unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = str(port_number)\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir): continue\n path = os.path.join(path, word)\n if trailing_slash:\n path += '/'\n return path", "def protocol_id(self, protocol_id):\n\n self._protocol_id = protocol_id", "def prepId(self, id, subchar='_'):\n return globalPrepId(id, subchar)", "def format_path(path):\n return path if path.endswith('/') else path + '/'", "def chomp_protocol(url: str) -> str:\n if \"+\" in url:\n url = url.split(\"+\", 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n if \"@\" in path:\n path, rev = path.rsplit(\"@\", 1)\n url = urlparse.urlunsplit((scheme, netloc, path, query, \"\"))\n if url.startswith(\"ssh://[email protected]/\"):\n url = url.replace(\"ssh://\", \"git+ssh://\")\n elif \"://\" not in url:\n assert \"file:\" not in url\n url = url.replace(\"git+\", \"git+ssh://\")\n url = url.replace(\"ssh://\", \"\")\n return url", "def add_closing_slash(url_path: str) -> str:\r\n if url_path[-1:] != \"/\":\r\n url_path = url_path + \"/\"\r\n return url_path", "def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"" ]
[ "0.66855586", "0.66787505", "0.6110264", "0.6004697", "0.59061235", "0.5879382", "0.5854363", "0.58252287", "0.56801903", "0.55817735", "0.5521786", "0.5509535", "0.549153", "0.54776144", "0.5396674", "0.53601295", "0.53539044", "0.5328268", "0.53162754", "0.5288578", "0.5278995", "0.527816", "0.52623796", "0.5254826", "0.5244346", "0.52364343", "0.5190872", "0.5190353", "0.51489717", "0.5143773" ]
0.8019293
0
Pops and then returns the leading protocol id from the path.
def pop_next_in_path(self): property_name, protocol_ids = ProtocolPath.to_components(self._full_path) if len(protocol_ids) == 0: return None next_in_path = protocol_ids.pop(0) self._from_components(property_name, *protocol_ids) return next_in_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_path_infomation(self):\n long_identifier = self._device_path.split('/')[4]\n protocol, remainder = long_identifier.split('-', 1)\n identifier, _, device_type = remainder.rsplit('-', 2)\n return (protocol, identifier, device_type)", "def prepend_protocol_id(self, id_to_prepend):\n property_name, protocol_ids = ProtocolPath.to_components(self._full_path)\n\n if len(protocol_ids) == 0 or (len(protocol_ids) > 0 and protocol_ids[0] != id_to_prepend):\n protocol_ids.insert(0, id_to_prepend)\n\n self._from_components(property_name, *protocol_ids)", "def get_id_netnspath(path):\n return os.path.basename(os.path.normpath(path))", "def protocol_path(protocol_file):\n return os.path.join(PROTO, protocol_file)", "def get_id(self, url):\n return url.split('/')[-1]", "def getPrefix(self):\n return( self.id.split('.')[0] )", "def _extract_id(self, dirty_id):\n if dirty_id[:1] == \"/\":\n return dirty_id.split(\"/\")[-1]\n else:\n return dirty_id", "def get_id_from_path(path):\n id = os.path.basename(path)\n if id.endswith('.xml'):\n id = id[:-4]\n return id", "def getid(data):\n return int(data.split('/')[-1])", "def getPID(self) -> \"Optional[str]\":\n the_pid: \"Optional[str]\"\n if self.id is not None:\n the_pid = str(self.id)\n parsedRepoURL = urllib.parse.urlparse(the_pid)\n\n # If it is not an URI / CURIE\n if parsedRepoURL.scheme == \"\":\n if (self.trs_endpoint is not None) and len(self.trs_endpoint) > 0:\n parsedTRSURL = urllib.parse.urlparse(self.trs_endpoint)\n trs_steps: \"Sequence[str]\" = parsedTRSURL.path.split(\"/\")\n pid_steps = [\"\", urllib.parse.quote(the_pid, safe=\"\")]\n\n if self.version_id is not None:\n pid_steps.append(\n urllib.parse.quote(str(self.version_id), safe=\"\")\n )\n\n the_pid = urllib.parse.urlunparse(\n urllib.parse.ParseResult(\n scheme=TRS_SCHEME_PREFIX,\n netloc=parsedTRSURL.netloc,\n path=\"/\".join(pid_steps),\n params=\"\",\n query=\"\",\n fragment=\"\",\n )\n )\n else:\n self.logger.debug(\"trs_endpoint was not provided\")\n the_pid = None\n else:\n the_pid = None\n\n return the_pid", "def first_path_segment(self, path):\n if not path:\n return None\n slashes = ''\n while path.startswith('/'):\n slashes += '/'\n path = path[1:]\n idx = path.find('/')\n if idx == -1:\n idx = len(path)\n return path, idx, slashes", "def first_path(self):\n\t\treturn self.args[1]", "def deduce_path(self, id, ns, path):\n locations = self.id_lookups[ns][id]\n if path != '':\n if path not in locations.keys():\n print \"** Error\"\n print \"Specified path '%s' not in name space '%s' locations for id '%s'\" % (path, ns, id)\n traceback.print_stack()\n sys.exit(1)\n else:\n if len(locations) > 1:\n print \"** Error\"\n print \"Path not specified for '%s', but must be since\" %id\n print \" there are multiple locations:\" + \", \".join(locations.keys())\n traceback.print_stack()\n sys.exit(1)\n path = locations.keys()[0]\n return path", "def get_operation_id_number(value: str) -> str:\n return value.split('/')[-1]", "def path_to_htid(path):\n\n # Drop the leading path.\n filename = os.path.split(path)[-1]\n\n # Split the library code and record id.\n lib_code, rec_id = filename.split('.', maxsplit=1)\n\n # Drop any file extensions.\n rec_id = rec_id.split('.', 1)[0]\n\n # Undo the remaining character substitutions.\n rec_id = rec_id.replace('+', ':').replace('=', '/').replace(',', '.')\n\n # Reunite the library code and record id.\n return f'{lib_code}.{rec_id}'", "def get_id(self, resource):\n try:\n return resource.href.split('/')[-1]\n except AttributeError:\n return 
resource['href'].split('/')[-1]", "def get_path(self, path_id):\n\t\tpass", "def protocol(self, code):\n return self.url.scheme", "def parse_id(string):\n return string.split('/')[-1]", "def __find_protocol(self, url):\n match = self.__REGEX_SCHEMA.search(url)\n if match:\n protocol = match.group(0).split(':')[0]\n return protocol\n return None", "def parse_link_to_id(self, playlist_link: str) -> str:\n split_1 = playlist_link.split('/')[4]\n split_2 = split_1.split('?')\n return split_2[0]", "def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id", "def get_object_id(path):\n return str.split(os.path.basename(path), \"_\")[1][0]", "def orig_path(self):\n return self._path[0]", "def path_name(self, path):\r\n ind = path.rfind(\"/\") + 1\r\n return (path[:ind], path[ind:])", "def get_part_number(part_device_path):\n part_num = \"\"\n if 'by-path' in part_device_path or 'by-id' in part_device_path:\n part_num = re.match('.*?([0-9]+)$', part_device_path).group(1)\n return part_num", "def _getForwardURL(self, path:str) -> str:\n\t\tLogging.logDebug(path)\n\t\tr, pe = CSE.remote.getCSRFromPath(path)\n\t\tLogging.logDebug(str(r))\n\t\tif r is not None and (poas := r.poa) is not None and len(poas) > 0:\n\t\t\treturn f'{poas[0]}/~/{\"/\".join(pe[1:])}'\t# TODO check all available poas.\n\t\treturn None", "def protocol(self):\n return self._info.next # pylint: disable=E1101", "def getProtocol(self) -> str:\n ...", "def parse_managed_path(path):\n fields = path.split(':', 1)\n return fields[0], fields[1]" ]
[ "0.6219566", "0.5965659", "0.5783188", "0.5747099", "0.5634188", "0.5616808", "0.56127423", "0.55606073", "0.5552654", "0.5540661", "0.55257887", "0.5493753", "0.54796386", "0.54419166", "0.54330134", "0.5403448", "0.53902686", "0.53836745", "0.5362701", "0.5354733", "0.5326924", "0.5320347", "0.5319942", "0.5306683", "0.5292473", "0.52901155", "0.5240732", "0.5220825", "0.5214722", "0.5201582" ]
0.65887743
0
Appends a uuid to each of the protocol ids in the path
def append_uuid(self, uuid): if self.is_global: # Don't append uuids to global paths. return property_name, protocol_ids = ProtocolPath.to_components(self._full_path) appended_ids = [] for protocol_id in protocol_ids: if protocol_id is None: continue appended_id = graph.append_uuid(protocol_id, uuid) appended_ids.append(appended_id) self._from_components(property_name, *appended_ids)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_uuid(self, value):\n if self.id.find(value) >= 0:\n return\n\n self._id = graph.append_uuid(self.id, value)\n\n for input_path in self.required_inputs:\n\n input_path.append_uuid(value)\n\n value_references = self.get_value_references(input_path)\n\n for value_reference in value_references.values():\n value_reference.append_uuid(value)\n\n for output_path in self.provided_outputs:\n output_path.append_uuid(value)", "def make_id_path(base_path, id_) -> Path:\n\n return base_path / (ID_FMT.format(id=id_))", "def build_path(self, context):\n if not self._uuid:\n raise ValueError(\"Descriptor UUID not initialized\")\n\n parts = self.build_container_path_parts(context) \n parts.append(self._uuid)\n self._path = '/'.join(map(str, parts))\n return self._path", "def vios_uuids(self):\n raise NotImplementedError()", "def appendPath(paths: List[unicode]) -> unicode:\n ...", "def _add_path(dir_name, payload_info_list):\n path_count_dict = {}\n for payload_info_dict in payload_info_list:\n file_name = payload_info_dict[\"filename\"] or payload_info_dict[\"pid\"]\n path = d1_common.utils.filesystem.gen_safe_path(dir_name, \"data\", file_name)\n path_count_dict.setdefault(path, 0)\n path_count_dict[path] += 1\n if path_count_dict[path] > 1:\n path_base, path_ext = os.path.splitext(path)\n path = \"{}({}){}\".format(path_base, path_count_dict[path], path_ext)\n payload_info_dict[\"path\"] = path", "def build_path_device(device_id):\n padding_device = PAIRS_SHINGLES_DEVICE * 2\n s = padding_zeroes(int(int(device_id) % NUMBER_DEVICES), padding_device)\n res = ''\n for i in range(0, padding_device, 2):\n res += s[i: i+2] + '/'\n return res", "def _get_id(self):\n self.unix_time = self.__get_unix_now()\n self.mac_addr = self.__get_random_12_str() # self.__get_mac_address()\n self.pid_hex = self.__get_pid()\n self.sequence_num = self.__get_sequence_number()\n\n # uuid format: 8 + 4 + 4 + 4 + 12\n _tmp_list = [self.unix_time[:8],\n self.unix_time[8:],\n self.mac_addr[:4],\n self.mac_addr[4:8],\n self.mac_addr[8:] + self.pid_hex + self.sequence_num]\n return _tmp_list", "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def construct_path(id_val):\n id_val = str(id_val)\n path = id_val[:3] + \"/\" + id_val[3:6] + \"/\" + id_val[6:9] + \"/\"\n path += id_val\n return path", "def _find_id_path(self, id=None, path=None):\n i = self._get_icalendar_component(assert_one=False)\n if not id and getattr(self, \"id\", None):\n id = self.id\n if not id:\n id = i.pop(\"UID\", None)\n if id:\n id = str(id)\n if not path and getattr(self, \"path\", None):\n path = self.path\n if id is None and path is not None and str(path).endswith(\".ics\"):\n id = re.search(\"(/|^)([^/]*).ics\", str(path)).group(2)\n if id is None:\n id = str(uuid.uuid1())\n i.pop(\"UID\", None)\n i.add(\"UID\", id)\n\n self.id = id\n\n for x in self.icalendar_instance.subcomponents:\n if not isinstance(x, icalendar.Timezone):\n error.assert_(x.get(\"UID\", None) == self.id)\n\n if path is None:\n path = self.generate_url()\n else:\n path = self.parent.url.join(path)\n\n self.url = URL.objectify(path)", "def uuid(self):\n raise NotImplementedError", "def uuid(self):\n raise NotImplementedError", "def uuid(self):\n raise NotImplementedError", "def prepend_protocol_id(self, id_to_prepend):\n property_name, protocol_ids = ProtocolPath.to_components(self._full_path)\n\n if len(protocol_ids) == 0 or (len(protocol_ids) > 0 and protocol_ids[0] != id_to_prepend):\n protocol_ids.insert(0, 
id_to_prepend)\n\n self._from_components(property_name, *protocol_ids)", "def uuid(self, value):\n self.unique_id = UUID(str(value)).hex", "def add_from_uuid_list(self):\n\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split('\\n')[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add('Invalid uuid lenght.')\n continue\n \n self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return", "def svn_client_uuid_from_path(char_uuid, char_path, svn_wc_adm_access_t_adm_access, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def addUniqueId(self):\n self.attributes[\"id\"] = \"H_%d\" % Element._idCounter\n Element._idCounter = Element._idCounter + 1", "def write_unique_ids(self, out_file):\n with open(out_file,'w') as f:\n f.writelines([self.prefix+x+'\\n' for x in self.unique_ids])\n return", "def _add_unique_id_fields(self):\r\n field_types = {\"String\": \"TEXT\", \"Single\": \"FLOAT\", \"Double\": \"DOUBLE\", \"SmallInteger\": \"SHORT\",\r\n \"Integer\": \"LONG\", \"OID\": \"LONG\"}\r\n origin_field_def = [self.origin_unique_id_field_name, field_types[self.origin_id_field_obj.type]]\r\n if self.origin_id_field_obj.type == \"String\":\r\n origin_field_def += [self.origin_unique_id_field_name, self.origin_id_field_obj.length]\r\n dest_field_def = [self.dest_unique_id_field_name, field_types[self.dest_id_field_obj.type]]\r\n if self.dest_id_field_obj.type == \"String\":\r\n dest_field_def += [self.dest_unique_id_field_name, self.dest_id_field_obj.length]\r\n self.rt_solver.addFields(arcpy.nax.RouteInputDataType.Stops, [origin_field_def, dest_field_def])", "def create_data_id(datatype, paths=None, add_uuid=True):\n # Need data or added uuid\n fullpath = [\n datatype,\n ]\n if paths:\n fullpath.extend(list(paths))\n if add_uuid:\n fullpath.append(str(uuid.uuid1()))\n if not fullpath:\n raise Exception(\"No path created\")\n return \"http://earthscope.org/%s/\" % \"/\".join(fullpath)", "def generate_uri(uri):\n return uri[:-5] + uuid.uuid4().hex", "def _get_path_infomation(self):\n long_identifier = self._device_path.split('/')[4]\n protocol, remainder = long_identifier.split('-', 1)\n identifier, _, device_type = remainder.rsplit('-', 2)\n return (protocol, identifier, device_type)", "def _unique_path(prefix):\n suffix = ''.join([\n random.choice(string.ascii_letters) for i in range(8)\n ])\n return '%s/%r.%s' % (prefix, time.time(), suffix)", "def _generate_urls(base_url, mbid):\n for level in LEVELS:\n yield base_url + mbid + level", "def uuid( *args ):\n t = long( time.time() * 1000 )\n r = long( random.random()*100000000000000000L )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random()*100000000000000000L\n data = str(t)+' '+str(r)+' '+str(a)+' '+str(args)\n data = hashlib.md5(data).hexdigest()\n return data", "def _from_components(self, property_name, *protocol_ids):\n\n assert property_name is not None and isinstance(property_name, str)\n\n assert property_name.find(ProtocolPath.path_separator) < 0\n\n for protocol_id in protocol_ids:\n\n assert protocol_id is not None and 
isinstance(protocol_id, str)\n\n assert protocol_id.find(ProtocolPath.property_separator) < 0 and \\\n protocol_id.find(ProtocolPath.path_separator) < 0\n\n protocol_path = ProtocolPath.path_separator.join(protocol_ids)\n\n if len(protocol_ids) == 0:\n protocol_path = ''\n\n self._full_path = '{}{}{}'.format(protocol_path,\n ProtocolPath.property_separator,\n property_name)", "def uuid(self, value):\n if value is not None:\n self.keystore['id'] = value\n elif 'id' in self.keystore:\n self.keystore.pop('id')" ]
[ "0.5968178", "0.5823053", "0.5382426", "0.534203", "0.5339131", "0.5256109", "0.5218487", "0.521202", "0.5207927", "0.52027667", "0.51832616", "0.51754117", "0.51754117", "0.51754117", "0.51680183", "0.512681", "0.50913817", "0.50849915", "0.5069002", "0.50551116", "0.5046208", "0.503472", "0.5010325", "0.499988", "0.4994533", "0.49730235", "0.49676585", "0.49646586", "0.49632132", "0.49540392" ]
0.7560965
0
Retrieve a dict whose form is {ASIN => item details}. The dict omits any keys that are not present in the cache.
def get_cached_items(self, asins): keys = [to_bytes(asin) for asin in asins] # key must be bytes cached_json_items = self.cache.get_multi(keys, key_prefix=self.key_prefix) cached_items = {} for key, value in cached_json_items.items(): # Although pylibmc automatically pickle dicts or objects, # JSON is more portable. # Convert key and value into (Py3) str cached_items[to_str(key)] = json.loads(to_str(value)) return cached_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def read_item(sessionKey):\n\n # Try to get the element from the cache, erasing it if it exists\n payload = c.pop(sessionKey, \"\")\n return {\"payload\": payload}", "def extract_key_item_data(item_data):\n extracted_item_data = {}\n\n for item_id in item_data:\n key_data = {}\n key_data[\"id\"] = item_id\n key_data[\"name\"] = item_data[item_id][\"name\"]\n key_data[\"image\"] = item_data[item_id][\"image\"][\"full\"]\n key_data[\"gold\"] = item_data[item_id][\"gold\"][\"total\"]\n key_data[\"tags\"] = item_data[item_id][\"tags\"]\n extracted_item_data[item_id] = key_data\n \n return extracted_item_data", "def _get_item(dic: dict, keys: list) -> dict:\n\tfor key in keys:\n\t\tdic = dic[key]\n\n\treturn dic", "def get_inventory_from_cache(self):\n cache = open(self.cache_path_cache, 'r')\n json_inventory = cache.read()\n return json_inventory", "def get_dict(self, item: str) -> dict:\n return dict(self._get_subset(item))", "def load_item_map(cache_file):\n with open(cache_file, 'rb') as f:\n full_item_map = pickle.load(f)\n return full_item_map", "def item_dict():\n\n items = {'page': 'pages', 'table': 'tables',\n 'viz': 'vizualisation', 'column': 'columns'}\n return items", "def get_info(self):\n hits, misses, cacheSizeBytes, cacheSize = (\n self.hits,\n self.misses,\n self.__get_cache_size(),\n len(self.__recentAccessed),\n )\n filled = cacheSizeBytes / self.__maxSize\n\n return {\n \"hits\": hits,\n \"misses\": misses,\n \"cacheSize\": {\"bytes\": cacheSizeBytes, \"items\": cacheSize},\n \"filled\": filled,\n }", "def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"toppings\": self.toppings,\n \"price\": self.get_price()\n }", "def get_rdap_info_from_cache(ip_address: str) -> dict:\n file_name = rdap_info_cache_file_name(ip_address)\n if os.path.isfile(file_name):\n with open(file_name, 'r') as json_file:\n return json.load(json_file)\n\n return None", "def load_cache():\n return {}", "def _cache(item_label, item_list):\n id_label = item_label + '_id'\n mbid_label = item_label + '_mbid'\n echonest_id_label = item_label + '_echonest_id'\n items = {}\n for item in item_list:\n key = '/%s/%s' % (item_label, item[id_label])\n items[key] = item\n musicbrainz_id = item.get(mbid_label, None)\n if musicbrainz_id:\n items['/musicbrainz/%s/%s' % (item_label, musicbrainz_id)] = key\n # echonest_id = item.get(echonest_id_label, None)\n # if echonest_id:\n # items['/echonest/%s/%s' % (item_label, echonest_id)] = key\n application.config.get('CACHE').set_many(items)", "def _dictview(self) -> TracksDict:\n return self._items[self._keys, self._beads] # type: ignore", "def getCacheDictFromRawData( rawList ):\n\n res = [ ( ( name, sType ), status ) for name, sType, status in rawList ]\n return dict( res )", "def get_item_info(self, item_id):\n request_name = \"get_shop_info\"\n\n items = self.make_request(request_name, url_id=item_id)\n try:\n item = items[0]\n item_dict = dict()\n item_dict[\"id\"] = item[\"@id\"].encode('utf-8')\n item_dict[\"name\"] = item[\"label\"].encode('utf-8')\n item_dict[\"shelf\"] = item[\"shelf\"].encode('utf-8')\n item_dict[\"slot\"] = item[\"slot\"].encode('utf-8')\n item_dict[\"quantity\"] = item[\"quantity\"]\n return item_dict\n except Exception as e:\n print(\"Encountered exception while getting item\", item_id, \"\\n\", str(e))\n return None", "def get_info(self) -> Optional[Dict[str, Any]]:", "def get_banners():\n banners = cache.get('banners')\n if not banners:\n 
banners = Banner.objects.filter(general=True)\n cache.set('banners', banners)\n return {'banners': banners}", "def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "def item_without_name():\n return {'value':340}", "def return_as_dictionary(self):\n item = Inventory.return_as_dictionary(self)\n item['Brand'] = self.brand\n item['Voltage'] = self.voltage\n return item", "def prettify_invites(self):\n return {k: dict(v) if isinstance(v, defaultdict) else v for (k, v) in self.cached_invites.items()}", "def os_release_items(cls):\n item_dict = {}\n if not os.path.isfile(cls.OS_RELEASE_FILE):\n return item_dict\n\n searched_items = ['ID', 'ID_LIKE']\n with open(cls.OS_RELEASE_FILE) as f_in:\n for line in f_in:\n for item in searched_items:\n pattern = r'^{0}=[\\'\"]?([\\w ]+)?[\\'\"]?$'.format(item)\n match = re.search(pattern, line)\n if match:\n item_dict[item] = match.group(1)\n return item_dict", "def _pack_items(self):\n identifiers = tuple(self.identify_items(self))\n cache_keys = self.make_cache_keys(identifiers)\n cache_items = dict(izip(cache_keys, self))\n self.cache.set_many(cache_items, self.cache_timeout)\n return identifiers", "def gather_cache(self):\n cache = {\"grains\": {}, \"pillar\": {}}\n if self.grains or self.pillar:\n if self.opts.get(\"minion_data_cache\"):\n minions = self.cache.list(\"minions\")\n if not minions:\n return cache\n for minion in minions:\n total = self.cache.fetch(\"minions/{}\".format(minion), \"data\")\n\n if \"pillar\" in total:\n if self.pillar_keys:\n for key in self.pillar_keys:\n if key in total[\"pillar\"]:\n cache[\"pillar\"][minion][key] = total[\"pillar\"][key]\n else:\n cache[\"pillar\"][minion] = total[\"pillar\"]\n else:\n cache[\"pillar\"][minion] = {}\n\n if \"grains\" in total:\n if self.grain_keys:\n for key in self.grain_keys:\n if key in total[\"grains\"]:\n cache[\"grains\"][minion][key] = total[\"grains\"][key]\n else:\n cache[\"grains\"][minion] = total[\"grains\"]\n else:\n cache[\"grains\"][minion] = {}\n return cache", "def get_item_info(self, item, start_time=date.today(), end_time=date.today() + timedelta(days=+1), arg=None):\n ret_info = {}\n if arg is not None:\n index = self._items[item].index(arg)\n assert index != -1\n ret_info[arg] = self.retrive_url(self._linkurl_dict[arg], convert_date_time(start_time), convert_date_time(end_time))\n else:\n\n for req_index in self._items[item]:\n ret_info[req_index] = self.retrive_url(self._linkurl_dict[req_index], convert_date_time(start_time), convert_date_time(end_time))\n return ret_info", "def getCouponPurchaseDict(coupon_file):\n file_handle = open(coupon_file,'rb')\n file_reader = csv.DictReader(file_handle)\n coupon_purchase_dict = {}\n for row in file_reader:\n temp_list = coupon_purchase_dict.get(row['USER_ID_hash'], [])\n temp_list.append(row)\n coupon_purchase_dict[row['USER_ID_hash']] = temp_list\n file_handle.close()\n return coupon_purchase_dict", "def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]", "def get_item(item_code):\n u = models.Items.query.filter_by(code=item_code).first()\n u_dict = u.__dict__\n item = dict(\n item_code = u_dict['item_code'],\n item_name = u_dict['item_name'],\n size_code = get_size_code( u_dict['size_code']),\n color_code = get_color_code( u_dict['color_code']),\n quality_code = get_quality_code( u_dict['quality_code']),\n cost_price = u_dict['variants']['cost_price'],\n selling_price = 
u_dict['variants']['selling_price'],\n quantity = u_dict['variants']['quantity']\n )\n return make_response(jsonify(item))", "def get_info(self):\n return {}" ]
[ "0.5769965", "0.5732929", "0.57088935", "0.556562", "0.55456996", "0.5544044", "0.551607", "0.5514118", "0.54914254", "0.54679686", "0.54657054", "0.5446405", "0.54148483", "0.5413959", "0.54104495", "0.5409624", "0.5385252", "0.5321656", "0.53159004", "0.52895033", "0.52829516", "0.52447224", "0.5227108", "0.5214766", "0.5195123", "0.51923454", "0.5161674", "0.5160701", "0.51365143", "0.5099376" ]
0.69177437
0
recursively applies the function fkt to every element and sublist of lst
def recursive_map(fkt, lst): return [recursive_map(fkt,item) if type(item) is list else fkt(item) for item in lst]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_function_to_nested_list(func, l):\n from itertools import chain\n result = func(list(chain(*l)))\n csum = np.cumsum(map(len, l))\n new_l = [result[(0 if i == 0 else csum[i-1]):csum[i]] for i in range(len(l))]\n return new_l", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def flatten(lst):\n\n for x in lst:\n if isinstance(x, list):\n for x in flatten(x):\n yield x\n else:\n yield x", "def sum_list(lst):\n\n if lst == []:\n return 0\n else:\n return lst[0] + sum_list(lst[1:])", "def flatten(lst):\n \n for x in lst:\n if isinstance(x, list):\n for x in flatten(x):\n yield x\n else:\n yield x", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def flatten(lst):\n \"*** YOUR CODE HERE ***\"", "def apply_tree(tree: dict, func: Callable, args: Optional[Tuple] = None, kwargs: Optional[Mapping] = None) -> None:\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n func(item, *args, **kwargs)\n explored.add(uid)\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def apply(L, f):\n\n result = []\n for i in L:\n result.append(f(i))\n\n return result", "def recursive_map(x, function):\n if isinstance(x, (list, tuple, set)):\n t = type(x)\n return t(map(lambda e: recursive_map(e, function), x))\n else:\n return function(x)", "def flatten_me(lst, new_lst=None):\n if new_lst is None:\n new_lst = []\n\n for item in lst:\n if isinstance(item, list):\n flatten_me(item, new_lst)\n else:\n new_lst.append(item)\n\n return new_lst", "def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])", "def apply(self, func):\n ret = [func(self)]\n for _, node in self.children.items():\n ret.extend(node.apply(func))\n return ret", "def operate_recursive(function: Callable[..., V], *iterables: RecursiveIterable[V]) -> RecursiveList[V]:\n return _operate_recursive(function, iterables, [])", "def count_recursively(lst):\n\n if lst == []:\n return 0\n \n return 1 + count_recursively(lst[1:])", "def inorderTraverse_itr(self, root, lst):\r\n if not root:\r\n return\r\n\r\n cur = root\r\n stk = []\r\n while stk or cur:\r\n while cur:\r\n stk.append(cur)\r\n cur = cur.left\r\n cur = stk.pop() # left_most\r\n lst.append(cur.val)\r\n cur = cur.right\r\n # if cur.right: # should go to next iteration\r\n # cur = cur.right\r\n # stk.append(cur)\r", "def map(self, f: Callable[[Any], Any]) -> RecursiveList:\n # If empty, return empty list\n if self.is_empty():\n return RecursiveList([])\n else:\n # Apply f to the first element and make a new list to return\n rl = RecursiveList([f(self._first)])\n # Map the rest of the list and set it to rl's _rest\n rl._rest = self._rest.map(f) # recursive call\n return rl", "def 
recursive_mapWithDepth(x, function, apply_depth):\n if count_nest(x) == apply_depth:\n return function(x)\n else:\n if isinstance(x, (list, tuple, set)):\n t = type(x)\n return t(map(lambda e: recursive_mapWithDepth(e, function, apply_depth), x))", "def recursive_sum(lst):\n\n if lst == []:\n return 0\n\n else:\n\n return lst[0] + recursive_sum(lst[1:])", "def flatten_list(lst):\n assert isinstance(lst, list), \"you didn't pass a list!\"\n\n if isinstance(lst[0], list):\n if len(lst[0])>1:\n return ['-'.join(i) for i in lst] # then its a kpoints list\n return flatten_list([i[0] for i in lst])\n else:\n return [i for i in lst]", "def recurrent_sum_of_elements_in_list(lst):\n if len(lst) == 0:\n return 0\n elif len(lst) == 1:\n return lst[0]\n return lst[0] + recurrent_sum_of_elements_in_list(lst[1:])", "def count_recursively(lst):\n\n if len(lst) == 0: \n return 0\n else: \n return 1 + count_recursively(lst[1:])", "def multiply_list(lst):\n if lst == []:\n return 1\n else:\n return lst[0] * multiply_list(lst[1:])", "def _recursive(inputlst, dimsizes):\n if len(dimsizes) == 1:\n return list(inputlst)\n\n outerlist = []\n chunksize = len(inputlst)//dimsizes[0]\n for i in range(0, len(inputlst), chunksize):\n chunk = inputlst[i:i+chunksize]\n outerlist.append(_recursive(chunk, dimsizes[1:]))\n\n return outerlist", "def lis_recursive(array):\n\n #TODO", "def accumulate(lst):\r\n prev = 0\r\n for i in range(len(lst)):\r\n if isinstance(lst[i], list):\r\n inside = accumulate(lst[i])\r\n prev = inside + prev\r\n else:\r\n prev = lst[i] + prev\r\n lst[i] = prev\r\n return lst[-1]", "def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):\n if isinstance(data, (tuple, list)):\n return honor_type(\n data,\n (\n recursively_apply(\n func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n )\n for o in data\n ),\n )\n elif isinstance(data, Mapping):\n return type(data)(\n {\n k: recursively_apply(\n func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n )\n for k, v in data.items()\n }\n )\n elif test_type(data):\n return func(data, *args, **kwargs)\n elif error_on_other_type:\n raise TypeError(\n f\"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of \"\n f\"objects that are valid for `{test_type.__name__}` should be passed.\"\n )\n return data", "def list_stack_rec(lst: list, stk: Stack) -> None:\n for element in lst:\n stk.add(element)\n\n while not stk.is_empty(): # while the stack is not empty\n top_item = stk.remove()\n if isinstance(top_item, list):\n list_stack_rec(top_item, stk) # recursive call\n else:\n print(top_item)", "def scanl(f, base, l):\n yield base\n for x in l:\n base = f(base, x)\n yield base", "def walk_values_rec(f, coll):\n if is_mapping(coll):\n return f(walk_values(walk_values_rec(f), coll))\n elif is_list(coll):\n return f(list(map(walk_values_rec(f), coll)))\n else:\n return f(coll)" ]
[ "0.70438933", "0.6275951", "0.58420646", "0.5804727", "0.5795173", "0.57414764", "0.57273996", "0.56959635", "0.568941", "0.56869394", "0.56300324", "0.56258065", "0.56181127", "0.5582306", "0.55785596", "0.5575358", "0.55268943", "0.5515409", "0.5505422", "0.548368", "0.5460135", "0.54489404", "0.5439994", "0.5427778", "0.5403835", "0.5401769", "0.5400442", "0.5395187", "0.53894985", "0.53596854" ]
0.7568617
0
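A minimal usage sketch of the recursive_map negative above; the nested sample container and the squaring lambda are invented for illustration and are not part of the dataset row.
def recursive_map(x, function):
    # Copied from the negatives above: rebuilds each container type
    # around the mapped elements, applying `function` only to leaves.
    if isinstance(x, (list, tuple, set)):
        t = type(x)
        return t(map(lambda e: recursive_map(e, function), x))
    else:
        return function(x)

# Invented sample data, purely illustrative.
assert recursive_map([1, (2, 3), {4}], lambda v: v * v) == [1, (4, 9), {16}]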
Formats a period in seconds into a string
def format_period(seconds): if seconds == (24 * 60 * 60): return "day" elif seconds == (60 * 60): return "hour" elif seconds > (24 * 60 * 60): return "%.1f days" % (seconds / 24. / 60. / 60.) elif seconds > (60 * 60): return "%.1f hours" % (seconds / 60. / 60.) elif seconds > (60): return "%.1f minutes" % (seconds / 60.) else: return "%.0f seconds" % seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_seconds(s):\n return '%dh %dm' % (s//3600, (s//60) % 60)", "def format_seconds(duration):\n\treturn stats_utils.format_seconds(duration)", "def format_time(seconds: float) -> str:\n # return str(timedelta(seconds=seconds))[2:10] if seconds != 0.0 else \"00:00.00\"\n if seconds == 0.0:\n return \"00.00\"\n elif seconds < 60.0:\n return str(timedelta(seconds=seconds))[5:10] # SS:DD, d decimal\n else:\n return str(timedelta(seconds=seconds))[2:7] # MM:SS", "def format(t):\n tenths = t % 10\n seconds = (t // 10) % 60\n minutes = (t // 10) // 60\n if seconds < 10:\n seconds_str = '0' + str(seconds)\n else:\n seconds_str = str(seconds)\n t_string = str(minutes) + ':' + seconds_str + '.' + str(tenths)\n return t_string", "def _format_time(seconds):\n hrs = seconds // 3600\n seconds -= 3600 * hrs\n mins = seconds // 60\n seconds -= 60 * mins\n return '%02dh%02dm%02ds' % (hrs, mins, seconds)", "def format_duration(seconds: float) -> str:\n if seconds < 60:\n return f'{seconds:.0f}s'\n elif seconds < 3600:\n minutes = math.floor(seconds / 60)\n seconds -= minutes * 60\n return f'{minutes}m{seconds:.0f}s'\n else:\n hours = math.floor(seconds / 3600)\n seconds -= hours * 3600\n minutes = math.floor(seconds / 60)\n seconds -= minutes * 60\n return f'{hours}h{minutes}m{seconds:.0f}s'", "def format_seconds(num_sec):\n if num_sec < 60:\n return \"%s %s\" % (format_num(num_sec), pluralize(num_sec, \"second\"))\n return format_millis(num_sec * 1000)", "def format_seconds(num_seconds: Union[int, float]) -> str:\n # todo: maybe negative numbers should be in brackets? e.g. \"(-2.34 days)\"\n # todo: how to handle up to centuries/millenia but also default back to years for weird numbers? maybe just a special cast for near whole numbers of centuries?\n # todo: extend down to picoseconds, or planck time for fun\n # todo: handle inf and -inf, and probably nan too\n # todo: handle decimal?\n\n # handle negatives\n if num_seconds < 0:\n minus = '-'\n else:\n minus = ''\n num_seconds = abs(num_seconds)\n\n # zero (not compatible with decimals below)\n if num_seconds == 0:\n return '0 seconds'\n\n # 1 or more seconds\n if num_seconds >= 1:\n unit = 0\n denominators = [60.0, 60.0, 24.0, 7.0, 365.25 / 84.0, 12.0, 10.0]\n while unit < 6 and num_seconds > denominators[unit] * 0.9:\n num_seconds /= denominators[unit]\n unit += 1\n unit_str = ['seconds', 'minutes', 'hours', 'days', 'weeks', 'months', 'years', 'decades'][unit]\n\n # singular form\n if num_seconds == 1:\n unit_str = unit_str[:-1]\n\n # exact or float\n if num_seconds % 1:\n return f'{minus}{num_seconds:,.2f} {unit_str}'\n else:\n return f'{minus}{num_seconds:,.0f} {unit_str}'\n\n # fractions of a second (ms, μs, ns)\n else:\n unit = 0\n while unit < 3 and num_seconds < 0.9:\n num_seconds *= 1000\n unit += 1\n unit = ['seconds', 'milliseconds', 'microseconds', 'nanoseconds'][unit]\n\n # singular form\n if num_seconds == 1:\n unit = unit[:-1]\n\n # exact or float\n if num_seconds % 1 and num_seconds > 1:\n return f'{minus}{num_seconds:,.2f} {unit}'\n elif num_seconds % 1:\n # noinspection PyStringFormat\n num_seconds = f'{{N:,.{1 - int(math.floor(math.log10(abs(num_seconds))))}f}}'.format(N=num_seconds)\n return f'{minus}{num_seconds} {unit}'\n else:\n return f'{minus}{num_seconds:,.0f} {unit}'", "def format(t):\r\n \r\n tenths_of_seconds = t % 10\r\n t /= 10\r\n seconds = t % 60\r\n minutes = t / 60\r\n \r\n # Format String: http://www.codeskulptor.org/docs.html#string-formatting\r\n formatted_time = '%(minutes)0d:%(seconds)02d.%(ts)0d' % \\\r\n 
{\r\n \"minutes\": minutes, \r\n \"seconds\": seconds, \r\n \"ts\": tenths_of_seconds\r\n }\r\n \r\n return formatted_time", "def format_time(t: float):\n if t >= 60:\n return f\"{round(t / 60.0, 2)} mins\"\n else:\n return f\"{round(t, 2)} secs\"", "def format(time_in_seconds):\n \"\"\" in tenths of seconds into formatted string A:BC.D \"\"\"\n # A ten of minutes \n A = time_in_seconds // 600\n Intermediate_Operation = time_in_seconds // 10\n amount_of_seconds = Intermediate_Operation %60\n # B Unity of minutes\n B = amount_of_seconds // 10\n # C ten of seconds\n C = amount_of_seconds % 10\n # D unity of seconds\n D = time_in_seconds % 10\n return str(A)+\":\"+str(B)+str(C)+\".\"+str(D)", "def format_duration(seconds, max_fields=3):\n seconds = float(seconds)\n s = int(seconds % 60)\n m = int((seconds / 60) % 60)\n h = int((seconds / 3600) % 24)\n d = int(seconds / 86400)\n\n fields = []\n for unit, value in zip(['d', 'h', 'm', 's'], [d, h, m, s]):\n if len(fields) > 0: # If it's not the first value, pad with 0s\n fields.append('{}{}'.format(str(value).rjust(2, '0'), unit))\n elif value > 0: # If there are no existing values, we don't add this unit unless it's >0\n fields.append('{}{}'.format(value, unit))\n\n fields = fields[:max_fields]\n\n # If the time was less than a second, we just return '<1s' TODO: Maybe return ms instead?\n if len(fields) == 0:\n fields.append('<1s')\n\n return ''.join(fields)", "def format_seconds(total_sec):\n delt = timedelta(seconds=total_sec)\n days = delt.days\n hours, rem = divmod(delt.seconds, 3600)\n minutes, seconds = divmod(rem, 60)\n if seconds == 0:\n return '<1 s'\n s = '%s s' % seconds\n if minutes:\n s = '%d m ' % minutes + s\n if hours:\n s = '%d h ' % hours + s\n if days:\n s = '%d d ' % days + s\n return s", "def secs_to_str(secs):\n SECS_PER_MIN = 60\n SECS_PER_HR = SECS_PER_MIN * 60\n SECS_PER_DAY = SECS_PER_HR * 24\n\n if secs < SECS_PER_MIN:\n if secs == 1:\n return '1 second'\n else:\n return '{} seconds'.format(secs)\n if secs < SECS_PER_HR:\n mins = secs // SECS_PER_MIN\n if mins == 1:\n return '1 minute'\n else:\n return '{} minutes'.format(mins)\n if secs < SECS_PER_DAY:\n hrs = secs // SECS_PER_HR\n if hrs == 1:\n return '1 hour'\n else:\n return '{} hours'.format(hrs)\n days = secs // SECS_PER_DAY\n if days == 1:\n return '1 day'\n else:\n return '{} days'.format(secs // SECS_PER_DAY)", "def time_string(time_f: float) -> str:\n m, s = divmod(time_f, 60)\n h, m = divmod(m, 60)\n\n if h < 1:\n if m < 1 and s < 1:\n msec = int(s * 1000)\n return '{:=03d}msec'.format(msec)\n\n if m < 1:\n return '{:=02.0f}sec'.format(s)\n\n return '{:=02.0f}min:{:=02.0f}sec'.format(m, s)\n else:\n return '{:=01.0f}h:{:=02.0f}min:{:=02.0f}sec'.format(h, m, s)", "def time_str(num):\n if num > 3600:\n return \"%0.2f hrs\" % (num / 3600)\n elif num > 60:\n return \"%0.2f mins\" % (num / 60)\n else:\n return \"%d seconds\" % num", "def seconds_to_str(seconds):\n (hours, remainder) = divmod(seconds, 3600)\n (minutes, seconds) = divmod(remainder, 60)\n return \"h{}m{}s{}\".format(int(hours), int(minutes), float(seconds))", "def format_duration(seconds: int) -> str:\n\n if seconds == 0:\n return 'now'\n\n result = ''\n\n years = calc_years(seconds)\n days = calc_days(seconds)\n hours = calc_hours(seconds)\n minutes = calc_minutes(seconds)\n seconds = calc_seconds(seconds)\n\n if years > 0:\n result += '{}'.format(get_string(years, 'year'))\n\n if days > 0:\n if result != '':\n result += ', {}'.format(get_string(days, 'day'))\n else:\n result += 
'{}'.format(get_string(days, 'day'))\n\n if hours > 0:\n if result != '':\n result += ', {}'.format(get_string(hours, 'hour'))\n else:\n result += '{}'.format(get_string(hours, 'hour'))\n\n if minutes > 0:\n if result != '':\n if seconds == 0:\n result += ' and {}'.format(get_string(minutes, 'minute'))\n else:\n result += ', {}'.format(get_string(minutes, 'minute'))\n else:\n result += '{}'.format(get_string(minutes, 'minute'))\n\n if seconds > 0:\n if result != '':\n result += ' and {}'.format(get_string(seconds, 'second'))\n else:\n result += '{}'.format(get_string(seconds, 'second'))\n\n return result", "def human_seconds(seconds, fmt=\"%.3g %s\"):\n t = 1e6 * seconds # start with µsec\n for suff in \"usec msec\".split():\n if t < 1000:\n return fmt % (t, suff)\n t /= 1000\n return fmt % (t, \" sec\")", "def timestr(msec):\n sec = float(msec) / 1000\n\n hours = int(sec / 3600)\n sec -= hours * 3600\n\n minutes = int(sec / 60)\n sec -= minutes * 60\n\n return f\"{hours:02d}:{minutes:02d}:{sec:06.3f}\".replace(\".\", \",\")", "def time_to_string(value):\n if value == gst.CLOCK_TIME_NONE:\n return \"--:--:--.---\"\n ms = value / gst.MSECOND\n sec = ms / 1000\n ms = ms % 1000\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n return \"%02d:%02d:%02d.%03d\" % (hours, mins, sec, ms)", "def printTime(t):\n if t < 2 * MINUTE:\n return \"%d seconds\" % (t / SECOND)\n if t < 5 * HOUR:\n return \"%d minutes\" % (t / MINUTE)\n if t < 3 * DAY:\n return \"%d hours\" % (t / HOUR)\n if t < YEAR:\n return \"%d days\" % (t / DAY)\n if (t % YEAR) == 0:\n return \"%d years\" % (t / YEAR)\n else:\n return \"%5.1f years\" % (t / YEAR)", "def seconds_to_string(seconds):\n # make sure in-between values are not hidden\n can_hide = True\n\n # days\n days = int(floor(seconds / 86400))\n days_text = str(days) + 'd '\n if days < 1:\n days_text = ''\n else:\n can_hide = False\n remaining = fmod(seconds, 86400)\n\n # hours\n hours = int(floor(remaining / 3600))\n hours_text = str(hours) + 'h '\n if hours < 1 and can_hide:\n hours_text = ''\n else:\n can_hide = True\n remaining = fmod(remaining, 3600)\n\n # minutes\n minutes = int(floor(remaining / 60))\n minutes_text = str(minutes) + 'min '\n if minutes < 1 and can_hide:\n minutes_text = ''\n remaining = fmod(remaining, 60)\n\n # seconds\n seconds = round(remaining, 2)\n\n return days_text + hours_text + minutes_text + str(seconds) + 's'", "def format_time(value: float) -> str:\n if value <= 0.01:\n return f\"{value * 1000000:.0f}us\"\n elif value <= 0.1:\n return f\"{value * 1000:.1f}ms\"\n elif value > 172800:\n return f\"{value / 86400:.2f}days\"\n elif value > 86400:\n return f\"{value / 86400:.2f}day\"\n elif value > 1800:\n return f\"{value / 3600:.2f}hr\"\n elif value > 60:\n return f\"{value / 60:.2f}min\"\n return f\"{value:.2f}s\"", "def str_timedelta(seconds):\n\n hour_minute = \"\"\n\n if seconds > 3600:\n hour_minute += str(int(seconds / 3600.0)) + \" h \"\n\n if seconds > 60:\n hour_minute += str(int(seconds / 60) % 60) + \" min \"\n\n return hour_minute + str(seconds % 60) + \" s\"", "def format_time(s):\n s = int(s)\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n return f'{h:02d}h{m:02d}m{s:02d}s'", "def format_interval(seconds):\n units = [\n ((\"week\", \"weeks\"), 604800),\n ((\"day\", \"days\"), 86400),\n ((\"hour\", \"hours\"), 3600),\n ((\"minute\", \"minutes\"), 60),\n # (('second', 'seconds'), 1)\n ]\n result = []\n for names, value in units:\n num, seconds = divmod(seconds, value)\n if num > 0:\n result.append(\"%d 
%s\" % (num, names[num > 1]))\n if seconds:\n result.append(\"%.2f %s\" % (seconds, [\"second\", \"seconds\"][seconds != 1.0]))\n return \", \".join(result)", "def pretty_print_seconds(self, num_seconds):\r\n # Here _ is the N variant ungettext that does pluralization with a 3-arg call\r\n _ = self.runtime.service(self, \"i18n\").ungettext\r\n hours = num_seconds // 3600\r\n sub_hour = num_seconds % 3600\r\n minutes = sub_hour // 60\r\n seconds = sub_hour % 60\r\n display = \"\"\r\n if hours > 0:\r\n display += _(\"{num_hour} hour\", \"{num_hour} hours\", hours).format(num_hour=hours)\r\n if minutes > 0:\r\n if display != \"\":\r\n display += \" \"\r\n # translators: \"minute\" refers to a minute of time\r\n display += _(\"{num_minute} minute\", \"{num_minute} minutes\", minutes).format(num_minute=minutes)\r\n # Taking care to make \"0 seconds\" instead of \"\" for 0 time\r\n if seconds > 0 or (hours == 0 and minutes == 0):\r\n if display != \"\":\r\n display += \" \"\r\n # translators: \"second\" refers to a second of time\r\n display += _(\"{num_second} second\", \"{num_second} seconds\", seconds).format(num_second=seconds)\r\n return display", "def seconds2str(seconds):\n\n seconds = abs(seconds)\n days = hours = minutes = 0\n\n if seconds >= 86400:\n days = seconds / 86400\n seconds = (days - int(days)) * 86400\n\n if seconds >= 3600:\n hours = seconds / 3600\n seconds = (hours - int(hours)) * 3600\n\n if seconds >= 60:\n minutes = seconds / 60\n seconds = (minutes - int(minutes)) * 60\n\n strtime = \"\"\n strtime += f\"{int(days)}d\" if days else \"\"\n strtime += f\"{int(hours)}h\" if hours else \"\"\n strtime += f\"{int(minutes)}m\" if minutes else \"\"\n strtime += f\"{round(seconds)}s\" if seconds else \"\"\n\n return strtime if strtime else \"0s\"", "def format_duration_ms(ms):\n def _format(d):\n d = str(d)\n if len(d) == 1:\n d = '0' + d\n return d\n\n s = int(ms / 1000)\n if s < 60:\n return '00:{}'.format(_format(s))\n\n m, s = divmod(s, 60)\n return '{}:{}'.format(_format(m), _format(s))" ]
[ "0.74343485", "0.73177636", "0.730745", "0.7167537", "0.70485187", "0.70414513", "0.7032614", "0.7026307", "0.69955707", "0.6857048", "0.68486637", "0.67831635", "0.67334914", "0.67275393", "0.67202556", "0.6701395", "0.66790986", "0.6652973", "0.66259664", "0.65982836", "0.6567135", "0.6556766", "0.65524006", "0.6548398", "0.65326804", "0.6529343", "0.6517842", "0.65115273", "0.650469", "0.6478319" ]
0.7953767
0
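The format_period document above picks a unit by threshold; a runnable check follows, with inputs chosen to exercise each branch.
def format_period(seconds):
    # Verbatim from the document field of the record above.
    if seconds == (24 * 60 * 60):
        return "day"
    elif seconds == (60 * 60):
        return "hour"
    elif seconds > (24 * 60 * 60):
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    elif seconds > (60 * 60):
        return "%.1f hours" % (seconds / 60. / 60.)
    elif seconds > (60):
        return "%.1f minutes" % (seconds / 60.)
    else:
        return "%.0f seconds" % seconds

assert format_period(86400) == "day"
assert format_period(172800) == "2.0 days"
assert format_period(7200) == "2.0 hours"
assert format_period(90) == "1.5 minutes"
assert format_period(45) == "45 seconds"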
Syncs tc, db, and bwmon.pickle. Then, starts new slices, kills old ones, and updates byte accounts for each running slice. Sends emails and caps those that went over their limit.
def sync(nmdbcopy): # Defaults global DB_FILE, \ period, \ default_MaxRate, \ default_Maxi2Rate, \ default_MaxKByte,\ default_Maxi2KByte,\ default_Share, \ dev_default # All slices names = [] # In case the limits have changed. default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000) default_Maxi2Rate = int(bwlimit.bwmax / 1000) # Incase default isn't set yet. if default_MaxRate == -1: default_MaxRate = 1000000 # xxx $Id$ # with svn we used to have a trick to detect upgrades of this file # this has gone with the move to git, without any noticeable effect on operations though try: f = open(DB_FILE, "r+") logger.verbose("bwmon: Loading %s" % DB_FILE) (version, slices, deaddb) = pickle.load(f) f.close() # Check version of data file if version != "$Id$": logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE)) raise Exception except Exception: version = "$Id$" slices = {} deaddb = {} # Get/set special slice IDs root_xid = bwlimit.get_xid("root") default_xid = bwlimit.get_xid("default") # Since root is required for sanity, its not in the API/plc database, so pass {} # to use defaults. if root_xid not in slices.keys(): slices[root_xid] = Slice(root_xid, "root", {}) slices[root_xid].reset({}, {}) # Used by bwlimit. pass {} since there is no rspec (like above). if default_xid not in slices.keys(): slices[default_xid] = Slice(default_xid, "default", {}) slices[default_xid].reset({}, {}) live = {} # Get running slivers that should be on this node (from plc). {xid: name} # db keys on name, bwmon keys on xid. db doesnt have xid either. for plcSliver in nmdbcopy.keys(): live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver] logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__()) logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__()) # Get actual running values from tc. # Update slice totals and bandwidth. {xid: {values}} kernelhtbs = gethtbs(root_xid, default_xid) logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__()) # The dat file has HTBs for slices, but the HTBs aren't running nohtbslices = set(slices.keys()) - set(kernelhtbs.keys()) logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__()) # Reset tc counts. for nohtbslice in nohtbslices: if live.has_key(nohtbslice): slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] ) else: logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice) del slices[nohtbslice] # The dat file doesnt have HTB for the slice but kern has HTB slicesnodat = set(kernelhtbs.keys()) - set(slices.keys()) logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__()) for slicenodat in slicesnodat: # But slice is running if live.has_key(slicenodat): # init the slice. which means start accounting over since kernel # htb was already there. slices[slicenodat] = Slice(slicenodat, live[slicenodat]['name'], live[slicenodat]['_rspec']) # Get new slices. # Slices in GetSlivers but not running HTBs newslicesxids = set(live.keys()) - set(kernelhtbs.keys()) logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__()) # Setup new slices for newslice in newslicesxids: # Delegated slices dont have xids (which are uids) since they haven't been # instantiated yet. if newslice != None and live[newslice].has_key('_rspec') == True: # Check to see if we recently deleted this slice. 
if live[newslice]['name'] not in deaddb.keys(): logger.log( "bwmon: new slice %s||wangyang add1041" % live[newslice]['name'] ) # _rspec is the computed rspec: NM retrieved data from PLC, computed loans # and made a dict of computed values. slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec']) slices[newslice].reset( {}, live[newslice]['_rspec'] ) # Double check time for dead slice in deaddb is within 24hr recording period. elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)): deadslice = deaddb[live[newslice]['name']] logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name']) slices[newslice] = deadslice['slice'] slices[newslice].xid = newslice # Start the HTB newvals = {"maxrate": deadslice['slice'].MaxRate * 1000, "minrate": deadslice['slice'].MinRate * 1000, "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000, "usedbytes": deadslice['htb']['usedbytes'] * 1000, "usedi2bytes": deadslice['htb']['usedi2bytes'], "share":deadslice['htb']['share']} slices[newslice].reset(newvals, live[newslice]['_rspec']) # Bring up to date slices[newslice].update(newvals, live[newslice]['_rspec']) # Since the slice has been reinitialed, remove from dead database. del deaddb[deadslice['slice'].name] del newvals else: logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name']) # Move dead slices that exist in the pickle file, but # aren't instantiated by PLC into the dead dict until # recording period is over. This is to avoid the case where a slice is dynamically created # and destroyed then recreated to get around byte limits. deadxids = set(slices.keys()) - set(live.keys()) logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2)) for deadxid in deadxids: if deadxid == root_xid or deadxid == default_xid: continue logger.log("bwmon: removing dead slice %s " % deadxid) if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid): # add slice (by name) to deaddb logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name) deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]} del slices[deadxid] if kernelhtbs.has_key(deadxid): logger.verbose("bwmon: Removing HTB for %s." % deadxid) bwlimit.off(deadxid, dev = dev_default) # Clean up deaddb for deadslice in deaddb.keys(): if (time.time() >= (deaddb[deadslice]['slice'].time + period)): logger.log("bwmon: Removing dead slice %s from dat." \ % deaddb[deadslice]['slice'].name) del deaddb[deadslice] # Get actual running values from tc since we've added and removed buckets. # Update slice totals and bandwidth. {xid: {values}} kernelhtbs = gethtbs(root_xid, default_xid) logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__()) # Update all byte limites on all slices for (xid, slice) in slices.iteritems(): # Monitor only the specified slices if xid == root_xid or xid == default_xid: continue if names and name not in names: continue if (time.time() >= (slice.time + period)) or \ (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \ (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes): # Reset to defaults every 24 hours or if it appears # that the byte counters have overflowed (or, more # likely, the node was restarted or the HTB buckets # were re-initialized). 
slice.reset(kernelhtbs[xid], live[xid]['_rspec']) elif ENABLE: logger.verbose("bwmon: Updating slice %s" % slice.name) # Update byte counts slice.update(kernelhtbs[xid], live[xid]['_rspec']) logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE)) f = open(DB_FILE, "w") pickle.dump((version, slices, deaddb), f) f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_entries():\n import time\n\n while True:\n try:\n update_pending_scripts(settings['api_handler'])\n except:\n logging.exception(\"Error occured during synchronisation\")\n time.sleep(60)", "def main():\n\tlogger.warn(\"leprechaun rsync started.\")\n\n\ttime.sleep(1)\n\tts_rsync()\n\tsqlite_rsync()\n\n\twhile True:\n\t\ttry:\n\t\t\t# schedule crash guard:\n\t\t\timportlib.reload(schedule)\n\t\t\tschedule.every(1301).seconds.do(ts_rsync)\n\t\t\tschedule.every(300).seconds.do(sqlite_rsync)\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tschedule.run_pending()\n\t\t\t\texcept Exception as ex:\n\t\t\t\t\tlogger.exception(ex)\n\t\t\t\t\ttime.sleep(2)\n\t\texcept Exception as ex:\n\t\t\tlogger.exception(ex)\n\t\t\ttime.sleep(2)", "def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):\n # Prepare message parameters from the template\n message = \"\"\n params = {'slice': self.name, 'hostname': socket.gethostname(),\n 'since': time.asctime(time.gmtime(self.time)) + \" GMT\",\n 'until': time.asctime(time.gmtime(self.time + period)) + \" GMT\",\n 'date': time.asctime(time.gmtime()) + \" GMT\",\n 'period': format_period(period)}\n\n if new_maxrate != (self.MaxRate * 1000):\n # Format template parameters for low bandwidth message\n params['class'] = \"low bandwidth\"\n params['bytes'] = format_bytes(usedbytes - self.bytes)\n params['limit'] = format_bytes(self.MaxKByte * 1024)\n params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)\n\n # Cap low bandwidth burst rate\n message += template % params\n logger.log(\"bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s \" % params)\n\n if new_maxexemptrate != (self.Maxi2Rate * 1000):\n # Format template parameters for high bandwidth message\n params['class'] = \"high bandwidth\"\n params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)\n params['limit'] = format_bytes(self.Maxi2KByte * 1024)\n params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)\n\n message += template % params\n logger.log(\"bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s \" % params)\n\n # Notify slice\n if self.emailed == False:\n subject = \"pl_mom capped bandwidth of slice %(slice)s on %(hostname)s\" % params\n if DEBUG:\n logger.log(\"bwmon: \"+ subject)\n logger.log(\"bwmon: \"+ message + (footer % params))\n else:\n self.emailed = True\n logger.log(\"bwmon: Emailing %s\" % self.name)\n slicemail(self.name, subject, message + (footer % params))", "def run(self):\n self.create_all_sync_instances()", "def beat_inbox_email_bulk():\n receipt_id_email, list_of_email_notifications = email_bulk.poll()\n\n while list_of_email_notifications:\n save_emails.apply_async((None, list_of_email_notifications, receipt_id_email), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: email receipt {receipt_id_email} sent to in-flight.\")\n receipt_id_email, list_of_email_notifications = email_bulk.poll()", "def run():\n logger.verbose(\"bwmon: Thread started\")\n while True:\n lock.wait()\n logger.verbose(\"bwmon: Event received. 
Running.\")\n database.db_lock.acquire()\n nmdbcopy = copy.deepcopy(database.db)\n database.db_lock.release()\n try:\n if getDefaults(nmdbcopy) and len(bwlimit.tc(\"class show dev %s\" % dev_default)) > 0:\n # class show to check if net:InitNodeLimit:bwlimit.init has run.\n sync(nmdbcopy)\n else: logger.log(\"bwmon: BW limits DISABLED.\")\n except: logger.log_exc(\"bwmon failed\")\n lock.clear()", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def sync(self):\n self._start_slow_sync()\n self._ask_for_all_records()\n self._process_events()\n self._process_reminders()\n self._process_recurrences()\n #self._write_events()", "def run(self):\n \n # Loop through all checkers to do an initial state check\n for checker in self.checkers:\n checker.update_last_state()\n\n # Send initial heartbeat\n self._send_heartbeat()\n \n # Main loop\n while True: \n html = \"\"\n for checker in self.checkers:\n if checker.just_changed_state():\n log.warn(\"Checker {} has changed state.\"\n .format(checker.name))\n html += \"<li>\" + checker.html() + \"</li>\\n\"\n \n if isinstance(checker, Process) and checker.state() == FAIL:\n log.warn(\"Process {} is not running.\"\n .format(checker.name))\n html += (\"<li>Attempting to restart \" + \n escape(checker.name) + \"...</li>\\n\")\n try:\n 
checker.restart()\n except MaxRetriesError, e:\n self.shutdown_reason = str(e)\n return\n time.sleep(5)\n html += (\"<li>State after restart: \" + \n checker.html() + \"</li>\\n\")\n\n if html:\n html = \"<h2>STATE CHANGED:</h2>\\n<ul>\\n\" + html + \"</ul>\\n\" \n html += self.html()\n html += run_commands(self.state_change_cmds)\n self.send_email_with_time(html=html,\n subject=\"Babysitter detected\"\n \" state change.\")\n\n if self._need_to_send_heartbeat():\n self._send_heartbeat()\n\n # Check if a new data subdir has been created\n if self.base_data_dir and self.sub_data_dir:\n if self._find_last_numeric_subdir() != self.sub_data_dir:\n self._send_heartbeat(\"<p>New subdir found so about to restart \"\n \"babysitter. Below are the last stats \"\n \"for the old data subdirectory.</p>\\n\")\n raise NewDataDirError()\n \n time.sleep(UPDATE_PERIOD)", "def balance(self, printTrue = 0):\n for uid, server in self.servers_offline.items():\n for task in server.jobs:\n self.schedule_task(task)\n server.jobs = []\n number_of_jobs_reschedule = len(self.servers_jobs_list[uid])\n self.servers_jobs_list[uid] = []\n if printTrue:\n print(f\"{number_of_jobs_reschedule} jobs from {server.server_name} reassigned\")\n \n all_active_tasks = set()\n for uid, tasks in self.servers_jobs_list.items():\n if self.all_servers[uid].status:\n all_active_tasks = all_active_tasks | set(tasks)\n if not self.all_servers[uid].status:\n self.all_servers[uid].jobs = []\n self.all_servers[uid].waiting_time = 0 \n to_be_rescheduled_tasks = self.all_tasks - all_active_tasks\n if printTrue:\n print(f\"{len(to_be_rescheduled_tasks)} need(s) to be reassigned\")\n to_be_rescheduled_tasks = list(to_be_rescheduled_tasks)\n while len(to_be_rescheduled_tasks):\n task = to_be_rescheduled_tasks.pop(0)\n self.schedule_task(task)\n all_active_tasks.clear()\n\n pop_item = []\n for uid, server in self.servers_offline.items():\n if server.status:\n self.servers_online[uid] = server\n pop_item.append(uid)\n print(f\"Server {server.server_name} came online\")\n self.populate_server(server)\n for item in pop_item:\n self.servers_offline.pop(item)\n \n pop_item = []\n for uid, server in self.servers_online.items():\n if not server.status:\n self.servers_offline[uid] = server\n pop_item.append(uid)\n print(f\"Server {server.server_name} went offline\")\n server.jobs = []\n server.waiting_time = 0\n for item in pop_item:\n self.servers_online.pop(item)", "def update_balance_sheet(group_id):\n\n group_details = dbconn.get_collection('groups').find_one({\"_id\":group_id})\n print(\"Running Split Job on {0} after time {1}\".format(group_details['group_name'],group_details['last_settlement_on']))\n\n for expense in dbconn.get_collection('expenditures').find(\n {'group_id': group_id, 'time':{'$gt':group_details['last_settlement_on']} }):\n\n split_expense = expense['amount']/len(expense['shared_between'])\n lender = expense['member_id']\n borrower_set = set(expense['shared_between']) - set(lender)\n for borrower in borrower_set:\n\n '''\n db.members.update(\n {'_id':'nir', 'borrowing.group_id':'grp_tst','borrowing.member_id':'tst1'}\n ,{$inc:{'borrowing.$.amount':100}}\n\t\t\t\t,{upsert:true}\n\t\t\t\t)\n\t\t\t\t\n\t\t\tdb.members.update(\n\t\t\t\t{'_id':'nir'}\n\t\t\t\t,{'$addToSet': {'borrowing':{ 'group_id':'grp_tst','member_id':'tst1','amount':100}}}\n\t\t\t\t)\n\n '''\n\n try:\n dbconn.get_collection('members')\\\n .update_one(\n {'_id':borrower, 'borrowing.group_id':group_id,'borrowing.member_id':lender}\n 
,{'$inc':{'borrowing.$.amount':split_expense}})\n except pymongoerrors.WriteError.code == 16836:\n print('You have never borrowed from this person. Running alternate update command.') # Added for testing\n dbconn.get_collection('members')\\\n .update_one(\n {\"_id\":borrower}\n ,{'$addToSet': {'borrowing':{'group_id':group_id,'member_id':lender,'amount':split_expense}}})\n\n dbconn.get_collection('expenditures').update_one({'_id':ObjectId(expense['_id'])},{'$set':{'settled':True}})\n dbconn.get_collection('groups').update_one({\"_id\":group_id}, {'$set': {'last_settlement_on':datetime.utcnow()}})", "def test_very_many_partitions_and_instances_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(99):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i for i in range(999)}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])", "def mail_activity(self, list_pc, t_min = 2, t_max = 5, sender = \"bob\", passwd = \"alice\", receiver = \"bob\"):\n for pc in list_pc:\n container = pc[\"properties\"][\"container_id\"]\n self.dm.copy_to_docker(\"./config_files/client/requests_mail.sh\", container)\n self.dm.copy_to_docker(\"./config_files/client/kill_mail.sh\", container)\n self.dm.copy_to_docker(\"./config_files/client/template_mail.txt\", container)\n self.dm.exec_to_docker(container, \"ash requests_mail.sh \"+str(t_min)+\" \"+str(t_max)+\" \"+sender+\" \"+str(passwd)+\" \"+receiver,isdetach=True)\n pass", "def test_very_many_partitions_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(10000):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])", "def __send_reports__(self,config,mockdb):\n numbers = config.get('Flowcell_reports','numbers').split(',')\n for number in numbers:\n flowcell_report_key = getattr(self,'flowcell_report_' + str(number) + '_key')\n if flowcell_report_key is None:\n continue\n report = mockdb['FlowcellStatisticReport'].objects[flowcell_report_key]\n if report.report_sent is True: #If the report is already sent, next.\n continue\n if not report.__is_complete__(): #If the qsub script is still running, next.\n continue\n if self.sequencing_run_type == 'RapidRun' and str(number) == '16':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n elif self.sequencing_run_type == 'HighThroughputRun' and str(number) == '64':\n recipients = 
config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n else:\n recipients = config.get('Flowcell_reports','subset_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"subset_report\")\n files = []\n files.append(report.report_pdf)\n files.append(report.full_report)\n files.append(report.current_report)\n send_email(subject,body,recipients=recipients,files=files)\n report.__finish__()\n report.report_sent = True\n return 1", "def beat_inbox_sms_bulk():\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()\n\n while list_of_sms_notifications:\n save_smss.apply_async((None, list_of_sms_notifications, receipt_id_sms), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: SMS receipt {receipt_id_sms} sent to in-flight.\")\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()", "def half_sync(self,delay):\n self.count = 1\n while not self.shutdown and self.loggedin.autosync:\n time.sleep(delay)\n self.count += 1\n self.filelist = self.loggedin.list()\n print \"Pinged server for changes\"\n self.synced = []\n if self.filelist:\n for f in self.filelist:\n path = self.loggedin.sanitize_path(f['path'])\n path = os.path.join(self.onedirrectory, path)\n if not os.path.exists(path):\n os.makedirs(path)\n if f['name'] and not self.loggedin.exists(f):\n exists, data = self.loggedin.getfile(f)\n if exists:\n with open(self.loggedin.make_path(f), 'a') as new_file:\n new_file.write(data)\n new_file.close()\n elif f['name'] and str(self.loggedin.hash_file(f)) != str(f['hash']):\n self.loggedin.sendfile(f['name'], f['path'])\n if self.loggedin.make_path(f) not in self.synced:\n self.synced.append(self.loggedin.make_path(f))\n os_walk = os.walk(self.loggedin.onedirrectory)\n for directory in os_walk:\n for f in directory[2]:\n if f.startswith('.'):\n continue\n path = os.path.join(directory[0], f)\n if path not in self.synced:\n try:\n os.remove(path)\n except OSError, e:\n print (\"Error: %s - %s.\" % (e.filename,e.strerror))", "def sync():\n sync_ssda()", "def test_merge_backup_with_failover_logs(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n failed_persisted_bucket = []\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in self.buckets:\n ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,\n bucket.name, 'ep_queue_size',\n 0, timeout_in_seconds=120)\n if not ready:\n failed_persisted_bucket.append(bucket.name)\n if failed_persisted_bucket:\n self.fail(\"Buckets %s did not persisted.\" % failed_persisted_bucket)\n self.log.info(\"Stop persistence at each node\")\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for bucket in self.buckets:\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n bucket.name))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = 
BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.sleep(5)\n self.log.info(\"Crash cluster via kill memcached\")\n for node in clusters:\n for server in self.servers:\n if node.ip == server.ip:\n num_entries = 4\n reach_num_entries = False\n while not reach_num_entries:\n shell = RemoteMachineShellConnection(server)\n shell.kill_memcached()\n ready = False\n while not ready:\n if not RestHelper(RestConnection(server)).is_ns_server_running():\n self.sleep(10)\n else:\n ready = True\n cmd = \"%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries \" \\\n \"| gawk%s '{printf $2}' | grep -m 5 '4\\|5\\|6\\|7'\" \\\n % (self.cli_command_location, self.cmd_ext, server.ip,\n \"cbadminbucket\", \"password\", self.cmd_ext)\n output, error = shell.execute_command(cmd)\n shell.disconnect()\n if output:\n self.log.info(\"number failover logs entries reached. %s \" % output)\n reach_num_entries = True\n self.backup_create()\n self.log.info(\"Start backup data\")\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Load 3rd batch docs\")\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen3, \"create\", 0)\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)", "def do_sync_notes(dbsync):\n dbsync.sync_notes()", "def test_very_many_instances_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n fsics[\"sub\"][\"\"].update({uuid.uuid4().hex: i for i in range(10000)})\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])", "def syncDBLoop(self):\n while 1:\n self.evSyncDB.wait()\n if self.bQuit == True:\n return\n \n self.evSyncDB.clear()\n self.db.syncDB()\n \n self.evAPI.set()", "def run(self):\n self.sync_state()\n self.periodic_resync()\n self.lease_relay.start()\n self.notifications.run_dispatch(self)", "def task(ctx, config):\n if config is None:\n config = {}\n assert isinstance(config, dict), \\\n 'Resolve stuck peering only accepts a dict for config'\n\n manager = ctx.managers['ceph']\n\n while len(manager.get_osd_status()['up']) < 3:\n time.sleep(10)\n\n\n manager.wait_for_clean()\n\n dummyfile = '/etc/fstab'\n dummyfile1 = '/etc/resolv.conf'\n\n #create 1 PG pool\n pool='foo'\n log.info('creating pool foo')\n manager.raw_cluster_cmd('osd', 'pool', 'create', '%s' % pool, '1')\n\n #set min_size of the pool to 1\n #so that we can continue with I/O\n #when 2 osds are down\n manager.set_pool_property(pool, \"min_size\", 1)\n\n osds = [0, 1, 2]\n\n primary = manager.get_pg_primary('foo', 0)\n log.info(\"primary osd is %d\", primary)\n\n others = list(osds)\n others.remove(primary)\n\n log.info('writing initial objects')\n first_mon = teuthology.get_first_mon(ctx, config)\n (mon,) = ctx.cluster.only(first_mon).remotes.keys()\n #create few objects\n for i in range(100):\n rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])\n\n 
manager.wait_for_clean()\n\n #kill other osds except primary\n log.info('killing other osds except primary')\n for i in others:\n manager.kill_osd(i)\n for i in others:\n manager.mark_down_osd(i)\n\n\n for i in range(100):\n rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1])\n\n #kill primary osd\n manager.kill_osd(primary)\n manager.mark_down_osd(primary)\n\n #revive other 2 osds\n for i in others:\n manager.revive_osd(i)\n\n #make sure that pg is down\n #Assuming pg number for single pg pool will start from 0\n pgnum=0\n pgstr = manager.get_pgid(pool, pgnum)\n stats = manager.get_single_pg_stats(pgstr)\n print(stats['state'])\n\n timeout=60\n start=time.time()\n\n while 'down' not in stats['state']:\n assert time.time() - start < timeout, \\\n 'failed to reach down state before timeout expired'\n stats = manager.get_single_pg_stats(pgstr)\n\n #mark primary as lost\n manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,\\\n '--yes-i-really-mean-it')\n\n\n #expect the pg status to be active+undersized+degraded\n #pg should recover and become active+clean within timeout\n stats = manager.get_single_pg_stats(pgstr)\n print(stats['state'])\n\n timeout=10\n start=time.time()\n\n while manager.get_num_down():\n assert time.time() - start < timeout, \\\n 'failed to recover before timeout expired'\n\n manager.revive_osd(primary)", "def test_merge_backup_with_multi_threads(self):\n gen = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.log.info(\"Start doing backup\")\n self.backup_create()\n self.backup_cluster()\n gen = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_cluster(self.threads_count)\n self.backupset.number_of_backups += 1\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Start to merge backup\")\n self.backupset.start = randrange(1, self.backupset.number_of_backups)\n if int(self.backupset.number_of_backups) == 2:\n self.backupset.end = 2\n elif int(self.backupset.number_of_backups) > 2:\n self.backupset.end = randrange(self.backupset.start,\n self.backupset.number_of_backups + 1)\n self.merged = True\n status, output, _ = self.backup_merge()\n self.backupset.end -= 1\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,\n self.skip_consistency, self.per_node)\n self.log.info(\"*** Start to validate data in merge backup \")\n self.validate_backup_data(self.backupset.backup_host, [self.master],\n \"ent-backup\", False, False, \"memory\",\n self.num_items, None)\n self.backup_cluster_validate(skip_backup=True)", "def check_sync(storage):\n\n checks = Check.objects.all()\n\n storage_keys = set(storage.keys())\n check_keys = set(checks.in_bulk().keys())\n\n # Create a dict of checks which has check_id, command and run_frequency\n check_dict = {}\n for check in checks:\n check_dict[check.id] = {}\n\n # if this check needs port to run the command attach target_port to the command\n if '{port}' in check.plugin.template:\n command = check.plugin.template.format(host=check.service.server.address,\n port=check.target_port)\n\n check_dict[check.id]['command'] = command\n\n else:\n command = check.plugin.template.format(host=check.service.server.address)\n check_dict[check.id]['command'] = command\n\n 
check_dict[check.id]['freq'] = check.run_freq\n\n # synchronize added items\n if check_keys - storage_keys:\n for key in check_keys - storage_keys:\n storage[key] = check_dict[key]\n storage[key]['command'] = check_dict[key]['command']\n storage[key]['freq'] = check_dict[key]['freq']\n\n # Create check RRD\n check_rrd.create(key, storage[key]['freq'])\n\n # synchronize deleted items\n if storage_keys - check_keys:\n for key in storage_keys - check_keys:\n del storage[key]\n\n # Remove check RRD\n check_rrd.remove(key)\n\n # synchronize update items\n for check_id in check_dict:\n if check_dict[check_id]['command'] != storage[check_id]['command']:\n storage[check_id]['command'] = check_dict[check_id]['command']\n\n if check_dict[check_id]['freq'] != storage[check_id]['freq']:\n storage[check_id]['freq'] = check_dict[check_id]['freq']\n\n # Set check RRD heartbeat\n check_rrd.set_heartbeat(check_id, storage[check_id]['freq'])", "def run(self, request, queryset):\n\n for sequence in queryset:\n for settings in sequence.settings.all():\n settings.buffer_file = sequence.buffer_file\n settings.save()\n settings.run()\n\n self.message_user(\n request,\n _('Data synchronization started in background.'))", "def test_soud_to_full_to_full(self):\n # we'll be doing a full sync\n self.initialize_sessions(filters=\"p\")\n\n tablet_tuples = [\n (self.i1, \"p1\", 6),\n ]\n laptop_tuples = [\n (self.i3, \"p\", 5),\n ]\n kdp_tuples = [\n (self.i3, \"p\", 3),\n (self.i4, \"p\", 7),\n ]\n\n tablet_data = self.create_stores(tablet_tuples)\n laptop_data = self.create_stores(laptop_tuples)\n\n self.set_sender_fsic_from_dmcs(tablet_tuples + laptop_tuples)\n self.set_receiver_fsic_from_dmcs(kdp_tuples)\n self.queue()\n assertRecordsBuffered(laptop_data)\n assertRecordsBuffered(tablet_data)", "def run():\n print(\"######## SIMULATION CONFIGURATION ########\")\n print(f\"Total simulation time is {total_simulation_time//60} minute(s)\")\n print(f\"Interval between refresh cycles is {time_interval} second(s)\")\n if number_of_servers == 1: print(\"Number of server is 1\")\n else: print(f\"Number of servers are {number_of_servers}\")\n print(f\"Number of tasks being created are {number_of_tasks}\\n\\n\")\n\n print(\"##### Creating components #####\\n\")\n\n if loadbalancer_type:\n loadbalancer = LoadBalancer(1)\n else:\n loadbalancer = LoadBalancer()\n print(\"Creating servers\")\n server_list = []\n for i in range(number_of_servers):\n server = Server(\"server_\"+str(i),1)\n server_list.append(server)\n print(\"Servers Created\\n\")\n print(\"Creating ChaosMonkey\")\n chaosmonkey = ChaosMonkey(loadbalancer)\n print(\"Created ChaosMonkey\\n\")\n print(\"Creating Tasks\")\n tasks_list = []\n for i in range(number_of_tasks):\n task = Task(\"task_number_\"+str(i),random.randint(0,2))\n tasks_list.append(task)\n print(\"Tasks Created\\n\")\n print(\"##### Components created #####\\n\\n\")\n\n print(\"##### Registering components in loadbalancer #####\\n\")\n for server in server_list:\n loadbalancer.add_server(server)\n for task in tasks_list:\n loadbalancer.register_new_task(task)\n print(\"##### Registered Components #####\\n\\n\")\n\n print(\"##### Beginning simulation #####\")\n for _ in range(0, total_simulation_time, time_interval):\n print(f\"At time {_}second(s):\")\n loadbalancer.balance(1)\n loadbalancer.update()\n loadbalancer.balance()\n chaosmonkey.server_chaos()\n loadbalancer.balance()\n time.sleep(time_interval)\n print(\"----------------------\\n\")\n print(\"Tasks Remaining: \", 
len(loadbalancer.all_tasks))\n print(\"Servers Remaining: \", len(loadbalancer.servers_online.items()))\n if len(loadbalancer.all_tasks)==0:\n print(\"All tasks are complete, You won :D\")\n break\n if len(loadbalancer.servers_online.items())==0:\n print(\"Chaos Monkey destroyed the system! You lost :(\")\n break\n print(\"----------------------\\n\") \n print(\"##### Simulation ended #####\\n \")", "def _worker():\n try:\n logger.info('Looping...')\n temp_list = []\n for file in ['data_unfcc.csv','data_ebal.csv']:\n temp_list.append(os.path.isfile(file))\n if not all(temp_list):\n print('Starting from scratch...')\n download_source()\n create_database()\n create_index()\n\n time_mod = datetime.strptime(time.ctime(os.stat('data_ebal.csv').st_mtime),'%a %b %d %H:%M:%S %Y')\n time_now = datetime.now()\n\n if (time_now - time_mod).seconds > 3600:\n download_source()\n get_updated_records('unfcc')\n get_updated_records('ebal')\n create_index()\n except Exception as e:\n logger.warning('Main Loop error')" ]
[ "0.55476534", "0.5505242", "0.547258", "0.53901726", "0.5317194", "0.53054774", "0.52975595", "0.52915615", "0.5279213", "0.5249152", "0.52469254", "0.5221539", "0.52172494", "0.5210602", "0.5190685", "0.5178232", "0.5177902", "0.51734185", "0.5141393", "0.51380384", "0.5096278", "0.50633705", "0.5045618", "0.5030264", "0.5026438", "0.5024866", "0.50107116", "0.49892834", "0.4983151", "0.49822038" ]
0.63890535
0
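The sync document above is organized around set differences between three views of the slices: the pickle (slices), tc (kernelhtbs), and PLC (live). A sketch with fabricated xids showing the four cases it distinguishes:
# Fabricated xids and values; only the set arithmetic mirrors sync().
slices     = {101: "dat-only", 102: "everywhere"}        # from bwmon.pickle
kernelhtbs = {102: {"usedbytes": 0}, 103: {}}            # from tc
live       = {102: "plc-b", 103: "plc-c", 104: "plc-d"}  # from PLC

nohtbslices   = set(slices) - set(kernelhtbs)    # in dat, HTB not running -> reset or drop
slicesnodat   = set(kernelhtbs) - set(slices)    # HTB running, not in dat -> re-init accounting
newslicesxids = set(live) - set(kernelhtbs)      # on PLC, no HTB yet      -> start fresh
deadxids      = set(slices) - set(live)          # in dat, gone from PLC   -> park in deaddb

assert nohtbslices == {101}
assert slicesnodat == {103}
assert newslicesxids == {104}
assert deadxids == {101}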
Turn off all slice HTBs
def allOff(): # Get/set special slice IDs root_xid = bwlimit.get_xid("root") default_xid = bwlimit.get_xid("default") kernelhtbs = gethtbs(root_xid, default_xid) if len(kernelhtbs): logger.log("bwmon: Disabling all running HTBs.") for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_eht_off(self):\n raise NotImplementedError", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()", "def disable_vae_slicing(self):\n self.vae.disable_slicing()", "def off(self):", "def shutdown(self):\n self.disable_modulation()\n self.disable()\n super().shutdown()", "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "def disable(self):", "def disable(self) -> None:", "def turn_off(self, **kwargs):\n self._state = False\n self.schedule_update_ha_state()\n self._hs_color = None\n self._attributes[\"hs_color\"] = self._hs_color\n self._attributes[\"brightness\"] = None", "def off_all(self):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def turn_aux_heat_off(self):\n self.set_operation_mode(STATE_HEAT)", "def off(self):\n self._set_state(on=False)", "def firewallOff():\n pass", "def turn_off_outlets(self, outlets):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "def turn_off(self, **kwargs):\n self._lj.deactivate_load(self._index)", "def disable_everything(self):\n zhinst.utils.disable_everything(self.daq, self.device_id)\n self.log.info(\"Disabled everything.\")", "def deactivate(self):\n super(Hipchap, self).deactivate()", "def turnOff(self):\n self.write(\"E;O0;E;\")\n return self.output()", "def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()", "def turn_off(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x01', True)\n self._state = False\n self.schedule_update_ha_state()", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')", "def off(buttons):\n phatbeat.setup()\n for button in _to_iter(buttons):\n phatbeat._button_handlers.pop(button, None)\n phatbeat._button_repeat.pop(button, None)", "def turn_off(self, **kwargs):\n self.vacuum.stop()\n self.vacuum.home()", "def disable_modulation(self):\n self.write(\":OUTPUT:MOD OFF;\")\n self.write(\":lfo:stat off;\")", "def libc_prctl_capbset_drop_all_capabilities():\n for i in range(CAP_LAST_CAP + 1):\n _call_c_style(libc, \"prctl\", PR_CAPBSET_DROP, i, 0, 0, 0)", "def disable(self):\n pass", "def setOff(self, command):\r\n self.setDriver('ST', 0)", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False" ]
[ "0.63343924", "0.57503784", "0.57366097", "0.5704659", "0.5629737", "0.55784094", "0.5566999", "0.55569965", "0.55203027", "0.5464858", "0.5462128", "0.54510033", "0.5431682", "0.54298925", "0.54233426", "0.54200375", "0.5416879", "0.537195", "0.535316", "0.5322585", "0.5306279", "0.5303545", "0.5302396", "0.5300579", "0.5289194", "0.528799", "0.5263636", "0.52519375", "0.52508044", "0.5243139" ]
0.7344729
0
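A runnable sketch of the allOff pattern from the record above; bwlimit and gethtbs are stubbed stand-ins here (not the real PlanetLab module), and the xid 538 and eth0 device are invented (dev_default in the real code).
class bwlimit:  # stand-in, assumed interface only
    @staticmethod
    def get_xid(name):
        return {"root": 0, "default": 1}[name]
    @staticmethod
    def off(xid, dev=None):
        print("tc: removed HTB for xid %s on %s" % (xid, dev))

def gethtbs(root_xid, default_xid):
    # Pretend tc reported one slice HTB besides root and default.
    return {root_xid: {}, default_xid: {}, 538: {}}

root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")
kernelhtbs = gethtbs(root_xid, default_xid)
if len(kernelhtbs):
    for htb in kernelhtbs.keys():
        bwlimit.off(htb, dev="eth0")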
When run as a thread, wait for event, lock db, deep copy it, release it, run bwmon.GetSlivers(), then go back to waiting.
def run(): logger.verbose("bwmon: Thread started") while True: lock.wait() logger.verbose("bwmon: Event received. Running.") database.db_lock.acquire() nmdbcopy = copy.deepcopy(database.db) database.db_lock.release() try: if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0: # class show to check if net:InitNodeLimit:bwlimit.init has run. sync(nmdbcopy) else: logger.log("bwmon: BW limits DISABLED.") except: logger.log_exc("bwmon failed") lock.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\r\n event = threading.Event()\r\n while (event.is_set() == False):\r\n # perform database backup\r\n backup()\r\n\r\n # sleep for the predefined amount interval\r\n event.wait(BACKUP_INTERVAL)", "def syncDBLoop(self):\n while 1:\n self.evSyncDB.wait()\n if self.bQuit == True:\n return\n \n self.evSyncDB.clear()\n self.db.syncDB()\n \n self.evAPI.set()", "def run(self): \n\n #acquire the semaphore\n global wheel\n while True:\n time.sleep(2) \n self.s2.acquire() \n self.empty.acquire() \n #remove a table from the list\n\n wheel += 1 \n print \"Producer2(%s):deliver wheels, now wheels:%s\\n\" %(self.name, wheel) \n self.wheel.release() \n #self.threadSemaphore.release() ", "def events_run(lock, qPasswordEvents):\n\n logger.info(\"Worker password events starting\") \n\n dbconnection = connect()\n\n while True:\n\n try:\n event = Event(qPasswordEvents.get())\n except Exception as e:\n logger.error(\"Problem with event: {}\".format(e))\n event.logError(str(e))\n event.setError()\n db.dispatch(dbconnection, event)\n continue\n else:\n logger.info(\"Processing password event from user {}\".format(event.user))\n\n event.setRunning()\n event.logInfo(\"Event is running\")\n logger.debug(\"Event %s is running\" % event.id)\n ##\n # Creating a new password for user\n if event.creatingObject():\n logger.info(\"Creating password {}\".format(event.object.id))\n\n try:\n cursor = db.users(dbconnection, email=event.data['email'])\n u = cursor.next()\n u['password'] = event.data['password']\n db.users(dbconnection, u, u['id'])\n except Exception as e:\n logger.error(\"Problem updating password of user: {} - {}\".format(event.object.id, e))\n event.logError(str(e))\n event.setError()\n continue\n else:\n event.setSuccess()\n event.logInfo(\"Event success\")\n logger.debug(\"Event %s Success\" % event.id)\n ##\n # we then dispatch the event\n db.dispatch(dbconnection, event)", "def test_sync_event_for_getter():\n injector.get(EnvironmentService).cache = {}\n handler = service(PgConnectionHandlerService)()\n\n handler.max_conn = (\n 2 # possible, because connection_pool will be created on first get_connection\n )\n block_event = threading.Event()\n block_event.clear()\n conn = handler.get_connection()\n thread_blocking_conn = Thread(\n target=thread_method_block,\n kwargs={\"handler\": handler, \"block_event\": block_event},\n )\n thread_blocking_conn.start()\n handler.sync_event.clear()\n\n threads: Thread = []\n for i in range(handler.max_conn):\n thread = Thread(target=thread_method, kwargs={\"handler\": handler, \"secs\": 0.1})\n thread.start()\n threads.append(thread)\n handler.sync_event.set()\n sleep(0.1)\n assert not handler.sync_event.is_set()\n handler.put_connection(conn)\n block_event.set()\n for i in range(handler.max_conn):\n threads[i].join()\n thread_blocking_conn.join()", "def _thread_yield(dbapi_con, con_record):\n time.sleep(0)", "def run_ostro(self):\n\n self.logger.info(\"*** start valet-engine main loop\")\n\n # TODO(Gueyoung): Run resource handler thread.\n\n try:\n # NOTE(Gueyoung): if DB causes any error, Valet-Engine exits.\n\n while self.end_of_process is False:\n\n if not self.lock.set_regions():\n break\n\n request_list = self.dbh.get_requests()\n if len(request_list) > 0:\n rc = self._handle_requests(request_list)\n Logger.set_req_id(None)\n if not rc:\n break\n\n time.sleep(1)\n except KeyboardInterrupt:\n self.logger.error(\"keyboard interrupt\")\n except Exception:\n self.logger.error(traceback.format_exc())\n\n self.lock.done_with_my_turn()\n\n 
self.logger.info(\"*** exit valet-engine\")", "def stopeventmonitor(self):\n self.doeventloop = False\n if self.service is not None:\n self.service.breakloop()\n # reset the service, otherwise nextEvent won\"t work\n self.initeventservice(shutdown=True)\n if self.eventmonthread is not None:\n if emane.VERSION >= emane.EMANE091:\n self.eventmonthread._Thread__stop()\n self.eventmonthread.join()\n self.eventmonthread = None", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def run(self, event, db):\n pass", "def _lock(self):\n import os\n from time import sleep\n # Waits until another process completes it's process\n while os.path.isfile(self.db_path+\".lock\"):\n print(\"Another process is using\",\n self.db_path, \". Waiting for release.\")\n sleep(1)\n with open(self.db_path+\".lock\", 'w') as fp:\n pass", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def run(self): \n global frame\n #acquire the semaphore\n while True:\n time.sleep(2) \n self.s1.acquire() \n self.empty.acquire()\n #remove a table from the list\n frame += 1 \n print \"Producer1(%s):deliver frame, now frame:%s\\n\" %(self.name, frame) \n self.frame.release()\n #self.threadSemaphore.release() ", "def pre_loop(self, event):\n self.do_sync()", "def _watchdog(self):\n while True:\n try:\n # Arno, 2012-07-12: apswtrace detects 7 s commits with yield 5 min, so reduce\n yield 60.0\n\n # flush changes to disk every 1 minutes\n self._database.commit()\n\n except Exception:\n # OperationalError: database is locked\n dprint(exception=True, level=\"error\")\n\n except GeneratorExit:\n if __debug__: dprint(\"shutdown\")\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n self._database.commit(exiting = True)\n break", "def _run(self):\n\t\tself._trigger_manager = UnplugTrigger(self._audio_manager,\n\t\t self._logger)\n\t\tself._trigger_manager.start_listening()\n\t\twhile not self._trigger_manager.is_triggered():\n\t\t\ttime.sleep(0.1)\n\t\tself.is_running = False\n\t\tself._logger.info('Waiting on saves to complete...')\n\t\tself._video_thread.join()\n\t\tself._audio_thread.join()\n\t\tself.mix_audio_and_video()", "def run(self):\n # Don't call this from the thread which it represents.\n assert eventlet.corolocal.get_ident() != self.id\n self.caller_sem = Semaphore(0)\n self.my_sem.release()\n self.caller_sem.acquire() # Wait for it to finish.", "def _monitor(cls):\n while not cls._stop_thread.is_set():\n cls.shrink_cache()\n time.sleep(random.random() * 10)", "async def run(self) -> None:\n self._timer.start()\n self.logger.info(\"Starting beam state sync\")\n self.manager.run_daemon_task(self._periodically_report_progress)\n self.manager.run_daemon_task(self._reduce_spread_factor)\n with self.subscribe(self._peer_pool):\n # The next task is not a daemon task, as a quick fix for\n # https://github.com/ethereum/trinity/issues/2095\n self.manager.run_task(self._match_predictive_node_requests_to_peers)\n await self._match_urgent_node_requests_to_peers()", "def run(self):\n logger.debug(\"Starting a daemon thread to sync experiment states\")\n while self.thread_running.is_set():\n try:\n 
self.sync_experiment_state_with_ddb()\n except Exception as e:\n logger.warn(\"Exception occurred in Experiment Sync Thread: \" + str(e))\n logger.error(e)\n logger.warn(\"Resuming Sync in 10 seconds...\")\n time.sleep(10)\n time.sleep(0.5)", "def run(self): \n\n #acquire the semaphore \n global frame, wheel \n while True:\n self.frame.acquire() \n frame -= 1\n print \"Consumer(%s):consume frame, now frame:%s, wheels:%s\\n\" %(self.name, frame,wheel) \n self.empty.release()\n self.s1.release()\n\n self.wheel.acquire()\n self.wheel.acquire()\n wheel -= 2\n print \"Consumer(%s):consume wheels, now frame:%s, wheels:%s\\n\" %(self.name, frame,wheel) \n self.empty.release()\n self.empty.release()\n self.s2.release()\n self.s2.release()\n time.sleep(2) \n print \"!!!!creat a car\\n\"", "def sync_audit_thread(self, engine_id):\n\n while True:\n try:\n self.run_sync_audit(engine_id)\n eventlet.greenthread.sleep(CHECK_AUDIT_INTERVAL)\n except eventlet.greenlet.GreenletExit:\n # We have been told to exit\n return\n except Exception as e:\n LOG.exception(e)", "def __loop(self):\n\t\twhile True:\n\t\t\tself.AL.acquire()\n\t\t\ttr = self.ls.keys() + self.s2ai.keys()\n\t\t\ttw = []\n\t\t\ttcx = tr\n\t\t\tself.AL.release() # other threads may modifiy this while we wait\n\t\t\tif len(tr + tw + tcx) == 0:\n\t\t\t\t# acording to docs, three empty lists are platform dependant.\n\t\t\t\t# better not wait with empty lists.\n\t\t\t\ttime.sleep(ISETO) # dont waste resources in a while-true-pass-loop\n\t\t\t\tcontinue\n\t\t\tra, wa, xs = select.select(tr, tw, tcx, ISETO)\n\t\t\t# timeout so we can check for new sockets to read from\n\t\t\tself.AL.acquire()\n\t\t\tfor s in xs:\n\t\t\t\tif s in ra:\n\t\t\t\t\tra.remove(s)\n\t\t\t\t\tinfo = self.s2i[s]\n\t\t\t\tif s in wa:\n\t\t\t\t\twa.remove(s)\n\t\t\t\tself._close_s(s)\n\t\t\tfor s in ra:\n\t\t\t\tif s in self.ls.keys():\n\t\t\t\t\t# new connection\n\t\t\t\t\tc, a = s.accept()\n\t\t\t\t\tself.ls[s][\"got\"] += 1\n\t\t\t\t\tport = self.ls[s][\"port\"]\n\t\t\t\t\tlp = c.getsockname()[1]\n\t\t\t\t\tpeer = self.ls[s][\"peer\"]\n\t\t\t\t\tai = (self.cid, lp)\n\t\t\t\t\tself.ai2s[ai] = c\n\t\t\t\t\tself.s2ai[c] = ai\n\t\t\t\t\tself.s2i[c] = {\n\t\t\t\t\t\t\"ai\": ai,\n\t\t\t\t\t\t\"recv\": 0,\n\t\t\t\t\t\t\"send\": 0,\n\t\t\t\t\t\t\"local\": lp,\n\t\t\t\t\t\t\"creator\": self.cid,\n\t\t\t\t\t\t\"peer\": peer,\n\t\t\t\t\t\t\"socket\": c,\n\t\t\t\t\t\t\"port\": port\n\t\t\t\t\t\t}\n\t\t\t\t\tself.send_to(\n\t\t\t\t\t\tpeer,\n\t\t\t\t\t\tID_PC + \"BRIDGE\" + struct.pack(\"!HH\", port, lp)\n\t\t\t\t\t\t)\n\t\t\t\tif s in self.s2ai.keys():\n\t\t\t\t\tai = self.s2ai[s]\n\t\t\t\t\tdata = s.recv(RECV_BUFF)\n\t\t\t\t\tself.s2i[s][\"send\"] += len(data)\n\t\t\t\t\tinfo = self.s2i[s]\n\t\t\t\t\tif len(data) == 0:\n\t\t\t\t\t\tself._close_s(s)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tpi = struct.pack(\"!H\", ai[1])\n\t\t\t\t\tif COMPRESSION > 0:\n\t\t\t\t\t\tcomp = zlib.compress(data, COMPRESSION)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcomp = data\n\t\t\t\t\ttosend = encrypt(comp, self.__key)\n\t\t\t\t\tself.send_to(\n\t\t\t\t\t\tinfo[\"peer\"], ID_MSG + chr(ai[0]) + pi + tosend\n\t\t\t\t\t\t)\n\t\t\tfor s in wa:\n\t\t\t\tpass\n\t\t\tself.AL.release()", "def run(self):\n LOG.debug(\"ReaderThread up and running\")\n\n lastevictTime = 0\n while ALIVE:\n for col in allLivingCollectors():\n for line in col.collect():\n self.processLine(col, line)\n now = int(time.time())\n if now - lastevictTime > self.evictinterval:\n lastevictTime = now\n now -= self.evictinterval\n for col in allCollectors():\n 
col.evictOldkeys(now)\n # BUGBUG : not good\n time.sleep(1)", "def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name", "def _mainloop(self):\n while not self._shutdown:\n events = self._selector.select(timeout=0.01)\n for key, _ in events:\n key.data(key.fileobj)\n self.close()", "def acquire(self, blocking=True, shared=False):", "def thread_method_block(handler, block_event):\n with ConnectionContext(handler):\n block_event.wait()", "def sync() -> None:", "async def do_wait(self) -> None:\n async with self.running_wait.needs_run() as needs_run:\n if needs_run:\n for number in self.pending_remove:\n del self.number_to_cb[number]\n self.pending_remove = set()\n maxevents = 32\n if self.input_buf is None:\n self.input_buf = await self.ram.malloc(EpollEventList, maxevents * EpollEvent.sizeof())\n if self.syscall_response is None:\n if self.wait_readable:\n await self.wait_readable()\n self.syscall_response = await self.epfd.task.sysif.submit_syscall(\n SYS.epoll_wait, self.epfd.near, self.input_buf.near, maxevents, self.timeout)\n if self.valid_events_buf is None:\n count = await self.syscall_response.receive()\n self.valid_events_buf, _ = self.input_buf.split(count * EpollEvent.sizeof())\n received_events = await self.valid_events_buf.read()\n self.input_buf = None\n self.valid_events_buf = None\n self.syscall_response = None\n for event in received_events:\n if event.data not in self.pending_remove:\n self.number_to_cb[event.data](event.events)" ]
[ "0.60967696", "0.5767627", "0.5736665", "0.5632945", "0.5563657", "0.54911983", "0.54457074", "0.5422245", "0.54212034", "0.5407008", "0.5403839", "0.5361349", "0.5359709", "0.5354534", "0.53536624", "0.5333217", "0.5309702", "0.5295493", "0.52853715", "0.528163", "0.5264975", "0.526257", "0.5245213", "0.52404475", "0.5221516", "0.52160364", "0.5213653", "0.52102876", "0.52012", "0.5198868" ]
0.6785303
0
Read or create a new pickle file and return the data.
def read_or_new_pickle(filename, value, *args, **kwargs):
    data = None
    filename = "{}.pkl".format(filename)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    if os.path.isfile(filename):
        # If the file has been created but is empty, return None, since
        # another process could still be writing to it.
        if os.path.getsize(filename) > 0:
            with open(filename, "rb") as f:
                try:
                    data = pickle.load(f)
                except Exception as e:
                    print(e)
                    raise e
    else:
        # open(filename, "ab").close()
        if callable(value):
            data = value(*args, **kwargs)
        else:
            data = value
        with open(filename, "wb") as f:
            pickle.dump(data, f)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_object(self, filename):\n with open(filename, 'rb') as inp: # Overwrites any existing file.\n data = pickle.load(inp)\n return data", "def load_pickle_data(filename):\n path = \"../tmp/{}.pckl\".format(filename)\n if os.path.exists(path):\n print(\"LOADING PCKL FILE FROM {}\".format(path))\n f = open(path, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj", "def read_pickle(file_name):\n with open(file_name, 'rb') as f:\n obj = pickle.load(f)\n return obj", "def read_pickle_object_in_file(self):\n outobj = None\n if os.path.exists(self.pickle_file):\n with gzip.open(self.pickle_file, 'rb') as pkl_file:\n outobj = pickle.load(pkl_file)\n return outobj", "def read_from_file(name):\n print 'reading structures from pickle'\n print '------------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'rb')\n new_obj = pickle.load(file)\n file.close()\n\n return new_obj", "def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data", "def load_data(file_name):\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n return data", "def read_pickle(path):\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n\n return data", "def grab (filename) :\n cin = open (filename, 'r')\n return pickle.load (cin)", "def read_pickle_object(pickle_file_name):\n with open(pickle_file_name, 'rb') as input:\n m = pickle.load(input)\n return m", "def load(fname):\r\n with open(fname, 'rb') as f:\r\n data = pickle.load(f)\r\n return data", "def load_data(file_name: str) -> Optional[Any]:\n with open(file_name, \"rb\") as input_data:\n data = pickle.load(input_data)\n return data", "def get_pickle_file(file_name):\n\t# change directory\n\tos.chdir(\"../\")\n\tos.chdir(\"data\")\n\t# pickle activities\n\tf = open(file_name, \"rb\")\n\tpython_data = pickle.load(f)\n\tf.close()\n\t# change directory\n\t# change directory\n\tos.chdir(\"../\")\n\tos.chdir(\"pathfinder\")\n\treturn python_data", "def load_pickle(file_name):\n with open(file_name, \"rb\") as handle:\n pickle_file = pickle.load(handle)\n\n return pickle_file", "def load(self):\n f = self.open(\"rb\")\n try:\n import pickle\n\n return error.checked_call(pickle.load, f)\n finally:\n f.close()", "def loadPickle(pickle_file):\n print(\"Loading pickle data from file: \"+pickle_file)\n\n data = None\n try:\n with open(pickle_file, \"rb\") as fd:\n data = pickle.load(fd)\n except EOFError:\n pass\n except pickle.UnpicklingError as upe:\n print(\"Failed: Loading Pickle Data\")\n except IOError:\n data = {}\n\n return data", "def pickle_from_file(fname):\n\ttry:\n\t\tfh = open(fname, 'r')\n\t\tdata = cPickle.load(fh)\n\t\tfh.close()\n\texcept:\n\t\t#raise\n\t\tprint \"Loading pickled data failed!\", sys.exc_info()[0]\n\t\tdata = None\n \n\treturn data", "def pickle_read(file_path):\n\n with open(file_path, 'rb') as file:\n return pickle.load(file)", "def read_pickle(file_path):\n with open(file_path, 'rb') as file:\n return pickle.load(file)", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def load_pickle(path):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n return data", "def _read_pkl(self, 
input_file):\n data = pickle.load(open(input_file, 'rb'))\n return data", "def load(self):\n result = bolt.PickleDict.load(self)\n if not result and self.oldPath.exists():\n ins = None\n try:\n ins = self.oldPath.open('r')\n self.data.update(compat.uncpickle(ins))\n ins.close()\n result = 1\n except EOFError:\n if ins: ins.close()\n #--Done\n return result", "def handle_pickle(data, name, mode):\n pickle_name = os.path.join(os.path.dirname(pickle_loc), name)\n if mode == \"w\":\n with open(pickle_name, \"wb\") as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n return None\n\n if mode == \"r\":\n with open(pickle_name, \"rb\") as handle:\n b = pickle.load(handle)\n return b", "def load_synthetic_data():\n\n pickle_object = FM().data_file \n\n with pickle_object.open('rb') as data_file: \n return pickle.load(data_file)", "def load_from_file(self, name):\n if os.path.isdir(\"saved_data\"):\n with open(f'saved_data/{name}.txt', 'rb') as file:\n data = pickle.load(file)\n print(\"Successfully load from file\")\n return data\n else:\n os.mkdir(\"saved_data\")\n self.load_from_file(name)", "def load_data_pickle(PATH, dataset, filename):\n with open(PATH + '/' + dataset + \"_\" + filename + \".pkl\",\"rb\") as f:\n new_data = pickle.load(f)\n\n # print(filename, \"opened\")\n return new_data", "def load_file(self, filename):\n with open(filename, \"rb\") as pickle_handle:\n return pickle.load(pickle_handle)" ]
[ "0.7194657", "0.7113822", "0.7054853", "0.70204663", "0.69924474", "0.69662946", "0.6940556", "0.68701947", "0.68606484", "0.684021", "0.6837062", "0.6820283", "0.680073", "0.67892516", "0.67740124", "0.676149", "0.67593324", "0.675681", "0.6748782", "0.6723315", "0.6710549", "0.6710549", "0.6705637", "0.66935635", "0.66613644", "0.6652553", "0.6649105", "0.66447574", "0.6634105", "0.6622428" ]
0.7277842
0
Read or create a new json file and return the data.
def read_or_new_json(filename, value, *args, **kwargs):
    data = None
    filename = "{}.json".format(filename)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    if os.path.isfile(filename):
        # If the file has been created but is empty, return None, since
        # another process could still be writing to it.
        if os.path.getsize(filename) > 0:
            with open(filename, "r") as f:
                try:
                    data = json.load(f)
                except Exception as e:
                    print(e)
                    raise e
    else:
        if callable(value):
            data = value(*args, **kwargs)
        else:
            data = value
        with open(filename, "w") as f:
            json.dump(data, f, indent=4, separators=(',', ': '),
                      sort_keys=True, allow_nan=True)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def read_json_file(self, fname):\n return {}", "def openJson(self):\n json_file = open(self.file, 'r')\n json_data = json_file.read()\n result = json.loads(json_data)\n return result", "def load_json(filename: str):\n filepath = get_file_path(filename)\n\n if filepath.exists():\n with open(filepath, mode=\"r\", encoding=\"UTF-8\") as f:\n data = json.load(f)\n return data\n else:\n save_json(filename, {})\n return {}", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def json_from_file(name):\n with open(name) as f_p:\n return json.load(f_p)", "def open_json(path):\n with open(path, \"r\") as json_data_file:\n data = json.load(json_data_file)\n return data", "def read_data(file_name):\n file_path = pathlib.Path(__file__).parent / f'data/{file_name}'\n try:\n with open(file_path) as file:\n data = json.load(file)\n except (FileNotFoundError, json.JSONDecodeError) as e:\n # json decode error happens when file is empty\n data = []\n with open(file_path, 'w') as file:\n json.dump(data, file)\n logger.error(f'New empty file created. Error: {e}')\n\n return data", "def newJsonFile():\n nonlocal currentFile\n currentFile = join(rootFolder, 'tweetsRaw-' + str(now()) + '.json')\n f = open(currentFile)\n f.write('[\\n')\n return f", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def read_json(filepath):\n if (filepath in _json_cache):\n return _json_cache[filepath]\n with open(filepath, 'r', encoding='utf-8') as fileinfo:\n data = json.load(fileinfo)\n _json_cache[filepath] = data\n return data", "def read(self, filepath, dirpath=None):\n try:\n #filepath = os.path.normpath(filepath)\n with open(filepath) as f_p:\n try:\n self.json_dict = json.load(f_p)\n self.filepath = filepath\n return self.json_dict\n except ValueError as err:\n print('JSON content error in \"%s\"' % filepath)\n print(err)\n except (IOError, FileNotFoundError):\n print(\n 'Failed to open JSON file \"%s\" \"%s\"' %\n (os.path.abspath(''), filepath))\n raise NoSuchFileError(filepath)\n raise JsonContentError", "def load_json(file_name):\n return json.load(open(file_name))", "def read_json(fpath):\n with open(fpath, 'r') as f:\n obj = json.load(f)\n return obj", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def updated_data():\n with open(UPDATED_JSON, 'r') as f:\n updated_data = json.load(f)\n return updated_data", "def read_json(self, *args, **kwargs):\n with self.open('rb') as f:\n return json.load(f, *args, **kwargs)", "def _read_json_file(self):\n with open(self.subcfgfilename) as json_file:\n json_string = json_file.read()\n json_data = json.loads(json_string)\n return(json_data)", "def get_json(filename) :\n result = json.load(open(filename,'r'))\n return result", "def read_json(self, json_name):\r\n with open(json_name, 'r') as infile:\r\n self.pet_file = json.load(infile) # load existing json file\r\n self.pet_file_name = json_name # set name to passed name\r", "def load_json(fpath):\n with open(fpath, \"r\") as f:\n return json.load(f)", "def read_file(path):\n with open(path) as json_file:\n data = json.load(json_file)\n return data", "def read_json(jsonfp):\n 
with JsonFile(jsonfp) as jsondata:\n return jsondata", "def load_json(file_name):\n with open(file_name) as f:\n data = json.load(f)\n\n return data", "def read_json(file_name):\n try:\n with open(file_name, \"rt\") as input_file:\n return json.loads(input_file.read())\n except TypeError:\n print(\"No valid JSON data!\")\n raise\n except IOError:\n print(\"Could not read file from disk!\")\n raise", "def get_json(filename):\n with open(filename) as f:\n file_content = f.read()\n data = json.loads(file_content)\n return data", "def read_json(self):\n self._fopen.seek(self._json_start, 0)\n return json.loads(self._fopen.read().decode('utf-8'))", "def read_json(fname):\n with open(fname) as f:\n d = json.load(f)\n return d" ]
[ "0.7190205", "0.7190205", "0.71838874", "0.71492124", "0.7089315", "0.7088723", "0.7088723", "0.7031506", "0.701352", "0.699548", "0.6972864", "0.692402", "0.6919216", "0.6880745", "0.6876042", "0.68599683", "0.6858713", "0.68038887", "0.67970616", "0.67945015", "0.6791648", "0.6775777", "0.6774441", "0.676945", "0.67542064", "0.6751034", "0.67133904", "0.6712746", "0.6710839", "0.67000055" ]
0.73330307
0
Add postfix to a filename to make it unique.
def add_unique_postfix(filename):
    if not os.path.exists(filename):
        return filename
    path, name = os.path.split(filename)
    name, ext = os.path.splitext(name)
    make_filename = lambda i: os.path.join(path, '{}_{}{}'.format(name, i, ext))
    for i in range(1, sys.maxsize):
        unique_filename = make_filename(i)
        if not os.path.exists(unique_filename):
            return unique_filename
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def giverandomfilename(self,user,postfix=\"\"):\n return \"%s_%s_%s\" % (user.username.encode(\"ascii\",\"ignore\"),\n str(randint(10000,99999)),\n \"testfile%s.txt\" % postfix)", "def add_suffix_to_filename(filename, suffix):\n name, ext = os.path.splitext(filename)\n return ''.join([name, suffix, ext])", "def unique_filename(data):\n file = data\n get_ext = file.filename.split(\".\")[-1]\n new_name = \"%s.%s\" % (uuid.uuid4().hex, get_ext)\n return new_name", "def __add_filename_suffix(filename, suffix):\n return \"{}{}.pdf\".format(filename.split(\".pdf\", 1)[0], suffix)", "def fix_file(\n self,\n filepath: Path,\n output_dir: Path,\n add_unique_suffix: bool = False,\n ) -> str | Path:\n return filepath", "def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)", "def supplement_file_name(file: Union[str, pathlib.Path], sup: str) -> pathlib.Path:\n\n file = pathlib.Path(file)\n\n # the `suffix` is incorporated into the file name\n return file.with_name(file.stem + f'_{sup}' + file.suffix)", "def unique_name(file: Union[str, pathlib.Path]) -> pathlib.Path:\n\n return supplement_file_name(file, uuid.uuid1())", "def append_name(name, postfix):\n if name is None:\n ret = None\n elif name == '':\n ret = postfix\n else:\n ret = '%s_%s' % (name, postfix)\n return ret", "def make_new_path(path, postfix = \"\", ext = \"\"):\n dir = os.path.split(path)[0]\n old_basename, old_ext = os.path.splitext(path)\n new_basename = old_basename + \"_\" + postfix\n new_path = os.path.join(dir, new_basename + \".\" + ext)\n return new_path", "def _unique_path(user_id, filename, category='images'):\n ext = os.path.splitext(filename)[-1]\n new_filename = '{}{}'.format(uuid.uuid4(), ext)\n return os.path.join(category, str(user_id), new_filename)", "def increment_filename(self, filename, path=\"\", insert=\"\"):\n path = path.strip(\"/\")\n basename, ext = os.path.splitext(filename)\n for i in itertools.count():\n if i:\n insert_i = \"{}{}\".format(insert, i)\n else:\n insert_i = \"\"\n name = \"{basename}{insert}{ext}\".format(\n basename=basename, insert=insert_i, ext=ext\n )\n if not self.exists(\"{}/{}\".format(path, name)):\n break\n return name", "def _unique_path(prefix):\n suffix = ''.join([\n random.choice(string.ascii_letters) for i in range(8)\n ])\n return '%s/%r.%s' % (prefix, time.time(), suffix)", "def new_filename(fname=None,ndigits=3):\n if fname is None:\n ext = (\"%%.%ii\" % ndigits) % 1\n fname = \"%s.%s\" % (random_string(6), ext)\n \n if os.path.exists(fname): \n fname = increment_filename(fname,ndigits=ndigits)\n\n return fname", "def generate_filename_prefix(self):\n email = self.get_user_email()\n if not email:\n return ''\n\n return '%s_' % md5_hash(email, self.salt)", "def _create_unique_filename_with_integer_suffix(fullpath):\n # create an unique filename\n suffix = None\n suffix_cnt=1\n while os.path.exists(fullpath):\n if suffix: fullpath = fullpath[0:-len(suffix)]\n suffix = \".%s\" % suffix_cnt\n suffix_cnt+=1\n fullpath = fullpath + suffix\n return fullpath", "def prefix_file(filename, prefix):\n path, file_or_dir = os.path.split(filename)\n new_filename = os.path.join(path, prefix + file_or_dir)\n os.rename(filename, new_filename)", "def prefixer_extensioner(file_path, old, new, file_content=None):\n log.debug(\"Prepending '{}' Prefix to {}.\".format(new.upper(), file_path))\n global args\n extension = 
os.path.splitext(file_path)[1].lower().replace(old, new)\n filenames = os.path.splitext(os.path.basename(file_path))[0]\n filenames = args.prefix + filenames if args.prefix else filenames\n if args.hash and file_content: # http://stackoverflow.com/a/25568916\n filenames += \"-\" + sha1(file_content.encode(\"utf-8\")).hexdigest()[:11]\n log.debug(\"Appending SHA1 HEX-Digest Hash to '{}'.\".format(file_path))\n dir_names = os.path.dirname(file_path)\n file_path = os.path.join(dir_names, filenames + extension)\n return file_path", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def save_file_with_id_name(self, filename):\n file_ = filename.split(os.sep)[-1]\n extension = \".\".join(file_.split(\".\")[-1:])\n filename = str(uuid.uuid4()) + \".\" + extension\n return filename", "def create_new_name(self, file_name):\n new_name = file_name\n index = 0\n while new_name in self.used_names:\n new_name = file_name + \"-\" + str(index)\n index += 1\n return new_name", "def append_to_filename(filepath: str, name_suffix: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n name = filepath_name_only(filepath)\n return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))", "def _set_filename(self, filename):\n tmp_file = '_'.join(filename.split())\n# new_file = new_file.replace(\"'\",\n# '_').replace('-',\n# '_').replace(' ',\n# '_').replace('(', '_').replace(')', '_')\n new_file = ''\n pathsep = os.path.sep \n if sys.platform == 'win32':\n pathsep = '/'\n for char in tmp_file:\n if char.isalnum() or char in ['.', '_', ':', pathsep, '-']:\n new_file += char\n try:\n shutil.copy(filename, new_file)\n except shutil.Error, err:\n msg = \"`%s` and `%s` are the same file\" % (filename, new_file)\n if str(err) == msg:\n pass\n else:\n raise err\n utils.ensure_file_exists(new_file)\n self._filename = new_file\n self._basename, self._ext = os.path.splitext(self._filename)", "def __rename_file(filename, suffix):\n filename = PDFWorkshop.__clean_filename(filename)\n return PDFWorkshop.__add_filename_suffix(filename, suffix)", "def _create_file_name(file_path):\r\n file_base, file_ext = os.path.splitext(file_path)\r\n if os.path.isfile(file_path):\r\n nfile = 1\r\n check = True\r\n while check:\r\n name_add = '0000' + str(nfile)\r\n file_path = file_base + \"_\" + name_add[-4:] + file_ext\r\n if os.path.isfile(file_path):\r\n nfile = nfile + 1\r\n else:\r\n check = False\r\n return file_path", "def get_unique_filename(fname, fdir):\n ext = path.splitext(fname)[1]\n\n while True:\n uid = uuid4().hex\n name = uid + ext\n if not path.exists(path.join(fdir, name)):\n return name", "def prep_file_name(path, file):\r\n name = path.__str__() + '~' + file.__str__()\r\n name = name.lower()\r\n name = name.replace(' ', '_')\r\n name = re.sub('[^a-z0-9\\-_!.~]+', '', name)\r\n return name", "def _hash_and_write(outdir, mfile):\n name, filebytes = _hash_midi(mfile)\n newpath = os.path.join(outdir, name)\n if 
os.path.exists(newpath):\n return '__duplicate__'\n with open(newpath, 'wb') as fhandle:\n fhandle.write(filebytes)\n return name" ]
[ "0.6688911", "0.6385569", "0.6384604", "0.62978894", "0.6184149", "0.61631465", "0.61355627", "0.6128925", "0.61214083", "0.6111603", "0.6055405", "0.6050752", "0.6027128", "0.5992498", "0.5985448", "0.5940152", "0.59259474", "0.5904364", "0.58930105", "0.58843166", "0.58842504", "0.5876831", "0.5871596", "0.58400255", "0.5835133", "0.58092886", "0.57521045", "0.57369804", "0.57356733", "0.571386" ]
0.8221782
0
Given a document (represented as a string), return a list of all of the words in that document, in order. Process the document by converting all words to lowercase, and removing any punctuation or English stopwords.
def tokenize(document):
    raw_words = [word.lower() for word in nltk.word_tokenize(document) if word.isalpha()]
    final_words = []
    for word in raw_words:
        if word not in string.punctuation and word not in nltk.corpus.stopwords.words("english"):
            final_words.append(word)
    return final_words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenize(document):\n token = nltk.word_tokenize(document)\n\n output = [word.lower() for word in token if (word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\"))]\n\n return output", "def tokenize(document):\n words = [word.lower() for word in nltk.word_tokenize(document) if word not in string.punctuation and word not in nltk.corpus.stopwords.words(\"english\") ]\n\n return sorted(words)", "def tokenize(document):\n import string\n\n # tokenize the given document\n words = nltk.tokenize.word_tokenize(document)\n words = [word.lower() for word in words]\n\n # filter words from punctuations and stopwords\n loop_words = words.copy()\n for word in loop_words:\n if word in [char for char in string.punctuation] + nltk.corpus.stopwords.words(\"english\"):\n words.remove(word)\n\n return words", "def tokenize(document):\n words = nltk.word_tokenize(document)\n # tokenize files into a list of words\n return [\n word.lower() for word in words\n # filter out all punctuation (import string) and all stopwords\n if not all(char in string.punctuation for char in word) \n and word not in nltk.corpus.stopwords.words(\"english\")\n ]", "def tokenize(document):\n\n # Unable to acquire 'stopwords' without these snippets due to my python config\n # import ssl\n # ssl._create_default_https_context = ssl._create_unverified_context\n # nltk.download('stopwords')\n\n stops = nltk.corpus.stopwords.words(\"english\")\n\n all_words = list()\n cleaned_words = list()\n\n all_words = nltk.word_tokenize(document)\n\n for word in all_words:\n word = word.strip()\n word = word.lower()\n\n if word in stops \\\n or not word \\\n or word in string.punctuation \\\n or word.strip(\"=\") != word:\n continue\n else:\n cleaned_words.append(word)\n\n return cleaned_words", "def getWords(docstr):\n # get rid of digits and non-alphanumeric chars\n # and split on spaces\n wds = re.sub('\\d', ' ', docstr)\n wds = re.sub('[\\W_]', ' ', wds)\n wds = wds.split()\n\n # convert to lowercase and get rid of stop words\n wordlist = [w.lower() for w in wds]\n wordlist = [w for w in wordlist if w not in stopWords]\n wordlist = [w for w in wordlist if len(w) >= 3]\n\n return wordlist", "def get_words(doc):\n splitter = re.compile('\\\\W*')\n # Split the words by non-alpha characters\n words = [s.lower() for s in splitter.split(doc) \n if len(s)>2 and len(s)<20]\n # Return the unique set of words only\n return dict([(w,1) for w in words])", "def tokenize(document):\n\n # Initialize the list of words, punctuation and stopwords\n keywords = []\n punctuations_list = string.punctuation\n stopwords_list = nltk.corpus.stopwords.words(\"english\")\n\n # Make a naive tokenized copy of the `document`\n naive_list = nltk.word_tokenize(document)\n\n for item in naive_list:\n\n # Ignore punctuations or stopwords\n if (item in punctuations_list) or (item in stopwords_list) or (len(item) < 4):\n continue\n\n # Add document by coverting all words to lowercase\n keywords.append(item.lower())\n\n return keywords", "def tokenize(document):\n nltk.download('punkt')\n nltk.download('stopwords')\n wordList = nltk.word_tokenize(document.lower())\n punctuation = string.punctuation\n stopWords = nltk.corpus.stopwords.words(\"english\")\n listCopy = copy.deepcopy(wordList)\n for w in listCopy:\n if w in stopWords or w in punctuation:\n newList = list(filter(w.__ne__, wordList))\n wordList = newList\n return wordList", "def process_text(text):\n words = word_tokenize(text)\n return [word.lower() for word in words if word not in 
string.punctuation]", "def normalize_words(document):\n stopwords = set(nltk.corpus.stopwords.words('english'))\n lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\n for token in document:\n token = token.lower()\n if token in string.punctuation: continue\n if token in stopwords: continue\n yield lemmatizer.lemmatize(token)", "def tokenize(document):\n terms = document.lower().split()\n space = ' '\n return [term.strip(characters) for term in terms if term not in space]", "def get_word_list(text_string):\n\ttext_no_punc = ''\n\ttext = text_string[600:] #kill the header\n\tfor char in text: #killing punctuation\n\t\tif not is_punct_char(char):\n\t\t\ttext_no_punc = text_no_punc+char #so extend the string everytime we run into a letter\n\ttext_no_punc_lower = string.lower(text_no_punc)\n\tlist_of_words = []\n\tlist_of_words = text_no_punc_lower.split( ) #splitting the string into the list\n\treturn list_of_words", "def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]", "def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]", "def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)", "def _words(self):\n regex = r'\\b\\w+\\b'\n for word in re.findall(regex, self.text):\n yield word", "def preprocess(self, doc):\n self.stopwords = []\n doc = doc.split()\n new_doc = []\n for word in doc:\n if word.isalpha() and word.lower() not in self.stopwords:\n word = word.lower()\n if self.stemmer != None:\n word = self.stemmer.stem(word)\n new_doc.append(word)\n return new_doc", "def text_to_wordlist(text, remove_stopwords=True):\n text = re.sub(\"[^a-zA-Z]\",\" \", text)\n words = text.lower().split()\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n\n b=[]\n stemmer = english_stemmer\n for word in words:\n b.append(stemmer.stem(word))\n return(b)", "def create_word_list(text_as_string):\n # print 'creating word list'\n global global_word_list\n\n for w in text_as_string.split():\n word = w.translate(string.maketrans(\"\", \"\"), string.punctuation).lower()\n if len(word) > 0:\n global_word_list.append(word) # Appends each word to global word list\n\n return global_word_list", "def words(s):\n\n return (normalize(w) for w in _words_re.findall(s))", "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "def extract_words(s):\n\n # Convert the data the data into normal for (Eg: 'ç' to 'c') and lowercase it.\n s = unicodedata.normalize('NFKD', s).lower()\n\n # Replace the 
punctuation with a space using the _regex and filter stopwords.\n wordlist = [w for w in _regex.sub(' ', s).split() if w not in _stopwords]\n\n return wordlist", "def text_to_wordlist(raw_text, remove_stopwords=False):\n\n # Pre-processing. Removing HTML\n # TODO: Do further pre-processing (eg. remove links)\n text = BeautifulSoup(raw_text).get_text()\n\n # Remove non-letters\n text = re.sub(\"[^a-zA-Z]\",\" \", text)\n\n # Convert words to lower case and split them\n words = text.lower().split()\n\n # Optionally remove stop words (false by default)\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n\n return(words)", "def clean_doc(doc):\n tokens = doc.split()\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens]\n tokens = [word.lower() for word in tokens if word.isalpha()]\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def tokens(doc):\n return (tok.lower() for tok in re.findall(r\"\\w+\", doc))", "def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2", "def q_tokenize(document):\n final_words = []\n avoided_words = [] # WORDS WHICH ARE TO BE AVOIDED IN THE FINAL LIST\n \n # making the avoided_words list\n for word in string.punctuation: # the string library has a string of punctuations\n avoided_words.append(word)\n for word in nltk.corpus.stopwords.words(\"english\"): # the nltk lib. has a list of stopwords commonly used in english\n avoided_words.append(word)\n\n tokens = nltk.word_tokenize(document)", "def txt_to_word_list(text):\r\n return [w for w in text.split()]", "def _get_word_list(text):\n return re.findall('\\w+', text)" ]
[ "0.79225713", "0.78998256", "0.77558446", "0.7736347", "0.77261716", "0.766667", "0.7387504", "0.7341044", "0.72900367", "0.7227586", "0.70762444", "0.7042829", "0.7022621", "0.70182997", "0.70182997", "0.69928956", "0.6972842", "0.6969272", "0.6964892", "0.69495815", "0.6945343", "0.6925848", "0.6903821", "0.68914884", "0.6843034", "0.6828342", "0.6812949", "0.6787303", "0.67667854", "0.67572755" ]
0.79469746
0
Like `bool`, but the string 'False' evaluates to `False`.
def bool_(val):
    if isinstance(val, six.string_types) and val.lower() == 'false':
        return False
    return bool(val)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def str_bool(s):\n if not s:\n return False\n if type(s) != str:\n # It's not a string and it's not falsy, soooo....\n return True\n s = s.lower()\n if s in [\"false\", \"0\", \"no\", \"n\"]:\n return False\n return True", "def toBool( string ):\r\n return string == 'true'", "def boolean(s):\r\n ss = str(s).lower()\r\n if ss in TRUTHY_STRINGS:\r\n return True\r\n elif ss in FALSY_STRINGS:\r\n return False\r\n else:\r\n raise ValueError(\"not a valid boolean value: \" + repr(s))", "def is_false(value):\n \n return (value is False)", "def str_to_bool(str):\r\n return False if str is None else str.lower() == \"true\"", "def boolean_string(s):\n if s is None:\n return False\n s = str(s)\n if s.lower() not in {'false', 'true'}:\n raise ValueError('Not a valid boolean string')\n return s.lower() == 'true'", "def __str_to_bool(self, s):\n if s == 'True':\n return True\n elif s == 'False':\n return False\n else:\n raise ValueError", "def string_to_bool(value):\n return False if value.upper() == \"FALSE\" or value == \"0\" or value == \"\" else True", "def __bool__(self):\n return _libsbml.string___bool__(self)", "def str2bool(s):\n if s == \"True\":\n return True\n elif s == \"False\":\n return False\n else:\n raise ValueError", "def boolean(val):\n\tif val == \"True\" or val == \"1\":\n\t\treturn True\n\telse:\n\t\treturn False", "def is_bool (self, phrase):\r\n \r\n return isinstance(phrase,bool)", "def _isbool(string):\n return type(string) is bool or (\n isinstance(string, (bytes, str)) and string in (\"True\", \"False\")\n )", "def str2bool(value):\n if value:\n return value.lower() in (\"true\",)\n else:\n return False", "def str2bool(value):\n if value:\n return value.lower() in (\"true\",)\n else:\n return False", "def str2bool(self, val):\n return val.lower() in ('true','yes','t',1)", "def is_bool(self):\n return False", "def asbool(s):\n if s is None:\n return False\n if isinstance(s, bool):\n return s\n s = str(s).strip()\n return s.lower() in truthy", "def asbool(s):\n if s is None:\n return False\n if isinstance(s, bool):\n return s\n s = str(s).strip()\n return s.lower() in truthy", "def test_human_readable_boolean_false():\n # TODO: add a test case that follows the provided example", "def to_boolean(self,string):\n if self.debug:\n print('to_boolean'+lineno())\n # FIXME\n sys.exit(1)\n #string.to_s.casecmp('true').zero?", "def str_to_bool(a):\n if a == 'True':\n return True\n else:\n return False", "def boolean(value):\n if isinstance(value, str):\n if value.lower() in [\"0\", \"no\", \"n\", \"false\", \"f\", \"off\"]:\n return False\n elif value.lower() in [\"1\", \"yes\", \"y\", \"true\", \"t\", \"on\"]:\n return True\n else:\n raise ValueError(\"Invalid truth value '%s'\" % value)\n elif isinstance(value, bool):\n return value\n else:\n raise TypeError(\"invalid type %s, expect bool or str\" % type(value))", "def boolean(raw):\n raw = raw.lower()\n if not raw in (\"true\", \"false\"):\n raise ValueError(\"Could not parse a boolean value from '%s'.\" % raw)\n return raw == \"true\"", "def to_bool(value):\n return value == 'true ' or value == 'true'", "def str_to_bool(s):\n s = s.lower()\n if s not in {'true', 'false', '1', '0'}:\n raise ValueError('Bool expected, but got %r' % s)\n return s in {'true', '1'}", "def isbool(s):\n if type(s) in [int,float,bool]:\n return True\n elif (type(s) != str):\n return False\n return s in ['True','False']", "def Bool(arg):\n return arg.lower() in ('y', 'true', 't', '1')", "def _str2bool(self, v):\n return v.lower() in (\"yes\", \"true\", 
\"t\", \"1\")", "def string_to_bool(arg):\r\n if arg.lower() == 'true':\r\n arg = True\r\n elif arg.lower() == 'false':\r\n arg = False\r\n else:\r\n raise ValueError('ValueError: Argument must be either \"true\" or \"false\".')\r\n return arg" ]
[ "0.79706836", "0.77999836", "0.77764535", "0.77737457", "0.7769025", "0.7752392", "0.7732956", "0.7688372", "0.7639961", "0.7552712", "0.7499345", "0.74967587", "0.74940103", "0.74873126", "0.74873126", "0.7462245", "0.7449587", "0.7444772", "0.7444772", "0.7442706", "0.7398069", "0.7397886", "0.736948", "0.73691815", "0.7369179", "0.7364611", "0.73504996", "0.73436576", "0.7339917", "0.7322064" ]
0.8482024
0
Like str, but allows a value of None to pass through as-is.
def str_or_none(val):
    return str(val) if val is not None else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _coerce_str_unless_none(value):\n return str(value) if value is not None else None", "def string(self, value):\n # respect {None}\n if value is None:\n # by leaving it alone\n return None\n # my value knows\n return str(value)", "def noneToString(text):\n if text in (None, \"\"):\n return \"None\"\n else:\n return str(text)", "def to_safe_str_or_none(value: Optional[str]) -> Optional[str]:\n if value is None:\n return None\n v = str(value.strip()).replace('\\r', '').replace('\\n', '')\n return v or None", "def s2s(s):\n if (s is None): return \"\"\n else: return s", "def _nullify(self, value):\n if not str(value).strip():\n return None\n else:\n return value", "def noneType(value):\r\n return ''", "def get_nonempty_str(arg: Any) -> Optional[str]:\n if isinstance(arg, str):\n if len(arg) > 0:\n return str(arg)\n return None", "def silent_none(value):\n if value is None:\n return ''\n return value", "def string_none(value):\n is_string_none = not value or value.lower() == 'none'\n return None if is_string_none else value", "def stringToNone(text):\n if text in (\"\", None, \"None\"):\n return None\n else:\n return str(text)", "def make_string(value):\n if value:\n return str(value)\n return None", "def quote_or_null(self, val: str, quotes: str = \"'\", null: str = \"NULL\", strings_only = True) -> str:\n if val is None: return null\n if strings_only and isinstance(val, str):\n return f\"{quotes}{val}{quotes}\"\n return val", "def noneToBlankString(text):\n if text in (None, \"None\"):\n return \"\"\n else:\n return str(text)", "def changenonetoNone(s):\r\n if s=='None':\r\n return None\r\n else:\r\n return s", "def xstr(s):\n if s is None:\n return ''\n else:\n return str(s)", "def xstr(s):\n if s is None:\n return \"\"\n else:\n return str(s)", "def xstr(s):\n if s is None:\n return u''\n return s", "def xstr(s):\n if s is None:\n return ''\n return str(s)", "def convert(cls, value: Any) -> Optional[str]:\n # Can be optional\n if value is None:\n return None\n\n cls.assert_value_ok(isinstance(value, str), value)\n\n return value", "def set_string(prop, value: typing.Union[str, None]) -> str:\n if (value is None) or (value == \"\"):\n raise ValueError(\"{} cannot be Null\".format(str(prop)))\n if not(isinstance(value, str)):\n raise TypeError(\"{} can only be a string\".format(str(prop)))\n return value.strip()", "def convert_to_str(value):\n\tif value is None:\n\t\treturn '-'\n\treturn str(value)", "def none_to_empty(data):\n return data if data is not None else ''", "def force_text(s):\n if s is None:\n return \"\"\n if not isinstance(s, six.string_types):\n return six.text_type(s)\n return s", "def none_string(line):\n out_line = None if line.lower() == \"none\" or len(line) == 0 else line\n return out_line", "def to_str(val, default=None):\n str_val = default\n\n try:\n if not is_empty(val):\n str_val = str(val)\n except Exception as e:\n pass\n\n return str_val", "def is_str_none_or_empty(val):\n if val is None:\n return True\n if isinstance(val, string_types):\n val = val.strip()\n if not val:\n return True\n return False", "def _force_string(x):\n if isinstance(x, basestring):\n return x\n else:\n return str(x)", "def test_optional_string_roundtrip():\n assert \"Hello, world!\" == Optional(String).read(\n Optional(String).to_bytes(\"Hello, world!\")\n )\n assert None == Optional(String).read(\n Optional(String).to_bytes(None)\n )", "def to_string(value: Any) -> str:\n return StringConverter.to_string_with_default(value, '')" ]
[ "0.8081559", "0.7763027", "0.76215655", "0.7473673", "0.74665016", "0.7368865", "0.7334071", "0.73068684", "0.71900374", "0.7161688", "0.7096358", "0.70624596", "0.70293933", "0.7020066", "0.7012749", "0.69696677", "0.6965904", "0.6909482", "0.6829046", "0.6783363", "0.6762439", "0.6646001", "0.663095", "0.65867496", "0.6580921", "0.6568962", "0.65384775", "0.65086925", "0.65071344", "0.6501947" ]
0.7903235
1
Compute the list of all declared slots for a class.
def GetAllSlots(cls):
    slots = []
    for parent in cls.__mro__:
        slots.extend(getattr(parent, "__slots__", []))
    return slots
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slots(self):\n return self.__slots.values()", "def get_potential_classes_for_slot(slot_number):\n return [\"econ\", \"biz\", \"wtf\"]", "def slots(cls, mangled=False):\n # type: (type, bool) -> Set[str]\n _slots = set() # type: Set[str]\n for base in inspect.getmro(cls):\n _slots.update(\n _mangle(s, base.__name__) if mangled else s\n for s in getattr(base, \"__slots__\", ())\n )\n return _slots", "def c(self) -> List[RegisterSlot]:\n return self._reg_slots", "def get_slots(self) -> int:", "def required_slots(self,tracker) -> List[Text]:", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n \"tipo_lavado\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"tipo_compostura\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]", "def _GetSlots(mcs, attrs):\n raise NotImplementedError", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"search_type\", \"time\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n ]", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def all(cls):\n return [(k, v) for k, v in cls.__members__.items()]", "def get_slot_names(self, *args, **kwargs):\n return self._optimizer.get_slot_names(*args, **kwargs)", "def get_defined_pair_classes(self):\n self._collect()\n if self.mode == 'dynamic':\n return []\n else:\n return self._preloaded_pair_classes", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"bug\", \"beverage\", \"second_person_plural\", \n \"cot_caught\", \"rain_sun\", \"crawfish\", \"halloween\",\n \"sandwich\", \"side_road\", \"shoes\", \"highway\", \"yard_sale\",\n \"rubbernecking\", \"frosting\", \"lawyer\", \"kitty_corner\",\n \"firefly\", \"verge\", \"brew_thru\", \"water_fountain\"]", "def list(cls):\n return [cls.__dict__.get(name) for name in dir(cls) if (\n not callable(getattr(cls, name)) and not name.startswith(\"_\")\n )]", "def get_instance_layout(cls):\n\n layout = {}\n for name, nodes in get_instance_definitions(cls).items():\n\n # Drop functions.\n\n only_methods = 1\n for node in nodes:\n if not isinstance(node, compiler.ast.Function):\n only_methods = 0\n break\n\n if not only_methods:\n layout[name] = nodes\n\n return layout", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"product\", \"applicant_name\", \"applicant_dob\", \"applicant_phoneno\", \"applicant_address\"]", "def get_instance_definitions(cls):\n\n definitions = {}\n for instance in cls._instances:\n for name, nodes in instance._namespace.items():\n if not definitions.has_key(name):\n definitions[name] = []\n for node in nodes:\n definitions[name].append(node)\n return definitions", "def required_slots(tracker):\n print(tracker.get_slot('order_number'))\n return [\"order_number\"]", "def slots(self):\n highSlots = self._getAttribute(Attribute.highSlots)\n medSlots = self._getAttribute(Attribute.medSlots)\n lowSlots = self._getAttribute(Attribute.lowSlots)\n\n if None in [highSlots, medSlots, lowSlots]:\n # This is a T3 ship.\n highSlots = medSlots = lowSlots = 0\n\n # Get rigs and subs.\n rigSlots = self._getAttribute(Attribute.rigSlots, 0)\n subSlots = self._getAttribute(Attribute.subSlots, 0)\n\n # Get missile and turret slots.\n missileSlots = self._getAttribute(Attribute.missileSlots, 0)\n turretSlots = 
self._getAttribute(Attribute.turretSlots, 0)\n\n return {\n \"highSlots\": int(highSlots),\n \"medSlots\": int(medSlots),\n \"lowSlots\": int(lowSlots),\n \"rigSlots\": int(rigSlots),\n \"subSlots\": int(subSlots),\n \"turretSlots\": int(turretSlots),\n \"missileSlots\": int(missileSlots)\n }", "def getSlotMap(self):\n slotMap = dict()\n for entry in self.slots:\n slotMap[entry] = self.__getattribute__(\"on_\" + entry)\n return slotMap", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"credit_card\", \"payment_amount\", \"time\", \"confirm\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"domicilio\",\n \"fecha_hora\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"PERSON\", \"amount_of_money\", \"confirm\"]", "def timeslot(self) -> List[TimeslotTimeslot]:\n return self._timeslot", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]", "def mem(self) -> List[MemorySlot]:\n return self._mem_slots" ]
[ "0.6665447", "0.6540712", "0.63587636", "0.6284265", "0.6129058", "0.60046625", "0.5797897", "0.57293785", "0.5724125", "0.5720947", "0.5678318", "0.56717503", "0.55985445", "0.55912197", "0.554557", "0.54841936", "0.5479285", "0.5446785", "0.54353386", "0.54201335", "0.5413183", "0.5412466", "0.5389524", "0.5359903", "0.53481025", "0.5330106", "0.53219354", "0.53125566", "0.52888256", "0.5284781" ]
0.75151503
0