query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
Create, update, and delete volume attachments via patch [Arguments] | def fusion_api_patch_storage_volume_attachments(self, body, param='', api=None, headers=None):
return self.volume_attachment.patch(body=body, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_aws_service_api_volume_attachment_put(self):\n pass",
"def test_aws_service_api_volume_attachment_delete(self):\n pass",
"def test_manage_volume_attachments(self, volume, instance, volumes_steps):\n volumes_steps.attach_instance(volume.name, instance.name)\n volumes_steps.detach_instance(volume.name, instance.name)",
"def test_attachment_deletion_allowed_multiple_attachment(self):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment, attachment])\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None, volume)",
"def test_attachment_deletion_allowed_mismatched_volume_and_attach_id(\n self, mock_get_attatchment):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n attachment2 = self._get_attachment()\n attachment2.volume_id = attachment.volume.id = fake.VOLUME2_ID\n self.assertRaises(exception.InvalidInput,\n self.volume_api.attachment_deletion_allowed,\n self.context, attachment2.id, volume)\n mock_get_attatchment.assert_called_once_with(self.context,\n attachment2.id)",
"def test_patch_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume_1 = synthetic_volume_full(host)\n spare_volume_2 = synthetic_volume_full(host)\n\n response = self.api_client.patch(\n \"/api/target/\",\n data={\n \"objects\": [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_1.id},\n {\"kind\": \"MDT\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_2.id},\n ],\n \"deletions\": [],\n },\n )\n self.assertHttpAccepted(response)",
"def multipart():\n with commit():\n link_and_create_multipart_volumes()\n reindex_pidtype('docid')\n reindex_pidtype('serid')",
"def test_attachment_deletion_allowed_attachment_from_volume(\n self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n volume.id)",
"def test_attachment_deletion_allowed_volume_no_attachments(self):\n volume = tests_utils.create_volume(self.context)\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)",
"def test_attachment_update_volume_in_error_state(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)\n vref.status = 'error'\n vref.save()\n connector = {'fake': 'connector',\n 'host': 'somehost'}\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_update,\n self.context,\n aref,\n connector)",
"def test_attachment_create_readonly_volume(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.volume_api.update_readonly_flag(self.context, vref, True)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('ro', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)",
"def test_additional_attachment_create_no_connector(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('null', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)\n\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual(2, len(vref.volume_attachment))",
"def test_attachment_create_creating_volume(self):\n volume_params = {'status': 'creating'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)",
"def patch_volumes(\n self,\n references=None, # type: List[models.ReferenceType]\n volume=None, # type: models.VolumePatch\n authorization=None, # type: str\n x_request_id=None, # type: str\n ids=None, # type: List[str]\n names=None, # type: List[str]\n truncate=None, # type: bool\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> models.VolumeResponse\n kwargs = dict(\n volume=volume,\n authorization=authorization,\n x_request_id=x_request_id,\n ids=ids,\n names=names,\n truncate=truncate,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volumes_api.api20_volumes_patch_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n return self._call_api(endpoint, kwargs)",
"def test_aws_service_api_volume_patch(self):\n pass",
"def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])",
"def patch_resource(self, namespace: \"str\" = None) -> \"VolumeAttachmentStatus\":\n names = [\"patch_namespaced_volume_attachment\", \"patch_volume_attachment\"]\n\n response = _kube_api.execute(\n action=\"patch\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict(), \"name\": self.metadata.name},\n )\n\n output = VolumeAttachmentStatus()\n if response is not None:\n output.from_dict(_kube_api.to_kuber_dict(response.status))\n return output",
"def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')",
"def test_attachment_create_no_connector(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('null', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)",
"def test_upload_attachment_to_container(fake_note_with_video_attachment, fake_attachment):\n\n note_id_value: str = str(uuid.uuid4())\n base_url = 'https://dt-fs-test2.crm.crm.dynamics.com'\n oauth_url = 'https://dtdv-video-index-uspklrodz4yzi.azurewebsites.net/api/Dynamic365AuthToken?code=V5UYqIu=='\n\n oauth_token = \"AAABBBCCCDDDEEE\"\n\n account_name = \"storage_account_a\"\n container = \"container_a\"\n\n api_uri = base_url + Note.ATTACHMENT_ENDPOINT.format(note_id=note_id_value)\n a_note = Note.from_dict(note_id_value, fake_note_with_video_attachment)\n filename = a_note.filename\n\n blob_storage_endpoint = f\"https://{account_name}.blob.core.windows.net/{container}/{filename}\"\n\n rest_headers = {}\n responses.add(responses.GET, api_uri, json=fake_attachment, status=HTTPStatus.OK)\n responses.add(responses.POST, oauth_url, json={\"token\": oauth_token}, status=HTTPStatus.OK)\n responses.add(responses.PUT, blob_storage_endpoint, json={}, status=HTTPStatus.CREATED)\n\n downloaded_file = a_note.download_attachment(base_url, rest_headers)\n TAG_A = \"tag_a\"\n TAG_B = \"tag_b\"\n metadata_tags = {TAG_A: \"value_a\", TAG_B: \"value_b\"}\n assert a_note.upload_attachment_to_container(downloaded_file, metadata_tags, account_name, container, oauth_url)\n assert len(responses.calls) == 3\n assert responses.calls[0].request.url == api_uri\n assert responses.calls[1].request.url == oauth_url\n assert responses.calls[2].request.url == blob_storage_endpoint",
"def create_multipart_volumes(pid, multipart_legacy_recid, migration_volumes):\n volumes = {}\n # Combine all volume data by volume number\n click.echo('Creating volume for {}...'.format(multipart_legacy_recid))\n for obj in migration_volumes:\n volume_number = obj['volume']\n if volume_number not in volumes:\n volumes[volume_number] = {}\n volume = volumes[volume_number]\n for key in obj:\n if key != 'volume':\n if key in volume:\n raise KeyError(\n 'Duplicate key \"{}\" for multipart {}'.format(\n key,\n multipart_legacy_recid\n )\n )\n volume[key] = obj[key]\n\n volume_numbers = iter(sorted(volumes.keys()))\n\n # Re-use the current record for the first volume\n # TODO review this - there are more cases of multiparts\n first_volume = next(volume_numbers)\n first = Document.get_record_by_pid(pid)\n if 'title' in volumes[first_volume]:\n first['title'] = volumes[first_volume]['title']\n first['volume'] = first_volume\n first['_migration']['multipart_legacy_recid'] = multipart_legacy_recid\n # to be tested\n if 'legacy_recid' in first:\n del first['legacy_recid']\n first.commit()\n yield first\n\n # Create new records for the rest\n for number in volume_numbers:\n temp = first.copy()\n temp['title'] = volumes[number]['title']\n temp['volume'] = number\n record_uuid = uuid.uuid4()\n provider = DocumentIdProvider.create(\n object_type='rec',\n object_uuid=record_uuid,\n )\n temp['pid'] = provider.pid.pid_value\n record = Document.create(temp, record_uuid)\n record.commit()\n yield record",
"def link_and_create_multipart_volumes():\n click.echo('Creating document volumes and multipart relations...')\n search = DocumentSearch().filter('term', _migration__is_multipart=True)\n for hit in search.scan():\n if 'legacy_recid' not in hit:\n continue\n click.secho('Linking multipart {}...'.format(hit.legacy_recid),\n fg='green')\n multipart = get_multipart_by_legacy_recid(hit.legacy_recid)\n documents = create_multipart_volumes(\n hit.pid,\n hit.legacy_recid,\n hit._migration.volumes\n )\n\n for document in documents:\n if document and multipart:\n click.echo(\n 'Creating relations: {0} - {1}'.format(multipart['pid'],\n document['pid']))\n create_parent_child_relation(\n multipart,\n document,\n MULTIPART_MONOGRAPH_RELATION,\n document['volume']\n )",
"def wrapup(self):\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc[\"_rev\"] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc,\n attachment[\"content\"],\n filename=attachment[\"filename\"],\n content_type=attachment[\"mimetype\"])",
"def test_attachment_create_with_connector(self,\n mock_rpc_attachment_update):\n volume_params = {'status': 'available'}\n connection_info = {'fake_key': 'fake_value',\n 'fake_key2': ['fake_value1', 'fake_value2']}\n mock_rpc_attachment_update.return_value = connection_info\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n connector = {'fake': 'connector'}\n attachment = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2,\n connector)\n mock_rpc_attachment_update.assert_called_once_with(self.context,\n mock.ANY,\n connector,\n mock.ANY)\n new_attachment = objects.VolumeAttachment.get_by_id(self.context,\n attachment.id)\n self.assertEqual(connection_info, new_attachment.connection_info)",
"def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)",
"def test_attach_volume_ignore_VolumeAttachmentNotFound(\n self, mock_notify, mock_elevate, mock_event, mock_debug_log):\n mock_elevate.return_value = self.context\n\n attachment_id = uuids.attachment_id\n fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)\n fake_bdm.attachment_id = attachment_id\n instance = self._create_fake_instance_obj()\n expected_exception = test.TestingException()\n\n def fake_attach(*args, **kwargs):\n raise expected_exception\n\n with test.nested(\n mock.patch.object(driver_block_device.DriverVolumeBlockDevice,\n 'attach'),\n mock.patch.object(cinder.API, 'attachment_delete'),\n mock.patch.object(objects.BlockDeviceMapping,\n 'destroy')\n ) as (mock_attach, mock_attach_delete, mock_destroy):\n mock_attach.side_effect = fake_attach\n mock_attach_delete.side_effect = \\\n exception.VolumeAttachmentNotFound(\n attachment_id=attachment_id)\n self.assertRaises(\n test.TestingException, self.compute.attach_volume,\n self.context, instance, fake_bdm)\n mock_destroy.assert_called_once_with()\n mock_notify.assert_has_calls([\n mock.call(self.context, instance, 'fake-mini',\n action='volume_attach', phase='start',\n volume_id=uuids.volume_id),\n mock.call(self.context, instance, 'fake-mini',\n action='volume_attach', phase='error',\n volume_id=uuids.volume_id,\n exception=expected_exception),\n ])\n mock_event.assert_called_once_with(\n self.context, 'compute_attach_volume', CONF.host,\n instance.uuid, graceful_exit=False)\n self.assertIsInstance(mock_debug_log.call_args[0][1],\n exception.VolumeAttachmentNotFound)",
"def execute(self,\n context: context.RequestContext,\n optional_args: dict,\n **kwargs) -> dict[str, Any]:\n\n src_volid = kwargs.get('source_volid')\n src_vol = None\n if src_volid is not None:\n src_vol = objects.Volume.get_by_id(context, src_volid)\n bootable = False\n if src_vol is not None:\n bootable = src_vol.bootable\n elif kwargs.get('snapshot_id'):\n snapshot = objects.Snapshot.get_by_id(context,\n kwargs.get('snapshot_id'))\n volume_id = snapshot.volume_id\n snp_vol = objects.Volume.get_by_id(context, volume_id)\n if snp_vol is not None:\n bootable = snp_vol.bootable\n availability_zones = kwargs.pop('availability_zones')\n volume_properties = {\n 'size': kwargs.pop('size'),\n 'user_id': context.user_id,\n 'project_id': context.project_id,\n 'status': 'creating',\n 'attach_status': fields.VolumeAttachStatus.DETACHED,\n 'encryption_key_id': kwargs.pop('encryption_key_id'),\n # Rename these to the internal name.\n 'display_description': kwargs.pop('description'),\n 'display_name': kwargs.pop('name'),\n 'multiattach': kwargs.pop('multiattach'),\n 'bootable': bootable,\n }\n if len(availability_zones) == 1:\n volume_properties['availability_zone'] = availability_zones[0]\n\n # Merge in the other required arguments which should provide the rest\n # of the volume property fields (if applicable).\n volume_properties.update(kwargs)\n volume = objects.Volume(context=context, **volume_properties)\n volume.create()\n\n # FIXME(dulek): We're passing this volume_properties dict through RPC\n # in request_spec. This shouldn't be needed, most data is replicated\n # in both volume and other places. We should make Newton read data\n # from just one correct place and leave just compatibility code.\n #\n # Right now - let's move it to versioned objects to be able to make\n # non-backward compatible changes.\n\n volume_properties = objects.VolumeProperties(**volume_properties)\n\n return {\n 'volume_id': volume['id'],\n 'volume_properties': volume_properties,\n # NOTE(harlowja): it appears like further usage of this volume\n # result actually depend on it being a sqlalchemy object and not\n # just a plain dictionary so that's why we are storing this here.\n #\n # In the future where this task results can be serialized and\n # restored automatically for continued running we will need to\n # resolve the serialization & recreation of this object since raw\n # sqlalchemy objects can't be serialized.\n 'volume': volume,\n }",
"def _patch(self, path=None, version=None, params=None,\n data=None, json=None, header=None):\n return self.client.patch(module='mam', path=path, version=version,\n params=params, data=data,\n json=json, header=header)",
"def patch(self, controller_fs_uuid, patch):\n raise exception.OperationNotPermitted",
"async def create_or_update(\n self,\n farmer_id: str,\n attachment_id: str,\n file: Optional[IO] = None,\n farmer_id1: Optional[str] = None,\n resource_id: Optional[str] = None,\n resource_type: Optional[str] = None,\n original_file_name: Optional[str] = None,\n id: Optional[str] = None,\n status: Optional[str] = None,\n created_date_time: Optional[str] = None,\n modified_date_time: Optional[str] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n e_tag: Optional[str] = None,\n **kwargs: Any\n ) -> \"_models.Attachment\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Attachment\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-03-31-preview\"\n content_type = kwargs.pop(\"content_type\", \"multipart/form-data\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_or_update.metadata['url'] # type: ignore\n path_format_arguments = {\n 'Endpoint': self._serialize.url(\"self._config.endpoint\", self._config.endpoint, 'str', skip_quote=True),\n 'farmerId': self._serialize.url(\"farmer_id\", farmer_id, 'str'),\n 'attachmentId': self._serialize.url(\"attachment_id\", attachment_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n # Construct form data\n _form_content = {\n 'file': file,\n 'FarmerId': farmer_id1,\n 'ResourceId': resource_id,\n 'ResourceType': resource_type,\n 'OriginalFileName': original_file_name,\n 'Id': id,\n 'Status': status,\n 'CreatedDateTime': created_date_time,\n 'ModifiedDateTime': modified_date_time,\n 'Name': name,\n 'Description': description,\n 'ETag': e_tag,\n }\n request = self._client.patch(url, query_parameters, header_parameters, form_content=_form_content)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error)\n\n if response.status_code == 200:\n deserialized = self._deserialize('Attachment', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('Attachment', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized"
] | [
"0.69271636",
"0.6763079",
"0.6706392",
"0.6365676",
"0.6276474",
"0.62318283",
"0.61483425",
"0.6080413",
"0.6048534",
"0.60193163",
"0.60192627",
"0.600426",
"0.5877835",
"0.5871101",
"0.57794565",
"0.572767",
"0.5701827",
"0.5693271",
"0.568384",
"0.5607973",
"0.5607061",
"0.55993915",
"0.5589938",
"0.55679125",
"0.5538006",
"0.55212104",
"0.5508822",
"0.550515",
"0.5489066",
"0.54782647"
] | 0.7713857 | 0 |
Updates a Switch. [Arguments] | def fusion_api_edit_switch(self, body, uri, api=None, headers=None):
return self.switch.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_switch(self, value):\n act = SwitchAction(self, value)\n return act.invoke()",
"def switch_changed(self, switch, name):\n section, option = name\n v = (\"1\" if switch.value else \"0\")\n _stash.config.set(section, option, v)\n self.save()",
"def command_update_hw(self, cmd):\n # TODO\n pass",
"def update(*args):",
"def updateSwitch(self, deviceName, propertyName):\n\n if self.device is None:\n return False\n if deviceName != self.name:\n return False\n\n for element, value in self.device.getSwitch(propertyName).items():\n key = propertyName + '.' + element\n self.data[key] = value\n\n # this combination only exists in version 1\n if propertyName == 'AUTO_DEW' and element == 'AUTO_DEW_ENABLED':\n if self.versionUPB != 1:\n self.versionUPB = 1\n self.signals.version.emit(1)\n\n # print(propertyName, element, value)\n\n return True",
"def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)",
"def update(self, args):\n pass",
"def __process_xx_switch_arg(self, argument):\n _method_name = '__process_xx_switch_arg'\n\n match = self.__xx_args_switch_regex.match(argument)\n xarg = match.group(2)\n on_or_off = match.group(1)\n if on_or_off == '+':\n on_or_off_text = 'on'\n else:\n on_or_off_text = 'off'\n\n if 'switch' not in self.__xx_args:\n self.__xx_args['switch'] = OrderedDict()\n self._logger.finer('WLSDPLY-08304', argument, xarg, on_or_off_text,\n class_name=self._class_name, method_name=_method_name)\n self.__xx_args['switch'][xarg] = on_or_off",
"def set_switch(self, values):\n for label, val in values.items():\n # if label is not a label, get the according label\n if isinstance(label, (ArduinoSwitchControlSwitch,\n ArduinoSwitchControlConnector)):\n label = label.label\n # if switch label, get the parameter to switch the switch\n if label in self.switches:\n par = self.parameters[f'switch_{label}_mode']\n # if input label, get the parameter to set a route from this input\n elif label in self.inputs:\n par = self.parameters[f'route_{label}_mode']\n # if parameter name, get the right parameter\n elif label.startswith('switch_'):\n if label[7:] not in [str(lab) for lab in self.switches]:\n raise SwitchError(f\"No switch with label {label[7:]}\")\n par = self.parameters[f'{label}_mode']\n elif label.startswith('route_'):\n if label[6:] not in [str(lab) for lab in self.inputs]:\n raise ConnectorError(f\"No input with label {label[6:]}\")\n if f'{label}_mode' not in self.parameters:\n raise RouteError(f\"No route starting at input {label[6:]}\")\n par = self.parameters[f'{label}_mode']\n else:\n raise ValueError(f\"parameter label {label} not recognized.\")\n\n # apply selected parameter for switching\n par(val)",
"def update(self, *args, **kw):\n pass",
"def swap(self, *args, **kwargs):\n return self.switch(*args, **kwargs)",
"def update(self, t):\n self.state.send(t)",
"def update():",
"def update():",
"def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")",
"def _set_switch(self, switch, state):\n switch = self.switch_by_label(switch)\n id = self.switches[switch.label].id\n # make sure that the serial port is open\n self.assure_serial()\n # create command for the arduino and send it\n input_string = str(id[0]) + str(id[1]) + str(state)\n self.serial.write(input_string.encode('ascii'))\n time.sleep(self.WRITE_DELAY)\n # read switch after setting it, to confirm switching\n try:\n self._get_switch(switch)\n except SwitchError:\n raise SwitchError(\"Reading switch after switching was \"\n \"unsuccessful: Indicators of the switch show \"\n f\"{switch.indicators}.\")\n # raise error, if the switching was not successful\n if switch.state != state:\n raise SwitchError(\"Setting the switch was unsuccessful. The \"\n f\"switch should be in state {state}, but \"\n f\"the indicators show state {switch.state}.\")",
"def SwitchMode(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('switchMode', payload=payload, response_object=None)",
"async def update(self):\n resp = await self._request('get', 'state')\n if resp:\n for line in resp.splitlines():\n key, val = line.strip().split(None, 1)\n if val == 'on' or val == 'off':\n val = (val == 'on')\n self.state_data[key] = val\n else:\n self.state_data[key] = val",
"async def async_device_update(self, warning=True):\n LOGGER.info(\"Update switch {name}\".format(name=self.name))\n await self.heater.async_update()",
"def update(self, *args, **kwargs):",
"def switch_update(event):\n if (\n not isinstance(event.device, rfxtrxmod.LightingDevice)\n or event.device.known_to_be_dimmable\n or event.device.known_to_be_rollershutter\n ):\n return\n\n new_device = get_new_device(event, config, RfxtrxSwitch)\n if new_device:\n new_device.apply_event(event)\n add_entities_callback([new_device])",
"def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]",
"def __checkSwitch ( self, letter, value ):\n\n #-- 1 --\n # [ if letter is a key in self.switchMap -> I\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n if not self.switchMap.has_key ( letter ):\n usage ( self.switchSpecs, self.posSpecs,\n \"No such switch: -%s\" % letter )\n\n #-- 2 --\n if len(value) == 0:\n self.switchMap[letter] = 1\n else:\n self.switchMap[letter] = value",
"def update(self):\n self._state = 23",
"def update_based_on_topology(self, *args, **kwargs):\n for bfr in Configuration.get(\"switches\"):\n switch = bfr[\"name\"]\n\n self.update_bier_decap_rule(switch=switch)",
"def _add_to_switch(self, _switch, context):\n _network = context.current['id']\n _vlanid = context.current['provider:segmentation_id']\n\n # BRIDGE_PORT_URL = '{url_prefix}://{switch_name_or_ip}:{port}/networks/{vlan}/{network_id}/{port_id}'\n for _switchport in _switch.get('ports'):\n try:\n _request = requests.put(\n BRIDGE_PORT_URL.format(url_prefix=self.url_prefix,\n port=self.protocol_port,\n switch_name_or_ip=_switch.get('name'),\n vlan=unicode(_vlanid),\n network_id=_network,\n port_id=_switchport)\n )\n LOG.info(\n _LI('Sending PUT API Call to Switch %s'),\n _request.url\n )\n if _request.status_code != requests.codes.ok:\n LOG.error(\n _LE(\"Failed To Provision Switch %s\"), _request.text)\n raise MechanismDriverError()\n except ConnectionError:\n LOG.error(\n _LE('Failed to connect to switch %s'),\n _request.url\n )",
"def bcp_switch(self, name, state, **kwargs):\n if int(state):\n self.events.post('switch_' + name + '_active')\n else:\n self.events.post('switch_' + name + '_inactive')",
"def update( ):\r\n pass",
"def update(self, *args, **kwargs):\n pass"
] | [
"0.6282466",
"0.6170663",
"0.6168931",
"0.61634696",
"0.60626763",
"0.5929562",
"0.5883229",
"0.5791149",
"0.578171",
"0.5781447",
"0.5766732",
"0.57095605",
"0.57047874",
"0.5698562",
"0.5698562",
"0.5691825",
"0.5651865",
"0.5648597",
"0.5609185",
"0.5602009",
"0.5587439",
"0.55839884",
"0.5576668",
"0.5496435",
"0.5496296",
"0.5483711",
"0.5469163",
"0.5468306",
"0.54667014",
"0.54566824"
] | 0.6216313 | 1 |
Refreshes a Switch based on uri provided [Arguments] | def fusion_api_refresh_switch(self, uri, api=None, headers=None):
return self.switch.refresh(uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def refresh(self, url, args, cancellationSignal):\n pass",
"def fusion_api_edit_switch(self, body, uri, api=None, headers=None):\n return self.switch.update(body, uri, api, headers)",
"def fusion_api_get_switch(self, uri=None, param='', api=None, headers=None):\n return self.switch.get(uri=uri, api=api, headers=headers, param=param)",
"def call(self, uri, method, arg, extras):\n pass",
"def fusion_api_refresh_server_hardware(self, body={\"refreshState\": \"RefreshPending\"}, uri=None, api=None, headers=None):\n return self.sh.update(body, uri=uri, api=api, headers=headers, param='/refreshState')",
"def stream_changed(self, uri):\n pass",
"def Refresh(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"refresh\", payload=payload, response_object=None)",
"def switch(self, url):\r\n self._authsvn('switch', [url])",
"def handleReload(self, confInfo=None):",
"def update(self, uri, values, where, selectionArgs):\n pass",
"def refresh(*args, currentView: bool=True, fileExtension: AnyStr=\"\", filename: AnyStr=\"\",\n force: bool=True, suspend: bool=True, **kwargs)->None:\n pass",
"def refresh_view():\n pass",
"def get_switch_state(self, path, params):\n switch = params.get('switch')\n port = params.get('port')\n host = self._extract_url_base(path)\n reply = self._faucet_collector.get_switch_state(switch, port, host)\n self._augment_state_reply(reply, path)\n return reply",
"def refresh():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"config\", \"reload\")\n else:\n cmd = _traffic_line(\"-x\")\n\n return _subprocess(cmd)",
"def swatchRefresh(*args, **kwargs)->bool:\n pass",
"def command_reload(interface,command,args):\n command_unload(interface,command,args)\n command_load(interface,command,args)",
"def fusion_api_refresh_power_device(self, body, uri, api=None, headers=None):\n return self.pd.update(body=body, uri=uri, api=api, headers=headers, param='/refreshState')",
"def notifyChange(self, uri, observer, syncToNetwork=None, flags=None):\n pass",
"def refresh_urls(environ, start_response):\n store = environ['tiddlyweb.store']\n config = environ['tiddlyweb.config']\n \n register_urls(store, config)\n \n start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])\n return 'All URLs have been updated'",
"def set_uri(self, uri):\r\n self.uri = uri",
"def refresh():\n\tsocketio.emit('refresh')\n\treturn status()",
"def refresh_screen(self):",
"def uri(self, uri):\n self._uri = uri",
"def uri(self, uri):\n self._uri = uri",
"def trigger_reload(server):\n log.info(\"Triggering /reload on %s\", server)\n screenCmd(server, 'reload')",
"def toggle(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.toggle())\n console.print(f\"[{ip}] Light {id} Toggle:\\n{json.dumps(resp, indent=2)}\")",
"def launch(uri):\n comp=urlparse.urlparse(uri)\n handler=get(comp[0])\n if not handler:\n return\n if '%s' in handler:\n cmd=handler % uri\n else:\n cmd=handler+' '+uri\n #print cmd\n\n return os.spawnlp(os.P_NOWAIT, 'sh', 'sh', '-c', cmd)",
"def switch(self, context):\n return",
"def Reload(self):\n self._inspector_backend.Navigate(self.url, None, 10)",
"def command_refresh_repo(self):\n repoinit.refresh(*self.args())"
] | [
"0.6420177",
"0.5545554",
"0.53256416",
"0.52885365",
"0.519113",
"0.5158461",
"0.5119947",
"0.5117481",
"0.5094451",
"0.50451607",
"0.4990647",
"0.49901125",
"0.49833083",
"0.49725384",
"0.49562463",
"0.49557462",
"0.4953017",
"0.4822759",
"0.48081386",
"0.47947106",
"0.47825053",
"0.47777334",
"0.47765326",
"0.47765326",
"0.4767245",
"0.47647655",
"0.4762434",
"0.47572845",
"0.4747057",
"0.47444004"
] | 0.62286013 | 1 |
Removes a Switch based on name OR uri provided [Arguments] | def fusion_api_remove_switch(self, name=None, uri=None, api=None, headers=None):
return self.switch.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _remove_from_switch(self, _switch, context):\n _network = context.current['id']\n _vlanid = context.current['provider:segmentation_id']\n\n # BRIDGE_PORT_URL = '{url_prefix}://{switch_name_or_ip}:{port}/networks/{vlan}/{network_id}/{port_id}'\n for _switchport in _switch.get('ports'):\n _request = requests.delete(\n BRIDGE_PORT_URL.format(url_prefix=self.url_prefix,\n port=self.protocol_port,\n switch_name_or_ip=_switch.get('name'),\n vlan=unicode(_vlanid),\n network_id=_network,\n port_id=_switchport)\n )\n LOG.info(\n _LI('Sending DELETE API Call to Switch %s'),\n _request.url\n )\n if _request.status_code != requests.codes.ok:\n LOG.error(\n _LE(\"Failed To Provision Switch %s\"), _request.text)\n raise MechanismDriverError()",
"def test_lswitch_uninstall(self):\n self._common_uninstall_delete(\n 'id', lswitch.delete,\n {'switch': {}},\n ['logicalSwitch'], {\n 'uri_parameters': {'virtualWireID': 'id'}\n },\n additional_params=['vsphere_network_id']\n )",
"def remove(name):",
"def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"",
"def del_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()",
"def removeControl(*args):",
"def removeControl(*args):",
"def removeControl(*args):",
"def removeControl(*args):",
"def remove(self, uri):\n\n uri = uri.strip('/')\n if self.exists(uri):\n parts = uri.rsplit(\"/\", 1)\n if len(parts) == 1:\n self.nodes.pop(parts[0])\n else:\n node = self.get(parts[0])\n node.pop(parts[1], None)",
"def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)",
"def unregister_router(self, hostname):",
"def fusion_api_remove_firmware_driver(self, name=None, uri=None, api=None, headers=None):\n return self.driver.delete(name, uri, api, headers)",
"def delete_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n if hasattr(backend_class, 'delete'):\n return backend_class.delete(parsed_uri, **kwargs)",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def fusion_api_get_switches_without_ports(self, uri=None, api=None, headers=None):\n return self.switch.get(uri=uri, api=api, headers=headers, param='/withoutPorts')",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def fusion_api_remove_power_device(self, name=None, uri=None, api=None, headers=None):\n return self.pd.delete(name=name, uri=uri, api=api, headers=headers)",
"def main_remove(args):\n return remove_command(args.directory, args.name)",
"def remove_binding(ctx, binding_name):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if not data:\n click.echo(\"Trying to delete binding, which is not present.\")\n entryFound = True\n\n if entryFound == False:\n config_db.set_entry(table, key, None)",
"def rm(args):\n args.delete = True\n return remove(args)",
"def remove_machine(self, url):\n\n model = TestMachine.objects.filter(url=url).first()\n if model:\n self.deactivate_model(model)\n print \"Removed test machine: %s\" % url",
"def unregister(self, name: str, opset: OpsetVersion) -> None:\n if name not in self._registry:\n return\n self._registry[name].remove_custom(opset)",
"def removeItem(*args):",
"def removeItem(*args):",
"def clear_single_switch_rules(switch_id,in_port,out_port):\n print(\"** Remove flows from {}\".format(switch_id))\n in_rule = \"in_port={}\".format(in_port)\n out_rule = \"in_port={}\".format(out_port)\n subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"del-flows\",switch_id,in_rule],\n stdout=subprocess.PIPE).wait()\n subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"del-flows\",switch_id,out_rule],\n stdout=subprocess.PIPE).wait()\n\n ### If debugging, remove the comments below to see what the flow rules are\n # result = subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"dump-flows\",switch_id],\n # stdout=subprocess.PIPE).communicate()[0]\n # print (result)",
"def remove_service(self, zeroconf, service_type, name):",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def remove(self, name_or_klass):\n _logger().log(5, 'removing mode %r', name_or_klass)\n mode = self.get(name_or_klass)\n mode.on_uninstall()\n self._modes.pop(mode.name)\n return mode",
"def remove(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))"
] | [
"0.64799225",
"0.60489744",
"0.58654183",
"0.5754106",
"0.5738819",
"0.5728677",
"0.5728677",
"0.5728677",
"0.5728677",
"0.5618364",
"0.5616719",
"0.5604793",
"0.5554135",
"0.55125505",
"0.55086726",
"0.5494269",
"0.5492298",
"0.5490624",
"0.5460127",
"0.5402718",
"0.53422207",
"0.53343135",
"0.5304767",
"0.5280252",
"0.5280252",
"0.5240278",
"0.52336884",
"0.52330136",
"0.5229445",
"0.5191513"
] | 0.74323237 | 0 |
Gets a default or paginated collection of Switches without ports info [Arguments] | def fusion_api_get_switches_without_ports(self, uri=None, api=None, headers=None):
return self.switch.get(uri=uri, api=api, headers=headers, param='/withoutPorts') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def switches(self) -> List[dict]:\n return self.items_by_domain(\"switch\")",
"def get_all_switches(name):\n return [False,False,False,False] #TODO Implement",
"def list_switches(self):\n return [x for x,y in self.devices.items() if y.device_type == \"Switch\"]",
"def get_all_switch(self, conf):\n\t\tpass",
"def get_switches(self) -> tuple:\n return self.switches",
"def list_opts():\n return [(None, copy.deepcopy(service_opts))]",
"async def get_switches(self):\n return await self.get_states_by_tag_prefix(\"led\")",
"def get_switches_stats(self, site_id: str) -> List:\n try:\n stats = self.api.get(host=self.host, endpoint=f\"/api/v1/sites/{site_id}/stats/devices?type=switch\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting switch stats:{TextColors.ENDC} {e}\")\n raise e\n return stats",
"def get(self, *args):\n return _libsbml.ListOfPorts_get(self, *args)",
"def get_all(options=None):\n return dict([(port_name, get(port_name, options=options))\n for port_name in ALL_PORT_NAMES])",
"def switches(self):\n return {k:v for k, v in self._data.items() \n if v[\"type\"] == \"SWITCH\"}",
"def getSwitchInfo():\n swDB = switchdb.DB()\n raw_info = swDB.getAllSummary()\n switchList = []\n for row in raw_info:\n row = list(row)\n switch = {}\n switch[\"name\"] = row[0]\n switch[\"serial\"] = row[1]\n switch[\"swver\"] = row[2]\n switch[\"ip\"] = row[3]\n switch[\"check\"] = row[4]\n switch[\"total\"] = row[5]\n switch[\"up\"] = row[6]\n switch[\"down\"] = row[7]\n switch[\"disabled\"] = row[8]\n if switch[\"total\"] == 0:\n switch[\"capacity\"] = 0\n else:\n switch[\"capacity\"] = (switch[\"up\"] / switch[\"total\"]) * 100\n switchList.append(switch)\n swDB.close()\n return switchList",
"def get_of_switches(self):\n try:\n of_response = requests.get(self.url + \"restconf/operational/opendaylight-inventory:nodes\",\n headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"get_of_switches \" + error_text)\n raise OpenflowConnUnexpectedResponse(\"Error get_of_switches \" + error_text)\n\n self.logger.debug(\"get_of_switches \" + error_text)\n info = of_response.json()\n\n if not isinstance(info, dict):\n self.logger.error(\"get_of_switches. Unexpected response, not a dict: %s\", str(info))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, not a dict. Wrong version?\")\n\n nodes = info.get('nodes')\n if type(nodes) is not dict:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s\",\n str(type(info)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes', not found or not a dict.\"\n \" Wrong version?\")\n\n node_list = nodes.get('node')\n if type(node_list) is not list:\n self.logger.error(\"get_of_switches. Unexpected response, at 'nodes':'node', \"\n \"not found or not a list: %s\", str(type(node_list)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, at 'nodes':'node', not found \"\n \"or not a list. Wrong version?\")\n\n switch_list = []\n for node in node_list:\n node_id = node.get('id')\n if node_id is None:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s\",\n str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'id', not found. \"\n \"Wrong version?\")\n\n if node_id == 'controller-config':\n continue\n\n node_ip_address = node.get('flow-node-inventory:ip-address')\n if node_ip_address is None:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:\"\n \"ip-address', not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found. Wrong version?\")\n\n node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)\n switch_list.append((':'.join(a+b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])),\n node_ip_address))\n return switch_list\n\n except requests.exceptions.RequestException as e:\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_switches \" + error_text)\n raise OpenflowConnConnectionException(error_text)\n except ValueError as e:\n # ValueError in the case that JSON can not be decoded\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_switches \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)",
"def list_ports(state):\n\tstate.report()",
"def fetch_router_list(args):\n nd = NetDevices(production_only=opts.nonprod)\n ret = []\n blocked_groups = []\n if args:\n for arg in args:\n # Try to find the device, but fail gracefully if it can't be found\n device = device_match(arg)\n if not pass_filters(device) or device is None:\n continue\n ret.append(device)\n\n else:\n for entry in nd.itervalues():\n if entry.owningTeam in blocked_groups:\n continue\n if not pass_filters(entry):\n continue\n ret.append(entry)\n\n return sorted(ret, reverse=True)",
"def get_switching_options(self):\n\n return self._switch_opt_infos.iterkeys()",
"def get_switch_port_map(self,switch_name):\n\n # Now do a sort and return a map having port nos & connected devices\n myswitch_pmap = []\n self.sw_port_mapping[switch_name].sort()\n idx = 1\n for swname in self.sw_port_mapping[switch_name]:\n myswitch_pmap.append( (idx, swname) )\n idx = idx + 1\n return myswitch_pmap",
"def list(sw, args):\n parser = argparse.ArgumentParser(\n prog='space channel list',\n description='List channels in spacewalk.'\n )\n parser.add_argument(\n 'type',\n choices=[\n 'all',\n 'user',\n 'popular',\n 'retired',\n 'shared',\n 'software',\n 'vendor'\n ],\n default='popular',\n help=\"Type of search you would like to perform\"\n )\n parser.add_argument(\n '--format',\n choices=[\n 'raw',\n 'json',\n 'pretty'\n ],\n default='pretty',\n required=False\n )\n parser.add_argument(\n '--popcount',\n default=None,\n help=('channels with at least this many systems ' +\n 'subscribed will be returned')\n )\n\n api_calls = {\n 'all': 'channel.listAllChannels',\n 'user': 'channel.listMyChannels',\n 'popular': 'channel.listPopularChannels',\n 'retired': 'channel.listRetiredChannels',\n 'shared': 'channel.listSharedChannels',\n 'software': 'channel.listSoftwareChannels',\n 'vendor': 'channel.listVendorChannels'\n }\n\n p = parser.parse_args(args)\n\n if p.type == 'popular' and not p.popcount:\n print(\"Popular requires popcount arg.\")\n parser.print_help()\n return False\n\n if p.popcount:\n popcount = int(p.popcount)\n results = sw.call(\n api_calls[p.type],\n popcount\n )\n else:\n results = sw.call(\n api_calls[p.type]\n )\n if results == []:\n print(\"Empty result set.\")\n\n channels = []\n for result in results:\n channels.append(result)\n\n if p.format == 'pretty':\n \"\"\"\n int \"id\"\n string \"label\"\n string \"name\"\n string \"provider_name\"\n int \"packages\"\n int \"systems\"\n string \"arch_name\"\n \"\"\"\n if p.type == \"software\":\n t = prettytable.PrettyTable([\n \"Label\",\n \"Name\",\n \"Parent Label\",\n \"End Of Life\",\n \"Arch\"\n ])\n t.align[\"Label\"] = \"l\"\n t.align[\"Name\"] = \"l\"\n t.align[\"Parent Label\"] = \"l\"\n t.padding_width = 1\n for c in results:\n\n t.add_row([\n c['label'],\n c['name'],\n c['parent_label'],\n c['end_of_life'],\n c['arch']\n ])\n else:\n t = prettytable.PrettyTable([\n \"Label\",\n \"Name\",\n \"Provider Name\",\n \"Packages\",\n \"Systems\",\n \"Arch Name\"\n ])\n t.align[\"Label\"] = \"l\"\n t.align[\"Name\"] = \"l\"\n t.align[\"Packages\"] = \"r\"\n t.align[\"Systems\"] = \"r\"\n t.align[\"Provider Name\"] = \"l\"\n t.padding_width = 1\n for c in results:\n\n t.add_row([\n c['label'],\n c['name'],\n c['provider_name'],\n c['packages'],\n c['systems'],\n c['arch_name']\n ])\n print(t)\n\n elif p.format == 'json':\n output = json.dumps(dict(channels=channels))\n print(output)\n else:\n for result in results:\n print(result)\n return results",
"def test_get_pci_switch_list(self):\n pass",
"def _switches_from_input(self, inp):\n inp = self.connector_by_label(inp)\n if not inp.is_box_input():\n raise ConnectorError(\"Argument has to be a box input.\")\n switches = []\n for routes in self.routes[inp.label].values():\n for route in routes:\n for connection in route:\n if (connection.start.parent_type == 'switch'\n and connection.start.switch not in switches):\n switches.append(connection.start.switch)\n return switches",
"def get_switch_port_mapping(self,switch_name):\n switch_list = []\n switch_list = self.__graph_dict[switch_name]\n return switch_list",
"def fusion_api_get_switch_types(self, param='', api=None, headers=None):\n return self.swtypes.get(api=api, headers=headers, param=param)",
"def getports(self, req: json, **kwargs) -> Response:\n\n switch_instance = self.ryu_app\n dpid = int(kwargs['dpid'])\n\n port_list = []\n\n for port, port_info in switch_instance.dpset.port_state[dpid].items():\n port_list.append({\"hw_addr\": port_info.hw_addr, \"name\": port_info.name.decode(\"utf-8\"), \"openflow_port\": port})\n\n # Sort the ports by openflow port order - this corresponds to their order on the switch as well\n port_list = sorted(port_list, key=lambda i: i[\"openflow_port\"])\n\n body = json.dumps(port_list)\n\n return create_response(req, body)",
"def parallel_switches(net):\n parallel_switches = []\n compare_parameters = ['bus', 'element', 'et']\n parallels_bus_and_element = list(\n net.switch.groupby(compare_parameters).count().query('closed > 1').index)\n for bus, element, et in parallels_bus_and_element:\n parallel_switches.append(list(net.switch.query(\n 'bus==@bus & element==@element & et==@et').index))\n if parallel_switches:\n return parallel_switches",
"def load_switches(self):\n new_switches = list()\n for site in self.sites:\n switches = self.get_switches_stats(site_id=site['id'])\n for switch in switches:\n if len(switch['name']) < 1:\n switch['name'] = ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)])\n new_switch = {\n \"name\": switch['name'],\n \"site\": site['name'],\n \"site_id\": site['id'],\n \"device_id\": switch['id'],\n \"mac\": switch['mac'],\n \"mac_str\": ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)]),\n \"ip_config\": switch['ip_config'],\n \"ip_actual\": switch['ip_stat'],\n \"net_obj\": get_network(address=switch['ip_config']['ip'], netmask=switch['ip_config']['netmask']) if 'ip' in switch['ip_config'] else None\n }\n for vlan, addr in new_switch['ip_actual']['ips'].items():\n if new_switch['ip_actual']['ip'] == addr:\n new_switch['ip_actual']['vlan'] = vlan.strip('vlan')\n else:\n new_switch['ip_actual']['vlan'] = 0\n if new_switch['ip_config']['network'] and new_switch['ip_config']['network'] != \"default\":\n new_switch['ip_config']['vlan'] = site['network_template']['networks'][new_switch['ip_config']['network']]['vlan_id']\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n elif new_switch['ip_config']['network'] and new_switch['ip_config']['network'] == \"default\":\n new_switch['ip_config']['vlan'] = 1\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n else:\n new_switch['ip_config']['vlan'] = 0\n logger.error(f\"Did not match {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n new_switches.append(new_switch)\n self.switches = new_switches",
"def list(options=None):\n if options is None:\n return requests.get('/')\n else:\n return requests.get('/', options)",
"def available_list(cls, num):\n return cls.objects.filter(status=0)[:num]",
"def get_sweeps(self):\n return self.master.get_sweeps()",
"def show_networks():\n return get_networks()",
"def get_switch_stringlist(self):\n return text_switch"
] | [
"0.6520135",
"0.6164974",
"0.6038765",
"0.5980532",
"0.58495516",
"0.5426051",
"0.5400386",
"0.53886455",
"0.5363743",
"0.533867",
"0.52681065",
"0.52616465",
"0.52521765",
"0.52512336",
"0.5225842",
"0.5202271",
"0.518069",
"0.5145818",
"0.51111346",
"0.51039314",
"0.5092655",
"0.5087983",
"0.50709903",
"0.5065694",
"0.50017136",
"0.49847692",
"0.4930545",
"0.4844996",
"0.48076192",
"0.4799683"
] | 0.6331513 | 1 |
Gets a default or paginated collection of Switch Types. [Arguments] | def fusion_api_get_switch_types(self, param='', api=None, headers=None):
return self.swtypes.get(api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def switches(self) -> List[dict]:\n return self.items_by_domain(\"switch\")",
"def list_switches(self):\n return [x for x,y in self.devices.items() if y.device_type == \"Switch\"]",
"def get_all_switch(self, conf):\n\t\tpass",
"def switches(self):\n return {k:v for k, v in self._data.items() \n if v[\"type\"] == \"SWITCH\"}",
"def get_all_switches(name):\n return [False,False,False,False] #TODO Implement",
"def type_list():\n for type_ in orm.DataFlagType.select():\n click.echo(type_.name)",
"def standard_type_list(request):\n from .settings import STD_TYPE_SLUG_MAP\n context = {'standard_types': STD_TYPE_SLUG_MAP}\n return TemplateResponse(request, 'curricula/standard_type_list.html', context)",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def get_switch_stringlist(self):\n return text_switch",
"async def get_switches(self):\n return await self.get_states_by_tag_prefix(\"led\")",
"def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)",
"def get_types(self):\n return self.types",
"def get_switches(self) -> tuple:\n return self.switches",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def _add_switchs(self):\r\n lst = self.model.get_all_switch()\r\n\r\n for itm in lst:\r\n self._add_switch(itm)",
"def getSwitchInfo():\n swDB = switchdb.DB()\n raw_info = swDB.getAllSummary()\n switchList = []\n for row in raw_info:\n row = list(row)\n switch = {}\n switch[\"name\"] = row[0]\n switch[\"serial\"] = row[1]\n switch[\"swver\"] = row[2]\n switch[\"ip\"] = row[3]\n switch[\"check\"] = row[4]\n switch[\"total\"] = row[5]\n switch[\"up\"] = row[6]\n switch[\"down\"] = row[7]\n switch[\"disabled\"] = row[8]\n if switch[\"total\"] == 0:\n switch[\"capacity\"] = 0\n else:\n switch[\"capacity\"] = (switch[\"up\"] / switch[\"total\"]) * 100\n switchList.append(switch)\n swDB.close()\n return switchList",
"def getPrimaryTypes() -> List[int]:\n ...",
"def get_switches_stats(self, site_id: str) -> List:\n try:\n stats = self.api.get(host=self.host, endpoint=f\"/api/v1/sites/{site_id}/stats/devices?type=switch\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting switch stats:{TextColors.ENDC} {e}\")\n raise e\n return stats",
"def _switches_from_input(self, inp):\n inp = self.connector_by_label(inp)\n if not inp.is_box_input():\n raise ConnectorError(\"Argument has to be a box input.\")\n switches = []\n for routes in self.routes[inp.label].values():\n for route in routes:\n for connection in route:\n if (connection.start.parent_type == 'switch'\n and connection.start.switch not in switches):\n switches.append(connection.start.switch)\n return switches",
"def get_of_switches(self):\n try:\n of_response = requests.get(self.url + \"restconf/operational/opendaylight-inventory:nodes\",\n headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"get_of_switches \" + error_text)\n raise OpenflowConnUnexpectedResponse(\"Error get_of_switches \" + error_text)\n\n self.logger.debug(\"get_of_switches \" + error_text)\n info = of_response.json()\n\n if not isinstance(info, dict):\n self.logger.error(\"get_of_switches. Unexpected response, not a dict: %s\", str(info))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, not a dict. Wrong version?\")\n\n nodes = info.get('nodes')\n if type(nodes) is not dict:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s\",\n str(type(info)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes', not found or not a dict.\"\n \" Wrong version?\")\n\n node_list = nodes.get('node')\n if type(node_list) is not list:\n self.logger.error(\"get_of_switches. Unexpected response, at 'nodes':'node', \"\n \"not found or not a list: %s\", str(type(node_list)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, at 'nodes':'node', not found \"\n \"or not a list. Wrong version?\")\n\n switch_list = []\n for node in node_list:\n node_id = node.get('id')\n if node_id is None:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s\",\n str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'id', not found. \"\n \"Wrong version?\")\n\n if node_id == 'controller-config':\n continue\n\n node_ip_address = node.get('flow-node-inventory:ip-address')\n if node_ip_address is None:\n self.logger.error(\"get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:\"\n \"ip-address', not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found. Wrong version?\")\n\n node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16)\n switch_list.append((':'.join(a+b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])),\n node_ip_address))\n return switch_list\n\n except requests.exceptions.RequestException as e:\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_switches \" + error_text)\n raise OpenflowConnConnectionException(error_text)\n except ValueError as e:\n # ValueError in the case that JSON can not be decoded\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"get_of_switches \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)",
"def test_get_pci_switch_list(self):\n pass",
"def get_building_choices(call_type=None):\n dataservices = DataService.objects()\n buildings_list = []\n for dataservice in dataservices:\n for building in dataservice.buildings:\n print building\n if building not in buildings_list:\n buildings_list.append(building)\n if not call_type:\n return zip(buildings_list, buildings_list)\n else:\n return buildings_list",
"def initDefaultChoices(self):\n return []",
"def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types",
"def jump_to_all(self, type_name):\n return self.context.all(type_name)",
"def get_nodes_by_type(ntwrk, typ='switch'):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if v['type'] == typ}",
"def listEnabledTypes(self):\n actual_type = self.request.get('portal_type', None)\n collage_options = getCollageSiteOptions()\n ttool = getToolByName(self.context, 'portal_types', None)\n if ttool is None:\n return None\n return [\n {\n 'id': pt.getId(),\n 'title': p_(pt.Title()),\n 'selected': pt.getId() == actual_type and 'selected' or None\n }\n for pt in ttool.listTypeInfo()\n if collage_options.enabledAlias(pt.getId())\n ]",
"def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}",
"def opinion_type_list():\n for type_ in orm.DataFlagOpinionType.select():\n click.echo(type_.name)",
"def get_switching_options(self):\n\n return self._switch_opt_infos.iterkeys()"
] | [
"0.6429243",
"0.62044716",
"0.5973956",
"0.5961749",
"0.58429617",
"0.5724761",
"0.56757826",
"0.54745233",
"0.5419325",
"0.5411464",
"0.53042865",
"0.5303776",
"0.53003997",
"0.52718854",
"0.5233143",
"0.5197792",
"0.5194815",
"0.5182634",
"0.5126415",
"0.50823814",
"0.50694305",
"0.5046666",
"0.500306",
"0.4992808",
"0.49543864",
"0.4947335",
"0.494391",
"0.49186066",
"0.4899974",
"0.4898943"
] | 0.6891026 | 0 |
Creates a Uplink Set [Arguments] | def fusion_api_create_uplink_set(self, body, param='', api=None, headers=None):
return self.uplink_set.create(body, param, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def up_cmd(ctx):\n pass",
"def addOnCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass",
"def get_args():\n\n parser = argparse.ArgumentParser(description=\"Tool to create an uplink subnet for FIP access via VSG/VRS-G gateway.\")\n parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')\n parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)\n parser.add_argument('-E', '--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)\n parser.add_argument('-H', '--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)\n parser.add_argument('-P', '--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)\n parser.add_argument('-p', '--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)\n parser.add_argument('-u', '--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)\n parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect (deprecated)', dest='nosslcheck', action='store_true')\n parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')\n parser.add_argument('--fip', required=True, help='FIP subnet CIDR', dest='fip_net')\n parser.add_argument('--address', required=True, help='Uplink network address', dest='uplink_addr')\n parser.add_argument('--mask', required=True, help='Uplink network netmask', dest='uplink_mask')\n parser.add_argument('--gw', required=True, help='Uplink network gateway', dest='uplink_gw')\n parser.add_argument('--ip', required=True, help='Uplink interface IP', dest='uplink_ip')\n parser.add_argument('--mac', required=True, help='Uplink interface MAC', dest='uplink_mac')\n parser.add_argument('--vsg', required=True, help='VSG/VRS-G name as it appears in your infrastructure (defaults to IP if you have not changed it)', dest='gw_name')\n parser.add_argument('--port', required=True, help='VSG/VRS-G Network Interface Name', dest='gw_port')\n parser.add_argument('--vlan', required=True, help='VSG/VRS-G Network Interface Vlan ID', dest='gw_vlan')\n args = parser.parse_args()\n return args",
"def create(*args):",
"def addOnUserCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass",
"def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)",
"def fusion_api_edit_uplink_set(self, body, uri, api=None, headers=None):\n return self.uplink_set.update(body, uri, api, headers)",
"def __init__(__self__,\n resource_name: str,\n args: FederationUpstreamArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def command_add(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.addlink(current_name, tag, target, inverse_tag)\n return 'Added link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"Link already existed.\"",
"def main(argv: t.List[str] = sys.argv):\n if len(argv) < 2:\n usage_message(argv)\n\n config_uri = get_config_uri(argv)\n request = init_websauna(config_uri)\n\n with transaction.manager:\n engine = request.dbsession.get_bind()\n # Always enable UUID extension for PSQL\n # TODO: Convenience for now, because we assume UUIDs, but make this somehow configurable\n engine.execute('CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"')\n\n Base.metadata.create_all(engine)",
"def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args",
"def LinkUpDn(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('linkUpDn', payload=payload, response_object=None)",
"def setUp(self, up):\n\t\tself.up = up",
"def setup(self, args={}):\n\n return Status.RUN",
"def duplicateCreateBlankSetup(self, *arg, **properties):\n recipe = CopySetup.createBlankSetupBookedMatchingSetupRange(properties,\n setupCallback=self.feedDuplicatedSetups,\n renderCallback=self.__renderCallback,\n multiTrackCallback=self.feedReloadSetupsMultiTracks,\n username=properties.get('username', ''))\n return recipe",
"def signup(**kwargs):\n\n pass",
"def createVersionForSetup(self, *arg, **properties):\n# result = DrawingImporter.createVersionForSetup(properties[\"show\"], properties[\"sequence\"], properties[\"beat\"], properties[\"setup\"]);\n# return result\n return True",
"def setup_args_create(parser):\n parser.add_argument(\"--domain\", required=False)\n parser.add_argument(\"--ansible\", required=False,\n dest=\"ansible\", action=\"store_true\")\n return parser",
"def make_set_up(set_up=None):\n def _do_set_up(obj):\n if set_up:\n return set_up(obj)\n return obj.setUp()\n return _do_set_up",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):"
] | [
"0.54519755",
"0.5435972",
"0.5399245",
"0.5381882",
"0.52568656",
"0.5251068",
"0.5216274",
"0.5202875",
"0.5168291",
"0.51639724",
"0.51597434",
"0.5073443",
"0.50513744",
"0.5046323",
"0.50254655",
"0.5006739",
"0.4990694",
"0.4967758",
"0.4956707",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899",
"0.4934899"
] | 0.7222193 | 0 |
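A minimal usage sketch for the create helper above. The `fusion` object and every field in the payload are illustrative assumptions; a real body must follow the appliance's uplink-set schema:

    # 'fusion' is assumed to be an instantiated keyword-library object exposing
    # the fusion_api_* methods shown in this dataset.
    uplink_body = {
        "name": "US-Prod-A",                                # assumed field names
        "networkUris": ["/rest/ethernet-networks/net-1"],
        "portConfigInfos": [],
        "connectionMode": "Auto",
    }
    resp = fusion.fusion_api_create_uplink_set(body=uplink_body)
    # Many helpers in this library return a dict carrying 'status_code';
    # treating the response that way here is an assumption.
    if isinstance(resp, dict) and resp.get("status_code") not in (200, 201, 202):
        raise RuntimeError("uplink set creation failed: %s" % resp)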
Updates an Uplink Set [Arguments] | def fusion_api_edit_uplink_set(self, body, uri, api=None, headers=None):
return self.uplink_set.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command_update(arguments):\n global current_name\n tag = arguments[0]\n if (len(arguments) == 2):\n old_target, new_target = (...), arguments[1]\n else:\n old_target, new_target = arguments[1:]\n\n to_replace = network[current_name, tag, old_target]\n if not len(to_replace):\n return '\"' + tag + ': ' + old_target + '\" - no such link for this entity'\n if len(to_replace) > 1:\n return 'Sorry, tag \"' + tag + '\" is ambiguous.'\n inverse_tag = to_replace[0].inverse_tag\n to_replace.unlink()\n network.addlink(current_name, tag, new_target, inverse_tag)\n\n return 'Updated link from \"' + tag + ': ' + old_target + '\" to \"' + tag + ': ' + new_target + '\"'",
"def update(*args):",
"def fusion_api_create_uplink_set(self, body, param='', api=None, headers=None):\n return self.uplink_set.create(body, param, api, headers)",
"def update(self, args):\n pass",
"def links_update(link_from: str, link_to: str):\n if len(link_from) < 6:\n raise RuntimeError(\"from length must be at least 6\")\n if len(link_to) < 6:\n raise RuntimeError(\"to length must be at least 6\")\n base_link = get_short_link(link_from)\n if not base_link:\n raise RuntimeError(\"Couldn't find base link {}\".format(link_from))\n link_to_update = get_short_link(link_to)\n if not link_to_update:\n raise RuntimeError(\"Couldn't find existing short link {}\".format(link_to))\n link_to_update[\"full_hash\"] = base_link[\"full_hash\"]\n print(\"New link: {}\".format(pformat(link_to_update)))\n if are_you_sure(\"update link named {}\".format(link_to)):\n put_short_link(link_to_update)",
"def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))",
"def update(self, *args, **kwargs):",
"def up_cmd(ctx):\n pass",
"def update(self, *args, **kw):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def cmd_album_update(client, args):\n fields = data_fields(args, client.allowed_album_fields)\n album = client.update_album(args.album_id, fields)\n generate_output({'album': album})",
"def do_up(self, arg):\n self.do_timesheet('update %s' % arg)",
"def test_update_member(self):\r\n resource = 'member'\r\n cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })",
"def update(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"update\")\n self[-1].update(*args, **kwargs)",
"def update(self, initial, follows):\n\t\tfilter = dict(_id=initial)\n\t\tkey = 'begets.' + fields.encode(follows)\n\t\toper = {'$inc': {key: 1}}\n\t\tself.db.update(filter, oper, upsert=True)",
"def update(self, initial, follows):",
"def update(self, params):",
"def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def update(self, **options):\n pass",
"def update(instance, args):\n for key in args.keys():\n setattr(instance, key, args[key])\n return instance",
"def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass"
] | [
"0.6135603",
"0.609327",
"0.5954924",
"0.5858715",
"0.57880354",
"0.56711507",
"0.563974",
"0.563974",
"0.563974",
"0.563974",
"0.563974",
"0.563974",
"0.56302327",
"0.55801797",
"0.5516977",
"0.5502105",
"0.54873466",
"0.54873466",
"0.54873466",
"0.5427483",
"0.53419477",
"0.53120077",
"0.53109866",
"0.53086233",
"0.5278114",
"0.5266517",
"0.5259146",
"0.52478665",
"0.5206084",
"0.5160033"
] | 0.67684245 | 0 |
Deletes an Uplink Set from the appliance based on name OR uri [Arguments] | def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):
return self.uplink_set.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)",
"def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)",
"def delete_set(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.delete_set\")\n\n flg.info(\"Set to delete: {}\".format(set_name))\n\n if mc.objExists(set_name):\n mc.select(set_name)\n old_objects = mc.ls(selection=True)\n flg.debug(\"Old Objects:\")\n for o in old_objects:\n flg.debug(o)\n ref_objects = mc.ls(selection=True, referencedNodes=True)\n\n ref_del_queue = []\n if len(ref_objects) > 0:\n flg.debug(\"Old Reference Nodes:\")\n for o in ref_objects:\n flg.debug(o)\n for o in ref_objects:\n flg.debug(\"Queuing {} for reference removal\".format(o))\n top = mc.referenceQuery(o, referenceNode=True)\n ref_del_queue.append(top)\n if len(ref_del_queue):\n for o in ref_del_queue:\n flg.debug(\"Removing reference: {}\".format(o))\n ref_file = mc.referenceQuery(o, filename=True)\n mc.file(ref_file, removeReference=True)\n for o in old_objects:\n try:\n flg.debug(\"Deleting {}\".format(o))\n mc.delete(o)\n except ValueError as e:\n flg.debug(\"Unable to delete {0}. Error: {1}\".format(o, e))\n flg.debug(\"Deleting set: {}\".format(set_name))\n mc.delete(set_name)",
"def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)",
"def delete(log, session, args):\n log('imageset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete imageset command coming soon.')",
"def Delete(url):\n\n prefix = ''.join([url, config_encoder.NAMESPACE_SEPARATOR])\n\n # Remove Test Suites\n test_keys = _GetEntityKeysByPrefix(ndb_models.Test, prefix)\n ndb.delete_multi(test_keys)\n\n # Remove Device Actions\n device_action_keys = _GetEntityKeysByPrefix(ndb_models.DeviceAction, prefix)\n ndb.delete_multi(device_action_keys)\n\n # Remove Test Run Actions\n test_run_action_keys = _GetEntityKeysByPrefix(\n ndb_models.TestRunAction, prefix)\n ndb.delete_multi(test_run_action_keys)\n\n # Remove Config Set Info\n config_set_info_key = mtt_messages.ConvertToKey(ndb_models.ConfigSetInfo, url)\n config_set_info_key.delete()",
"def delete(self, name):\n\n pass",
"def delete():",
"def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()",
"def delete_suggester(DomainName=None, SuggesterName=None):\n pass",
"def unlink(address):",
"def unlink(self, link_id):",
"def delete(self, *args: str):\n toDelete = {}\n for a in args:\n toDelete[a] = None\n return self._object.update(meta=toDelete)",
"def remove_link():",
"def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()",
"def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('host', kwargs)",
"def rm(args):\n args.delete = True\n return remove(args)",
"def catalog_alias_delete(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n alias.delete_ermrest_alias(really=True)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e",
"def cmd_album_delete(client, args):\n delete_album = client.album_delete(args.album_id)\n generate_output({'delete_album': delete_album})",
"def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))",
"def delete(self, application_id):",
"def destroyIpSetList(set_list_name):\n result = subprocess.Popen(\"/usr/sbin/ipset destroy %s 2>&1\" % set_list_name, shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not destroy ipset %s. Error: %s.\" % (set_list_name, result))\n sys.exit(255)",
"def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))",
"def delete_run(arn=None):\n pass",
"def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}",
"def remove(name):",
"def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def delete(configsetname):\n cnfset = configsetPath(configsetname)\n files = os.listdir(cnfset)\n for f in files: os.remove(os.path.join(cnfset, f))\n os.rmdir(cnfset)\n return None"
] | [
"0.60390645",
"0.5904155",
"0.59041137",
"0.58668137",
"0.5849788",
"0.5787649",
"0.569228",
"0.56363535",
"0.56350154",
"0.5628608",
"0.5624718",
"0.5623885",
"0.55697083",
"0.5543166",
"0.55367833",
"0.55204713",
"0.55089206",
"0.5494897",
"0.5448532",
"0.54449373",
"0.54382175",
"0.5432634",
"0.5432451",
"0.5422735",
"0.5421446",
"0.54161257",
"0.54041725",
"0.5403106",
"0.5390207",
"0.5383461"
] | 0.8037802 | 0 |
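The delete helper above accepts either a display name or a resource uri. A short sketch of both call forms, with `fusion` again an assumed library instance and the uri a placeholder:

    # Delete by name; the helper resolves the matching uplink set internally.
    fusion.fusion_api_delete_uplink_set(name="US-Prod-A")

    # Delete by uri when the resource location is already known.
    fusion.fusion_api_delete_uplink_set(uri="/rest/uplink-sets/abc-123")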
Gets a default or paginated collection of Uplink Sets. [Arguments] | def fusion_api_get_uplink_set(self, uri=None, param='', api=None, headers=None):
return self.uplink_set.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_create_uplink_set(self, body, param='', api=None, headers=None):\n return self.uplink_set.create(body, param, api, headers)",
"def link_to_set(page):\n #s = set()\n links = Measurements.get_all_links(page)\n s = set(links)\n return s",
"def getSets():",
"def getSets(unique_name=None):",
"def __init__(self, sets: List[ColdStartUserSet]):\n self.sets = sets",
"def get_ups(self):\n return self.ups",
"def upset(self,\n _sortkey=operator.attrgetter('index'),\n _next_concepts=operator.attrgetter('upper_neighbors')):\n return algorithms.iterunion([self], _sortkey, _next_concepts)",
"def urlset(self):\n return self._urlset",
"def get_set(css_class_name, set_num=0):\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set",
"def GetUp(self, *args, **kwargs):\n pass",
"def fusion_api_edit_uplink_set(self, body, uri, api=None, headers=None):\n return self.uplink_set.update(body, uri, api, headers)",
"def get_site_collection(self, request):\n\n objects = self.get()\n\n groups = [\n ('topics', request.translate(_(\"Topics\"))),\n ('news', request.translate(_(\"Latest news\"))),\n ('imagesets', request.translate(_(\"Photo Albums\"))),\n ('forms', request.translate(_(\"Forms\"))),\n ('directories', request.translate(_(\"Directories\"))),\n ('resources', request.translate(_(\"Resources\"))),\n ]\n\n links = []\n\n for id, label in groups:\n for obj in objects[id]:\n # in addition to the default url/name pairings we use a group\n # label which will be used as optgroup label\n links.append({\n 'group': label,\n 'name': obj.title,\n 'url': request.link(obj)\n })\n\n return links",
"def get_publishers(self):",
"def Affiliations(self, default=[{}]):\n tmp = self.data.get('affiliations', default)\n return [HEP.AffiliationObject(i) for i in tmp]",
"def uploads(self):\r\n return u.Uploads(self)",
"def getMappingSets(self,name:str=None,prop:str=None,limit:int=100)->list:\n params ={\"limit\":limit}\n if name is not None:\n params['name'] = name\n if prop is not None:\n params['property'] = prop\n path = \"/mappingSets\"\n res = self.connector.getData(self.endpoint+path,params=params)\n data = res[\"data\"]\n return data",
"def Collections(self, default=[None]):\n return self.data.get('metadata', {}).get('_collections', default)",
"def __init__(self):\n self.EntireSet = []",
"def main():\n\n # get set shortnames from input\n sets = [i.lower() for i in sys.argv[1:]]\n\n # populate sets by shortname\n populate(sets)",
"def DefaultRequestSet(self) -> _n_6_t_0:",
"def getSet(unique_name):",
"def getSet(unique_name):",
"def lego_sets():\n # you must replace this line and return your own list\n return lego_sets_list",
"def test_collection_viewset_list_superuser(logged_in_apiclient, settings):\n client, user = logged_in_apiclient\n user.is_superuser = True\n user.save()\n url = reverse(\"models-api:collection-list\")\n collections = [CollectionFactory(owner=user).hexkey for _ in range(5)]\n other_user = UserFactory()\n collections += [CollectionFactory(owner=other_user).hexkey]\n\n result = client.get(url)\n assert result.status_code == status.HTTP_200_OK\n assert len(result.data[\"results\"]) == 6\n for coll_data in result.data[\"results\"]:\n assert coll_data[\"key\"] in collections",
"def oneups(self):\n return self.children.filter_by(kind=\"oneup\")",
"def GetFeedItemSetLink(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def record_sets_fetcher(record):\n return record.get(\"_oai\", {}).get(\"sets\", [])",
"def test_get_collections(self):\n pass",
"def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)",
"def get_feed_collection(client: Client):\n collections = client.get_services()\n command_results = CommandResults(\n outputs_prefix='CybleIntel.collection',\n outputs_key_field='names',\n outputs=collections\n )\n return command_results"
] | [
"0.6034614",
"0.5520875",
"0.5430212",
"0.5270419",
"0.5221473",
"0.519221",
"0.5191031",
"0.5018383",
"0.500693",
"0.49028212",
"0.48873365",
"0.48775196",
"0.48375025",
"0.47938913",
"0.47694138",
"0.47593793",
"0.475309",
"0.47418198",
"0.47398478",
"0.47387344",
"0.47362384",
"0.47362384",
"0.47327211",
"0.46954057",
"0.46838957",
"0.46529323",
"0.4624939",
"0.4615787",
"0.46045625",
"0.45911223"
] | 0.67165923 | 0 |
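A sketch tying the get and edit helpers together: fetch the collection, optionally narrowed through `param`, pick one member, and push a change back to its uri. The query string and the "members"/"uri" keys follow the usual OneView collection convention and are assumptions here, as is the `fusion` instance:

    resp = fusion.fusion_api_get_uplink_set(param="?start=0&count=10")
    members = resp.get("members", []) if isinstance(resp, dict) else []
    if members:
        target = members[0]
        target["description"] = "updated by automation"    # illustrative change
        fusion.fusion_api_edit_uplink_set(body=target, uri=target["uri"])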
Removes a User from the appliance based on name OR uri [Arguments] | def fusion_api_remove_user(self, name=None, uri=None, api=None, headers=None):
return self.user.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wipe_user(user_name):\n user_name = urllib.unquote(user_name) # Username is coming straight from the url bar.\n user = User.query.filter(User.user_name==user_name).first()\n delete_user(user)",
"def del_user(self, username):\n pass",
"def delete_user():",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)",
"def delete_user(UserName=None, AuthenticationType=None):\n pass",
"def del_user(self, name):\n del self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))]",
"def delete_user():\n #TODO user delete\n pass",
"def removeUser(self, fullName):\n logger.debug(\"Func: removeUser\")\n\n # old Name removeUser\n currentDB = self._loadUsers()\n del currentDB[fullName]\n self._dumpJson(currentDB, self._pathsDict[\"usersFile\"])\n self._usersDict = currentDB\n return None, None",
"def del_user_by_username(name):\n collection = get_collection(\"user\")\n collection.delete_one({\"name\": name})\n return True",
"def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty",
"def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)",
"def delete_user(id):\n pass",
"def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)",
"def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)",
"def deleteUser(self,name):\n raise BorkedDeleteUser",
"def delete_user(BrokerId=None, Username=None):\n pass",
"def removeOnUserCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass",
"def remove_user(self, username): # remove only users from json file\n return self._user(username=username, remove=True)",
"def remove(self, user_id):\n pass",
"def delete_user(name: str):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n # check if user name exists at all\n elem = coll_users.find_one({\"name\": name})\n if elem is None:\n raise HTTPException(status.HTTP_404_NOT_FOUND,\n detail=f\"Could not find the user name {name}.\")\n\n # get all items which contain this user -> update/delete\n items = coll_items.find({\"users\": name})\n for item in items:\n item[\"users\"].remove(name)\n if len(item[\"users\"]) > 0:\n # update uses in this item\n coll_items.update_one(\n {\n \"_id\": item[\"_id\"]\n },\n {\n \"$set\": {\n \"users\": item[\"users\"]\n }\n })\n else:\n # delete this item (no user left)\n coll_items.delete_one({\"_id\": item[\"_id\"]})\n\n # delete user\n coll_users.delete_one({\"name\": name})",
"def remove_user(self, username):\n\n row = self.c.execute(\"SELECT * FROM profiles WHERE name =?\",\n (username,))\n for i in row:\n user = i[1]\n print(user)\n if user == username:\n self.c.execute(\"SELECT id FROM profiles WHERE name=?\",\n (username,))\n i_d = self.c.fetchone()[0]\n self.c.execute(\"DELETE FROM events WHERE user_id=?\", (i_d,))\n self.c.execute(\"DELETE FROM profiles WHERE name=?\", (username,))\n self.conn.commit()\n return True\n else:\n print ('User not found.')",
"def delete(isamAppliance, name, user_name, check_mode=False, force=False):\n user_found = False\n ret_obj = ibmsecurity.isam.base.management_authorization.role.get(isamAppliance, name)\n\n if (ret_obj['data']['users'] != None):\n for usr in ret_obj['data']['users']:\n if usr['name'] == user_name:\n user_found = True\n ret_obj['data']['users'].remove(usr)\n break\n\n if user_found is False and force is False:\n return isamAppliance.create_return_object()\n\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put(\n \"Delete user from management authorization role\",\n \"/authorization/roles/{0}/v1\".format(name), ret_obj['data'])",
"def test_delete_username(self):\n\n api.user.create(username='unwanted', password='secret',\n email='[email protected]')\n api.user.delete(username='unwanted')\n\n user = api.user.create(username='steven', password='secret',\n email='[email protected]')\n api.user.delete(user=user)",
"def delete_user():\r\n raise NotImplementedError()",
"def cleanup(self,context,result):\n if self.do_cleanup:\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-delete\", self.user_name],[])\n except:\n result.note_exception(cause=\"Resource cleanup: Can't remove user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result, Result.ERROR,'GSEC','Delete user',\n stdout,stderr)",
"def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")",
"def __remove_user_from_arguments_with_statement(db_statement, db_user):\n logger('QueryHelper', '{} with user{}'.format(db_statement.uid, db_user.uid))\n db_arguments = get_all_arguments_by_statement(db_statement.uid, True)\n for arg in db_arguments:\n if arg.author_uid == db_user.uid:\n revoke_author_of_argument_content(arg, db_user)",
"def remove_user(self, username):\n del self.user_table[username]",
"def delete(self):\n data = UserRegister.parser.parse_args()\n user = UserModel.find_by_username(data['username'])\n\n if user:\n user.delete_from_db()\n else :\n return {'message': 'User not found!'} , 204\n\n return {'message': 'User deleted'},202"
] | [
"0.7027543",
"0.70250374",
"0.6999531",
"0.68815476",
"0.67725843",
"0.6740869",
"0.67364764",
"0.6715946",
"0.6699434",
"0.6612588",
"0.6598224",
"0.6559849",
"0.65336317",
"0.65224516",
"0.6503584",
"0.6503184",
"0.64976114",
"0.6484936",
"0.6465641",
"0.6463593",
"0.6372265",
"0.6371003",
"0.6354305",
"0.63521856",
"0.6348511",
"0.63428056",
"0.63322294",
"0.6317361",
"0.6313679",
"0.6305779"
] | 0.76143265 | 0 |
Gets the appliance's supported API versions [Example] ${resp} = Fusion Api Get Appliance Api Versions | | | def fusion_api_get_appliance_api_versions(self, api=None, headers=None):
return self.version.get(api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)",
"def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")",
"def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")",
"def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")",
"def api_versions(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"api_versions\")",
"async def get_supported_versions(self) -> dict:\n return await self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getSupportedVersions\", API_VERSION),\n filter=attr.filters.include(attr.fields(Body).method),\n ),\n )",
"def get_supported_versions(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getSupportedVersions\", API_VERSION),\n filter=attr.filters.include(attr.fields(Body).method),\n ),\n )",
"def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION",
"def fusion_api_get_appliance_version(self, api=None, headers=None):\n return self.info.get_version(api=api, headers=headers)",
"def get_versions(self, api_spec: dict, user: Dict[str, Any] = None) -> dict:\n try:\n # NB The api 'versions' must match exactly the version numbers available here:\n # https://github.com/Open-EO/openeo-api\n api_versions = []\n for server in api_spec[\"servers\"][1:]:\n this_version = {\n \"production\": api_spec[\"info\"][\"production\"],\n \"url\": server[\"url\"],\n \"api_version\": server[\"description\"].split(\" \")[-1]\n }\n api_versions.append(this_version)\n\n return {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": {\n \"versions\": api_versions\n }\n }\n\n except Exception as exp:\n return ServiceException(CapabilitiesService.name, 500, self._get_user_id(user), str(exp)).to_dict()",
"def getSupportedApiVersions(self):\n return self.supported_api_version",
"def api(self):\n res = self.client.call('/', 'GET', data='')\n self.logger.debug('Get openstack identity api versions: %s' % truncate(res))\n return res[0]",
"def list_versions(self):\n version_url = self._get_base_version_url()\n\n resp, body = self.raw_request(version_url, 'GET')\n # NOTE: We need a raw_request() here instead of request() call because\n # \"list API versions\" API doesn't require an authentication and we can\n # skip it with raw_request() call.\n self._error_checker(resp, body)\n\n body = json.loads(body)\n self.validate_response(schema.list_versions, resp, body)\n return rest_client.ResponseBody(resp, body)",
"def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')",
"def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()",
"def _fetch_api_versions(self):\n log.debug(\"Fetch SASL authentication api versions.\")\n self._broker_connection.request(ApiVersionsRequest())\n response = ApiVersionsResponse(self._broker_connection.response())\n\n self.handshake_version = response.api_versions[SaslHandshakeRequest.API_KEY].max\n self.auth_version = response.api_versions.get(SaslAuthenticateRequest.API_KEY, None)\n\n self.handshake_version = min(self.MAX_HANDSHAKE_VERSION, self.handshake_version)\n if self.auth_version is not None:\n self.auth_version = min(self.auth_version.max, self.MAX_AUTH_VERSION)\n log.debug(\n \"Determinded handshake api version {} and authenticate api version {}\".format(\n self.handshake_version, self.auth_version\n )\n )",
"def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version",
"def ListVersions(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def adc_api_version():\n return jsonify({\"version\": adc.version})",
"def getAPIVersion(self, req):\n import re\n import tracrpc\n match = re.match(r'([0-9]+)\\.([0-9]+)\\.([0-9]+)', tracrpc.__version__)\n return map(int, match.groups())",
"def version():\n response = make_response('{\"version\" : %s }' % app.config.get('VERSION'), 200)\n response.content_type = \"application/json\"\n return response",
"def supported_marshaller_api_versions() -> Tuple[str]:\n return (\"1.0\",)",
"def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200",
"def version_info(self):\n if self._api_version is None:\n self.query_api_version()\n return self._api_version['api-major-version'],\\\n self._api_version['api-minor-version']",
"def api_version_sets(self) -> Optional[pulumi.Input['APIMgmtAPISpecPropertiesApiVersionSetsArgs']]:\n return pulumi.get(self, \"api_version_sets\")",
"def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions",
"async def test_beta_version_pagination(aresponses):\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/beta_week_page1\", False),\n status=200,\n headers=HEADERS,\n ),\n )\n aresponses.add(\n \"registry.hub.docker.com\",\n \"/v2/repositories/homeassistant/home-assistant/tags/page2\",\n \"get\",\n aresponses.Response(\n text=fixture(\"container/beta_week_page2\", False),\n status=200,\n headers=HEADERS,\n ),\n )\n async with aiohttp.ClientSession() as session:\n haversion = HaVersion(\n session=session,\n source=HaVersionSource.CONTAINER,\n channel=HaVersionChannel.BETA,\n )\n await haversion.get_version()\n assert haversion.version == BETA_VERSION",
"def _get_api_version(self):\n with self.nb_session.get(\n self.nb_api_url, timeout=10,\n verify=(not settings.NB_INSECURE_TLS)) as resp:\n result = float(resp.headers[\"API-Version\"])\n log.info(\"Detected NetBox API v%s.\", result)\n return result",
"def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))",
"def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()"
] | [
"0.7494345",
"0.73989725",
"0.73989725",
"0.73989725",
"0.73989725",
"0.73362356",
"0.72414464",
"0.708449",
"0.70133454",
"0.6999114",
"0.6908404",
"0.68778837",
"0.6701922",
"0.6691587",
"0.66610116",
"0.65204805",
"0.64245963",
"0.6255539",
"0.62366027",
"0.62292904",
"0.6161498",
"0.60917753",
"0.60205334",
"0.6013076",
"0.6004687",
"0.5991847",
"0.5950749",
"0.59491134",
"0.59185934",
"0.5915531"
] | 0.77546126 | 0 |
Sets the XAPIVersion header to the specified value for all future requests. If no value is supplied, the value of ${XAPIVersion} is used if it exists else the appliances current version is queried and used [Example] ${resp} = |Fusion Api Set Default Api Version | | | def fusion_api_set_default_api_version(self, api=None):
return self.version.set(api=api) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_api_version_header(response):\n response.headers.setdefault('X-API-Version', g.api_version)\n return response",
"def SetDefaultVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def getAPIVersion(self):\r\n self._update('getAPIVersion')\r\n return API_VERSION",
"def SetDefaultVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_api_versioning(self):\n response = self.request_knox(\n self.url,\n media_type=views_api.CORE_API_MEDIA_TYPE,\n version=views_api.CORE_API_DEFAULT_VERSION,\n )\n self.assertEqual(response.status_code, 200)",
"def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION",
"def SetDefaultVersion(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def getAPIVersion(self, req):\n import re\n import tracrpc\n match = re.match(r'([0-9]+)\\.([0-9]+)\\.([0-9]+)', tracrpc.__version__)\n return map(int, match.groups())",
"def version():\n response = make_response('{\"version\" : %s }' % app.config.get('VERSION'), 200)\n response.content_type = \"application/json\"\n return response",
"def min_api_version(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"min_api_version\")",
"def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version",
"def test_api_versioning_invalid_version(self):\n response = self.request_knox(\n self.url,\n media_type=views_api.CORE_API_MEDIA_TYPE,\n version=CORE_API_VERSION_INVALID,\n )\n self.assertEqual(response.status_code, 406)",
"def handle_request_version(self, msg):\n\t\tres = Response(msg)\n\t\tres.body['version'] = VERSION\n\t\tself.finished(msg.id, res)",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")",
"def api_version(self) -> Optional[str]:\n return pulumi.get(self, \"api_version\")"
] | [
"0.7020459",
"0.5847266",
"0.58408767",
"0.581011",
"0.56751347",
"0.55736",
"0.5506365",
"0.5420947",
"0.53353244",
"0.5267194",
"0.52175796",
"0.5127778",
"0.51140654",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168",
"0.49792168"
] | 0.61179054 | 1 |
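The two version helpers above are commonly chained: read the supported versions, then pin the X-API-Version header for later requests. A hedged sketch; the "currentVersion" key is the conventional OneView name and is an assumption, and calling the setter with no value simply falls back to the behaviour described in its docstring:

    ver = fusion.fusion_api_get_appliance_api_versions()
    current = ver.get("currentVersion") if isinstance(ver, dict) else None
    if current is not None:
        fusion.fusion_api_set_default_api_version(api=current)
    else:
        # Let the helper pick the suite-level version variable or query the appliance.
        fusion.fusion_api_set_default_api_version()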
Send a POST rest call to the EM RIS interface through the Fusion appliance | def hal_api_post_em_ris(self, fusion_ip, em_ip, resource, data=None, header=None, retries=5): # pylint: disable=unused-argument
dcs = BuiltIn().get_variable_value("${DCS}")
nic = BuiltIn().get_variable_value("${FUSION_NIC}")
ssh = paramiko.client.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not header:
header = '-H "Content-Type:application/json"'
if dcs:
command = '/usr/bin/curl %s -X POST %s -k https://' % (header, data) + em_ip + resource
else:
command = '/usr/bin/curl %s -X POST %s -k http://' % (header, data) + em_ip + "%" + nic + resource
try:
logger._log('Logging into EM to run: %s' % command)
ssh.connect(fusion_ip, username='root', password='hpvse1', timeout=30.0)
output = ssh.exec_command(command)
except paramiko.BadHostKeyException:
logger._warn("Not able to connect because of BadKeyException")
except paramiko.AuthenticationException:
logger._warn("Not able to connect because of Authentication")
except paramiko.SSHException:
logger._warn("Not able to connect because of SSHException")
except:
logger._warn("Not able to connect and run %s" % command)
stdoutl = list(output[1])
stderrl = list(output[2])
stdout = "".join(stdoutl)
stderr = "".join(stderrl)
logger._log("--- stdout ---")
logger._log(stdout)
logger._log("--- stderr ---")
logger._log(stderr)
logger._log("--- ---")
response = {'status_code': 0}
response['_content'] = ''
if stdout.find('HTTP') > -1:
retcode = re.sub('.*HTTP/1\.1 (\d+).*', '\g<1>', stdout)[:3]
respstr = re.sub('[^{]*(.*)', '\g<1>', stdout)
if int(retcode) != 202:
logger._warn("RIS call was not OK")
logger._log("Response:\n" + respstr)
else:
resource = '[{"executionTime":0,"risResources": { "%s" : %s' % (resource, respstr) + '},"service":"%s"}]' % fusion_ip
response['_content'] = resource
response['status_code'] = int(retcode)
else:
logger._log("RIS call returned no response")
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)",
"def do_POST(self,):\n self.http_method = 'POST'\n self.response()",
"def hal_api_perform_post_action(self, fusion_ip, action=None, parameters=None, retries=5):\n url = \"https://\" + fusion_ip + \\\n \"/perm/rest/tbird/pemOperation\"\n\n # Build Payload and initialize\n data = {\"PemOperation\": \"performAction\",\n \"ActionName\": \"\",\n \"ActionParameters\": {},\n \"SN\": BuiltIn().get_variable_value(\"${ENC_SERIAL_NUMBER}\")}\n if action is not None:\n data[\"ActionName\"] = action\n if parameters is not None:\n data[\"ActionParameters\"] = parameters\n\n # Retry in case rest call fails.\n # Currently calls to the webapp fail intermittently.\n attempts = 0\n for _ in range(0, retries):\n attempts += 1\n response = self.fusion_client.post(url, body=json.dumps(data))\n if isinstance(response, dict):\n # Normally, the response is returned in the form of a dict\n # If the status code is 200, it worked; don't make more attempts\n if response['status_code'] == 200:\n break\n else:\n # In this case, the HAPI received invalid data and chose to return a zero-length response.\n # This string may contain something like \"<Response: [200]>\"\n # but it is probably not incredibly useful to validate.\n # Return a dict since Robot Framework doesn't deal well with return values that could be either a dict or a string.\n return {\"response_string\": response}\n if attempts > 1:\n logger._warn(\n \"%d attempts were made; this operation should only take one attempt.\" % attempts)\n return response",
"def PostRequest(self):\n if self.__Payload: \n self.__Answer = requests.post(self.__URL, data = self.__Payload, headers = self.__Headers)\n Logs.WriteToLog(\"Data transited to web server\")\n else:\n Logs.WriteToLog(\"No payload in HTTP request\")\n raise Exception(\"Payload must be setted\")",
"def post(self, method):\n\n # Prepare parameters\n values = self.values\n values['uid'] = settings.ACA_API_USER\n values['password'] = settings.ACA_API_PASSWORD\n\n # Generate POST entity\n entity = et.Element('STUREQ')\n ele = et.SubElement(entity, 'Vers') # dunno why this is different\n ele.text = '1.00'\n for key, value in values.items():\n ele = et.SubElement(entity, key.upper())\n ele.text = value\n data = et.tostring(entity, encoding='big5')\n\n # Builds and sends the HTTP request\n url = settings.ACA_API_URL.format(method)\n headers = {\n 'Content-Type': 'text/xml; charset=big5',\n 'X-Requested-With': 'NTUVote',\n }\n\n try:\n response = requests.post(url, data=data, headers=headers)\n except Exception as e:\n logger.exception('Failed to connect the ACA server')\n raise ExternalError(code='external_server_down') from e\n\n return AcaResponse(response)",
"def _post(self, *args, **kwargs):\n return self._request('post', *args, **kwargs)",
"def do_POST(self):\r\n self._send_handler_response('POST')",
"def test_api_use_infonet_communication_post(self):\n body = Body5()\n response = self.client.open(\n '/api/use/infonet-communication/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def simulate_post(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'POST', path, **kwargs)",
"def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, *args, **kwargs)",
"def post_api_url(uri, data):\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n headers = {'Content-Type': 'application/yang-data+json',\n 'Accept': 'application/yang-data+json'}\n\n url = \"https://{host}:{port}/{uri}\".format(host=iosxe_restconf['address'],\n port=iosxe_restconf['port'], uri=uri)\n resp = requests.post(url,\n auth=(iosxe_restconf['username'], iosxe_restconf['password']),\n verify=False,\n headers=headers,\n data=data\n )\n return resp",
"def post(self):\n\n if config.logging:\n logfile.info(\"Request to /rest/broadsoft recieved\")\n if config.verbose:\n logconsole.info(\"Request to /rest/broadsoft recieved\")\n\n # Ensure that the user has sent a jwt to the endpoint.\n try:\n verify_jwt_in_request()\n except Exception as error:\n return make_response(\"<error>Unauthorized</error>\", 401)\n\n # Create a user object from the JWT identity object.\n user = User().from_identity(get_jwt_identity())\n\n # Check if a user was able to be created.\n if user is None:\n return \"<ErrorInfo><message>Not logged in</message><error>true</error></ErrorInfo>\", 401\n\n # Create a request parser to parse arguments\n parser = reqparse.RequestParser()\n\n # Configure endpoint arguments.\n parser.add_argument(\n name='endpoint',\n help='Missing the required broadsoft endpoint to connect to.',\n required=True)\n\n parser.add_argument(\n name='data',\n type=str,\n help='JSON data needs to be a string')\n\n parser.add_argument(\n name='method',\n help='Missing method type. ex) method:GET/PUT/POST...',\n required=True)\n\n # Check if the arguments passed were valid.\n try:\n args = parser.parse_args()\n except reqparse.exceptions.BadRequest as e:\n # If there are any errors, ensure that login=False is sent.\n message = \"<error>true</error>\"\n return message, 400\n\n # Get the data sent from the request.\n url = self.url + args['endpoint'].replace(\"<user>\", user.username)\n data = \"\"\n method = args['method']\n\n # Check if any data was sent\n if(args['data']):\n data = args['data']\n\n if config.logging:\n logfile.info(\"Sending data: \" + method + \" \" + url + \" \" + data)\n if config.verbose:\n logconsole.info(\"Sending data: \" + method + \" \" + url + \" \" + data)\n\n # Get the user's broadsoft token from the JWT and send a request to broadsoft.\n response = Proxy().to_broadsoft(method, url, data, user)\n\n # Check if a valid response was returned.\n if response.status_code == 200 or response.status_code == 201:\n\n # Output a response to the console and log files.\n if config.logging:\n logfile.info(\"Recieved:\" + str(response.status_code) + \" \" + str(response.content) if response.content else \"Recieved: \" + str(response.status_code))\n if config.verbose:\n logconsole.info(\"Recieved:\" + str(response.status_code) + \" \" + str(response.content) if response.content else \"Recieved: \" + str(response.status_code))\n\n # Format a response\n if response.content:\n return make_response(str(response.content.decode('ISO-8859-1')), 200)\n else:\n return make_response(\"\", 200)\n else:\n if config.logging:\n logfile.info(\"Recieved:\" + str(response.status_code) + \" \" + response.content.decode('ISO-8859-1') if response.content else \"\")\n if config.verbose:\n logconsole.info(\"Recieved:\" + str(response.status_code) + \" \" + response.content.decode('ISO-8859-1') if response.content else \"\")\n\n if response.content:\n return make_response(response.content.decode('ISO-8859-1'), response.status_code)\n else:\n return make_response(\"\", response.status_code)",
"def post(self, *path, **data):\n\t\treturn self.request('POST', *path, **data)",
"def _rest_call(self, data, action):\n path = '/wm/staticflowentrypusher/json'\n headers = {\n 'Content-type': 'application/json',\n 'Accept': 'application/json',\n }\n body = json.dumps(data)\n conn = httplib.HTTPConnection(self.host, self.port)\n conn.request(action, path, body, headers)\n response = conn.getresponse()\n ret = (response.status, response.reason, response.read())\n conn.close()\n return ret",
"def http_post(self, **kwargs):\n return self.rabjcallable.post(**kwargs)",
"def post_form(url, headers, payload):\n\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n\n return RestClient.make_post_request(url, headers=headers, data=payload)",
"def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)",
"def post(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('POST returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None: # this is for new items\n content_length = len(self.request.body)\n if content_length==0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('POST returning 400 response. Expected non zero content length.')\n else:\n json_body = json.loads(self.request.body)\n d = {}\n d['v'] = json_body['v']\n d['ip'] = json_body['cloudagent_ip']\n d['port'] = int(json_body['cloudagent_port'])\n d['operational_state'] = cloud_verifier_common.CloudAgent_Operational_State.START\n d['public_key'] = \"\"\n d['tpm_policy'] = json_body['tpm_policy']\n d['vtpm_policy'] = json_body['vtpm_policy']\n d['metadata'] = json_body['metadata']\n d['ima_whitelist'] = json_body['ima_whitelist']\n d['revocation_key'] = json_body['revocation_key']\n d['tpm_version'] = 0\n d['accept_tpm_hash_algs'] = json_body['accept_tpm_hash_algs']\n d['accept_tpm_encryption_algs'] = json_body['accept_tpm_encryption_algs']\n d['accept_tpm_signing_algs'] = json_body['accept_tpm_signing_algs']\n d['hash_alg'] = \"\"\n d['enc_alg'] = \"\"\n d['sign_alg'] = \"\"\n\n new_agent = self.db.add_agent(agent_id,d)\n\n # don't allow overwriting\n if new_agent is None:\n common.echo_json_response(self, 409, \"Agent of uuid %s already exists\"%(agent_id))\n logger.warning(\"Agent of uuid %s already exists\"%(agent_id))\n else:\n asyncio.ensure_future(self.process_agent(new_agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('POST returning 200 response for adding agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"POST returning 400 response. uri not supported\")\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"POST returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n\n self.finish()",
"def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)",
"def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)",
"def http_method_post():\n return 'POST'",
"def post(self):\n code, status = run_handlers.handle_data_post(self.request.headers, self.request.body)\n self.set_status(code)\n self.write(status)\n self.finish()",
"def post(self, uri, body=None, headers=None, auth=False):\n return self.send_request('POST', uri, body, headers, auth)",
"def send_post(url):\n HEADERS['accept'] = 'application/vnd.yang.data+json'\n if not url.startswith('/'):\n url = \"/{}\".format(url)\n url = BASE_URL + url\n resp = requests.post(url, headers=HEADERS)\n return resp",
"def do_POST(s):\n\t\tprint 'a'\n\t\tif s.path == '/execute':\n\t\t\tjson = draw.begin_draw()\n\n\t\ts.send_response(200)\n\t\ts.send_header(\"Content-type\", \"text/json\")\n\t\ts.end_headers()\n\n\t\tjson2 = json.encode('utf-8')\n\t\ts.wfile.write(json2)",
"async def post(self):\r\n data = await self.request.json()\r\n register_date = data[\"register_date\"]\r\n ip_address = data[\"ip_address\"]\r\n try:\r\n Agent.create(register_date=register_date, ip_address=ip_address)\r\n response_obj = {\"status\": \"success\"}\r\n return web.Response(text=str(response_obj), status=201)\r\n except Exception as exception:\r\n response_obj = {\"status\": \"failed\", \"reason\": exception}\r\n error_message = str(exception)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def rhevPost(url,data):\n conn = rhevConnect()\n conn.request(\"POST\", url, body = data.encode('utf-8'), headers = getHeaders())\n print url\n r = conn.getresponse()\n ## DEBUG \n ## TODO: check status \n status = r.read()\n print r.status,r.reason\n if int(r.status)>=400:\n print r.reason\n print status\n return status\n ## return r.read()",
"def post(self, *args, **kw):\n kw['method'] = 'POST'\n return self.open(*args, **kw)",
"def post(self, identity, service_name, hostname, params):\n return self.request(identity, 'POST', service_name, hostname, params)",
"def postTo(self,conn,data):\n #log(\"postTo: \"+str(conn))\n conn.request(self.command,self.path,data,self.headers)\n resp = conn.getresponse()\n log(\"postTo: \"+str(resp.status)+\", \"+str(resp.reason)+\", \"+str(resp.version))\n return resp"
] | [
"0.629005",
"0.6278455",
"0.6227176",
"0.61504656",
"0.6135106",
"0.6040742",
"0.60321116",
"0.6011967",
"0.5993692",
"0.59902436",
"0.59897685",
"0.59825003",
"0.5982412",
"0.59688205",
"0.59475356",
"0.59312737",
"0.59105766",
"0.5900485",
"0.5899863",
"0.5885932",
"0.58621204",
"0.5817727",
"0.578824",
"0.57858837",
"0.5777784",
"0.57725984",
"0.5769594",
"0.57515323",
"0.5750588",
"0.5738655"
] | 0.6545464 | 0 |
Send a Claim call to the Perm Webapp on Fusion | def perm_api_perform_claim(self, fusion_ip, fusion_claim_ip, retries=5):
# Build URL
url = "https://" + fusion_ip + "/perm/rest/resources/atlas/tbird/fts?ipAddress=" + fusion_claim_ip
response = None
# Retry in case rest call fails.
# Currently calls to the webapp fail intermittently.
attempts = 0
for _ in range(0, retries):
attempts += 1
response = self.fusion_client.get(url)
# Expect to get the following response upon successful 'fts' call:
# response{ 'fts': True , 'ipAddress: <fusion_claim_ip>' }
# 'fts': Status code of perm call to 'fts'. True = Success. False = Failure
# 'ipAddress': Should be fusion_claim_ip
if 'fts' in response and response['fts'] is True and 'ipAddress' in response and response['ipAddress'] != '':
break
if attempts > 1:
logger._warn("%d attempts were made; this operation should only take one attempt." % attempts)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def claim(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, name = ch.parse_number_and_name(args)\n out = ch.claim(ctx.user_object, name, number)\n await ctx.send(out)",
"def post(self):\n dao = ClaimDao()\n return dao.create(api.payload)",
"def post(self, request, *args, **kwargs):\n\n\t\tself.object = None\n\t\tclaim_pk = kwargs.get('claim_pk')\n\t\tself.claim = get_object_or_404(Claim, pk=claim_pk)\n\t\tform = self.get_form(self.get_form_class())\n\n\t\textra_forms = {\n\t\t\t'form': form,\n\t\t}\n\n\t\tif form.is_valid():\n\t\t\treturn self.form_valid(**extra_forms)\n\t\telse:\n\t\t\tmessages.add_message(self.request, messages.ERROR, \"Error saving Vendor Request Claim information\",\n\t\t\t\t\t\t\t\t extra_tags=\"alert alert-danger\")\n\t\t\treturn self.form_invalid(**extra_forms)",
"def submit(request):\n if not request.user.is_authenticated():\n return proceed(request)\n # If dev has already agreed, continue to next step.\n user = UserProfile.objects.get(pk=request.user.id)\n if not user.read_dev_agreement:\n return redirect('submit.app.terms')\n return manifest(request)",
"def _post(self, data=None, headers=None):\n return self.api.send_http_request_v2(method=\"POST\", url=\"https://auth.iqoption.com/api/v2/verify/2fa\",data=json.dumps(data), headers=headers)",
"def claim_email(request):\n email = request.POST.get('email', '')\n email_user = User.objects.filter(email=email)\n payload = {\n 'res': 'failed'\n }\n if email_user.exists() and \\\n not email_user[0].profile.send_mail:\n request.user.profile.add_email(email)\n payload['res'] = 'success'\n\n return payload",
"def claim_create_onaccept(form):\n\n # Get record ID\n form_vars = form.vars\n if \"id\" in form_vars:\n record_id = form_vars.id\n elif hasattr(form, \"record_id\"):\n record_id = form.record_id\n else:\n return\n\n db = current.db\n s3db = current.s3db\n\n table = s3db.fin_voucher_claim\n btable = s3db.fin_voucher_billing\n ptable = s3db.fin_voucher_program\n join = [ptable.on(ptable.id == table.program_id),\n btable.on(btable.id == table.billing_id),\n ]\n query = (table.id == record_id)\n row = db(query).select(table.id,\n table.program_id,\n table.billing_id,\n table.pe_id,\n table.status,\n btable.date,\n ptable.name,\n ptable.organisation_id,\n join = join,\n limitby = (0, 1),\n ).first()\n if not row:\n return\n program = row.fin_voucher_program\n billing = row.fin_voucher_billing\n claim = row.fin_voucher_claim\n\n if claim.status != \"NEW\":\n return\n\n error = None\n\n # Look up the provider organisation\n pe_id = claim.pe_id\n otable = s3db.org_organisation\n provider = db(otable.pe_id == pe_id).select(otable.id,\n otable.name,\n limitby = (0, 1),\n ).first()\n\n from .helpers import get_role_emails\n provider_accountants = get_role_emails(\"PROVIDER_ACCOUNTANT\", pe_id)\n if not provider_accountants:\n error = \"No provider accountant found\"\n\n if not error:\n # Lookup the template variables\n base_url = current.deployment_settings.get_base_public_url()\n appname = current.request.application\n data = {\"program\": program.name,\n \"date\": btable.date.represent(billing.date),\n \"organisation\": provider.name,\n \"url\": \"%s/%s/fin/voucher_claim/%s\" % (base_url, appname, claim.id),\n }\n\n # Send the email notification\n from .notifications import CMSNotifications\n error = CMSNotifications.send(provider_accountants,\n \"ClaimNotification\",\n data,\n module = \"fin\",\n resource = \"voucher_claim\",\n )\n if error:\n # Inform the program manager that the provider could not be notified\n msg = T(\"%(name)s could not be notified of new compensation claim: %(error)s\") % \\\n {\"name\": provider.name, \"error\": error}\n program_managers = get_role_emails(\"PROGRAM_MANAGER\",\n organisation_id = program.organisation_id,\n )\n if program_managers:\n current.msg.send_email(to = program_managers,\n subject = T(\"Provider Notification Failed\"),\n message = msg,\n )\n current.log.error(msg)\n else:\n current.log.debug(\"Provider '%s' notified about new compensation claim\" % provider.name)",
"async def store_claim(self, claim_json: str) -> None: # prover\n await anoncreds.prover_store_claim(self.wallet_handle, claim_json)",
"def petition(handler):\n # client_object = Clients()\n sound_cloud_client = Clients().sound_cloud_client()\n handler.redirect(sound_cloud_client.authorize_url())",
"def send_mfa(\n self,\n form: object = None, # noqa: ARG002\n code: str = \"\",\n trusted_device: bool = True,\n ) -> None:\n el_otp = self._driver.find_element(By.CSS_SELECTOR, \"input[name=otc]\", timeout=5)\n el_otp.clear()\n el_otp.send_keys(code)\n\n el_verify = self._driver.find_element(By.CSS_SELECTOR, \"input[type=submit]\", timeout=5)\n if el_verify.accessible_name != \"Verify\":\n msg = f'{self.__class__.__name__}: Cannot find \"Verify\" button'\n raise IdpError(msg)\n\n if trusted_device:\n el_verify.click()\n\n self._stay_signed_in()",
"def claimToken(self):\n response = self._session.get('https://plex.tv/api/claim/token.json', headers=self._headers(), timeout=TIMEOUT)\n if response.status_code not in (200, 201, 204): # pragma: no cover\n codename = codes.get(response.status_code)[0]\n errtext = response.text.replace('\\n', ' ')\n raise BadRequest(f'({response.status_code}) {codename} {response.url}; {errtext}')\n return response.json()['token']",
"def fhir_request(request, fhir_ask):\n\n fhir_call = {'CLIENT_ID': CLIENT_ID,\n 'AUTH_URL': AUTH_URL,\n 'template': \"result.html\",\n 'ask': \"/fhir/Patient?_format=json\",\n 'name': \"FHIR Server Data\",\n 'format': \"json\",\n 'headers': {'content-type': 'application/json',\n 'bearer': get_code(CLIENT_ID, AUTH_URL)},\n 'url': settings.OAUTH_TEST_INFO['BASE'],\n }\n\n if fhir_ask == None:\n return None\n\n # Overlay fhir_ask onto fhir_call\n for key, value in fhir_ask.items():\n fhir_call[key] = value\n\n if settings.DEBUG:\n print(\"Ask:\", fhir_ask)\n print('Call:', fhir_call)\n\n # Get the current State value\n state = get_state(CLIENT_ID, AUTH_URL)\n code = get_code(fhir_call['CLIENT_ID'], fhir_call['AUTH_URL'])\n access = get_access(state)\n refresh = get_refresh(state)\n\n parms = {'client_id': CLIENT_ID,\n 'redirect_uri': REDIRECT_URI,\n 'client_secret': CLIENT_SECRET,\n # 'grant_type': 'authorization_code',\n 'grant_type': 'refresh_token',\n 'refresh_token': refresh,\n 'state': state,\n 'response_type': 'code',\n }\n\n data = {'refresh_token': refresh,'redirect_uri': REDIRECT_URI,\n 'code': code,}\n\n # o = requests.post(TOKEN_URL, data=params, headers=fhir_call['headers'])\n # print(\"O:\", o.text)\n\n # url = SERVICE.get_authorize_url(**params)\n\n if settings.DEBUG:\n print(\"Code:\", code, \"Accees:\", access, \"Refresh:\", refresh)\n\n # data = {'code': code,\n # 'grant_type': 'authorization_code',\n # 'redirect_uri': REDIRECT_URI,\n # 'access_token': access,\n # 'refresh_token': refresh,\n # }\n\n session = SERVICE.get_auth_session(data={'code':code,'redirect_uri':REDIRECT_URI})\n #session = SERVICE.get_raw_access_token('POST',**data)\n response = session.json()\n\n print(\"RESPONSE:\", response)\n\n # r = session.get(fhir_call['url']+fhir_call['ask'], headers=fhir_call['headers'] )\n # session = SERVICE.get_session(response['access_token'])\n # print(\"Get:\", r)\n\n pass_to = fhir_call['url']\n pass_to += fhir_call['ask']\n\n headers = fhir_call['headers']\n\n r = session.get(pass_to, headers=headers)\n\n if settings.DEBUG:\n print(\"R:\", r)\n print(r.status_code)\n print(r.text)\n\n if r.status_code == 200:\n me = r.json()\n print(\"Me.json returned\", me)\n else:\n\n msg = \"Error %s: %s. [%s]\" % (r.status_code,\n ERROR_CODE[r.status_code],\n pass_to)\n if settings.DEBUG:\n print(msg)\n messages.error(request, msg)\n return kickout(msg, format=fhir_call['format'], status_code=r.status_code)\n\n convert = json.loads(r.text, object_pairs_hook=OrderedDict)\n\n content = OrderedDict(convert)\n fhir_call['get_fmt'] = \"json\"\n fhir_call['pass_to'] = pass_to\n fhir_call['content'] = json.dumps(content, indent=4)\n\n return fhir_call",
"def post(self):\n\n action = self.request.get('action')\n if not action:\n raise ErrorMessage(404, 'missing action (requested_action) params')\n\n self.require_action_permitted('grant')\n\n account = model.Account.get(self.request.get('key'))\n if not account:\n raise ErrorMessage(404, 'bad key given')\n\n #TODO(eyalf): define account.display_name() or something\n name = account.email\n if not action in account.requested_actions:\n #i18n: Error message\n raise ErrorMessage(404, _('No pending request for '\n '%(account_action)s by %(user)s')\n % (action, name))\n account.requested_actions.remove(action)\n grant = self.request.get('grant', 'deny')\n if grant == 'approve':\n account.actions.append(action)\n account.put()\n logging.info('%s request for %s was %s' % (account.email,\n action,\n grant))\n\n if self.params.embed:\n if grant == 'approve':\n self.write(\n #i18n: Application for the given permission action approved\n _('Request for becoming %(action)s was approved.') % action)\n else:\n self.write(\n #i18n: Application for the given permission action denied\n _('Request for becoming %(action)s was denied.') % action)\n else:\n raise Redirect(self.get_url('/grant_access'))",
"def _process_vep_assertion(self, request):\n # Make sure they're using a GET request.\n if request.method != \"GET\":\n resp = Response()\n resp.status = 405\n resp.content_type = \"text/plain\"\n resp.body = \"token requests must get GET\"\n request.environ[\"repoze.who.application\"] = resp\n return None\n # Make sure they're sending an Authorization header.\n if not request.authorization:\n msg = \"you must provide an authorization header\"\n return self._respond_unauthorized(request, msg)\n # Grab the assertion from the Authorization header.\n scheme, assertion = request.authorization\n if scheme.lower() != \"browser-id\":\n msg = \"The auth scheme \\\"%s\\\" is not supported\" % (scheme,)\n return self._respond_bad_request(request, msg.encode(\"utf8\"))\n # Extract the audience, so we can check against wildcards.\n try:\n audience = get_assertion_info(assertion)[\"audience\"]\n except (ValueError, KeyError):\n return self._respond_bad_request(request, \"invalid assertion\")\n if not self._check_audience(request, audience):\n msg = \"The audience \\\"%s\\\" is not acceptable\" % (audience,)\n return self._respond_bad_request(request, msg.encode(\"utf8\"))\n # Verify the assertion and find out who they are.\n try:\n data = self.verifier.verify(assertion)\n except Exception, e:\n msg = \"Invalid BrowserID assertion: \" + str(e)\n return self._respond_bad_request(request, msg)\n # OK, we can go ahead and issue a token.\n token, secret, extra = self.token_manager.make_token(request, data)\n\n if token is None:\n msg = \"that email address is not recognised\"\n return self._respond_unauthorized(request, msg)\n resp = Response()\n resp.status = 200\n resp.content_type = \"application/json\"\n\n body = {\n \"id\": token,\n \"key\": secret,\n \"algorithm\": \"hmac-sha-1\",\n }\n\n if extra is not None:\n body.update(extra)\n\n resp.body = json.dumps(body)\n request.environ[\"repoze.who.application\"] = resp",
"def get(self):\n return {\"claims\": g.claims}, 200",
"def fhir_enquiry(request, context_override={}):\n\n state = get_state(CLIENT_ID,AUTH_URL)\n code = get_code(CLIENT_ID,AUTH_URL)\n\n # set default context\n context = {}\n context['template'] = \"result.html\"\n context['get_fmt'] = \"json\"\n context['display'] = \"Me\"\n context['code'] = code\n context['state'] = state\n context['ask'] = \"/api/v1/me?_format=json\"\n context['url'] = settings.OAUTH_TEST_INFO['BASE']\n context['headers'] = {'content-type': 'application/x-www-form-urlencoded',\n 'Authorization': \"Bearer \"+ get_code(CLIENT_ID, AUTH_URL)},\n\n # add / overwrite anything in context_override\n context = update_dict(context, context_override)\n\n data = {'code': code,\n 'grant_type': 'authorization_code',\n 'key': 'access_token',\n #'key': 'refresh_token',\n 'access_token': get_access(state),\n 'refresh_token': get_refresh(state),\n 'redirect_uri': REDIRECT_URI}\n\n if settings.DEBUG:\n print(\"Context after update:\", context)\n print(\"Data:\", data)\n\n print(\"SERVICE:\", SERVICE )\n\n # Get access_token\n headers = {}\n print('Context Headers:', dict(context['headers'][0]))\n #headers = {'headers': update_dict(headers, context_override=dict(context['headers'][0]))}\n headers = update_dict(headers, context_override=dict(context['headers'][0]))\n print(\"Headers:\", headers)\n\n kw_to_send = {'data': data, 'headers': headers}\n\n #session = SERVICE.get_auth_session(method=\"POST\",**kw_to_send)\n #session = SERVICE.get_session(get_access(state))\n #session = SERVICE.get_raw_access_token(method=\"POST\", **kw_to_send)\n session = SERVICE.get_raw_access_token(data=data)\n\n #response = SERVICE.get_access_token(method=\"POST\")\n # response = SERVICE.get_auth_session(data=data)\n print(\"Auth Session\", session)\n #response = SERVICE.get_raw_access_token(data=data, **headers)\n\n get_text = session.json()\n\n if 'access_token' in get_text:\n print(\"got an access token\")\n access = save_tokens(state,\n get_text['access_token'],\n get_text['refresh_token'])\n\n print(\"RESPONSE:\", get_text)\n # RESPONSE: {\"expires_in\": 36000,\n # \"access_token\": \"h1vY5eDu69JKfV4nPpdu8xEan63hKl\",\n # \"scope\": \"patient/*.read write_consent\",\n # \"token_type\": \"Bearer\",\n # \"refresh_token\": \"6HZnSwhfsGvfr9Aguw5n0e5CoGr8CQ\"}\n\n\n sesn = SERVICE.get_session(get_text['access_token'])\n print(\"SESSION:\", sesn)\n\n r = sesn.get(context['url'] + context['ask'])\n\n if settings.DEBUG:\n print(\"R:\", r.content)\n\n return r",
"async def patreon(self, ctx):\n await ctx.send(\"https://www.patreon.com/joinemm\")",
"def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])",
"def grant_token(request):\n\n grant_token_svc = request.find_service(name=\"grant_token\")\n h_user = request.lti_user.h_user\n\n return {\"grant_token\": grant_token_svc.generate_token(h_user)}",
"def do_mfa_verify(mfa_info):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_info[\"requestToken\"],\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/start-flow\"%mfa_info[\"mfaServerUrl\"],\n data=json.dumps({ \"state_transport\": \"polling\" }).encode(),\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"MFA start flow error: %s\"%error) from None\n mfa_flow_info = json.loads(result)\n mfa_transaction_token = mfa_flow_info[\"transaction_token\"]\n # print(mfa_flow_info)\n # print(mfa_transaction_token)\n\n mfa_code = input(\"Please enter your MFA verification code: \")\n mfa_payload = {\n \"code\": mfa_code,\n \"type\": \"manual_input\"\n }\n mfa_payload_json = json.dumps(mfa_payload).encode()\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_transaction_token,\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/verify-otp\"%mfa_info[\"mfaServerUrl\"],\n data=mfa_payload_json,\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"MFA verify error: %s\"%error) from None\n # print(result)\n\n headers = {\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_transaction_token,\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/transaction-state\"%mfa_info[\"mfaServerUrl\"],\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"Get MFA result error: %s\"%error) from None\n mfa_result = json.loads(result)\n if mfa_result[\"state\"] != \"accepted\":\n raise RuntimeError(\"MFA verification is not accepted: %s\"%result)\n # print(mfa_result)\n\n return mfa_result",
"def PostUserConsent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def claims(self, claims):\n\n self._claims = claims",
"def authorize(req, resp):\n api.redirect(resp, location=authorize_url())",
"def authorize(event, context):\n token = event['authorizationToken']\n log.debug(\"Token: {}\".format(token))\n principalId = token\n context = {\n 'simpleAuth': True,\n }\n\n table = dynamodb.Table(os.environ['ACCESSTOKENS_TABLE'])\n dbresponse = table.scan(\n FilterExpression=Attr('token').eq(token)\n )\n if len(dbresponse['Items']) == 1:\n if dbresponse['Items'][0]['enabled'] == True:\n policy = generatePolicy('allow', event['methodArn'])\n context['user'] = dbresponse['Items'][0]['name']\n else:\n policy = generatePolicy('deny', event['methodArn'])\n else:\n # Check if metasmoke has a new token matching this one\n url = \"https://metasmoke.erwaysoftware.com/smoke_detector/check_token/{}\".format(token)\n with urlopen(url) as response:\n ms_response = json.load(response)\n if ms_response[\"exists\"]:\n # Add the token to our table\n \n item = {\n 'token': token,\n 'name': ms_response[\"location\"],\n 'created_at': ms_response[\"created_at\"],\n 'modified_by': ms_response[\"owner_name\"],\n 'modified_at': ms_response[\"updated_at\"],\n 'enabled': True\n }\n\n table.put_item(Item=item)\n\n # Allow the requests\n policy = generatePolicy('allow', event['methodArn'])\n context['user'] = item['name']\n else:\n # No token matches. Deny the request\n policy = generatePolicy('deny', event['methodArn'])\n\n response = {\n 'principalId': principalId,\n 'policyDocument': policy,\n 'context': context\n }\n log.debug(response)\n return response",
"def credential_application_request(request, application):\n applicant_name = application.get_full_name()\n subject = f'{settings.SITE_NAME} credentialing application notification'\n body = loader.render_to_string(\n 'notification/email/notify_credential_request.html', {\n 'application': application,\n 'applicant_name': applicant_name,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [application.user.email], fail_silently=False)",
"def request_verification_bypass(request, env, email):\n if request.method == 'POST':\n oauth_client = OAUTHCLIENT(env)\n token = oauth_client.get_token()\n content = {'message': email + \" has been requested for By-pass to \" + env}\n\n if 'access_token' in token:\n if env == 'qa32':\n host = 'http://qajb101.p2pcredit.local/users/email/'\n elif env == 'stg':\n host = 'http://stage-api-proxy-A.vip.c1.stg/users/email/'\n elif env == 'qa20':\n host = 'http://np97.c1.dev/users/email/'\n\n # create header with access token\n headers = {'Authorization': token['token_type'] + ' ' + token['access_token']}\n\n # request email verification by-pass with access-token\n response = requests.get(\n host + email,\n headers=headers\n )\n\n response_json = response.json()\n\n # build response message\n if response_json['email_exists']:\n if response_json['activation_key'] == \"\":\n content['result'] = \"VERIFIED\"\n content['message'] = email + \" is auto-verified on \" + env\n else:\n content['result'] = \"NOT VERIFIED\"\n content['message'] = email + \" is not verified yet on \" + env + \\\n \". Please verify your email by clicking 'Verify Email' link.\"\n else:\n content['result'] = \"USER NOT FOUND\"\n content['message'] = email + \" is not found on \" + env\n\n response_status = status.HTTP_200_OK\n content['response'] = response_json\n else:\n content['result'] = str(token)\n response_status = status.HTTP_500_INTERNAL_SERVER_ERROR\n content['response'] = 'No token generated'\n\n return Response(content, status=response_status)",
"def request(self, token):\n pass",
"def credential_verify(msg: VerifyJWTMessage):\n\n # Check if we have received some data in the POST\n jwt_cert = msg.payload\n if len(jwt_cert) == 0:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"No data received\")\n\n # Verify the certificate\n print(len(jwt_cert))\n print(jwt_cert)\n try:\n claims = safeisland.verify_cert_token(jwt_cert)\n except JWException as e:\n detail=str(e)\n log.error(e)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)\n except Exception as e:\n detail=str(e)\n log.error(detail)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)\n\n \n if claims is None:\n log.error(\"Verification of token failed\")\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"Verification of token failed\")\n\n # If we reached here, the JWT was verified and can return the claims in JSON format\n return {\"payload\": claims}",
"def send_mfa(self):\n\n try:\n response = self.post(\"/authentication/loginToken\",\n {\"user\": self.user, \"password\": self.password, \"TempCode\": True})\n except:\n print(\"Exception - unable to submit token request\")\n return False\n return True if response.status_code in [200, 204] else False",
"def authorization(txnId):\n otp = input(\"Enter OTP : \")\n response = post_request(\n url=BASE_URL + CONFIRM_OTP_URL,\n body={\n \"otp\": hashlib.sha256(str(otp).encode(\"utf-8\")).hexdigest(),\n \"txnId\": txnId\n }\n )\n if response.status_code == 200:\n return json.loads(response.text)['token']\n else:\n print(get_error_message(str(response.status_code)))\n exit(1)"
] | [
"0.58439505",
"0.55393654",
"0.52551943",
"0.5190258",
"0.5176609",
"0.51296705",
"0.51259375",
"0.51256275",
"0.5119167",
"0.50780183",
"0.5069017",
"0.50431377",
"0.50258064",
"0.49945018",
"0.49897614",
"0.49878263",
"0.49685344",
"0.49454448",
"0.49396595",
"0.49318913",
"0.49195212",
"0.488129",
"0.48317719",
"0.48263538",
"0.48173654",
"0.4800861",
"0.47976723",
"0.47945666",
"0.47648516",
"0.47441718"
] | 0.64909756 | 0 |
Compare two IP addresses. Return True if they are equal, False otherwise | def ip_address_match(self, ip1, ip2):
# Build IP objects
converted_ip1 = IPy.IP(ip1)
converted_ip2 = IPy.IP(ip2)
if converted_ip1 == converted_ip2:
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compare_ip(ip1, ip2):\n return cmp(normalize_ip(ip1), normalize_ip(ip2))",
"def is_ip_address_equal(ip1, ip2):\n if Convert.is_valid_ipv6_address(ip1) and Convert.is_valid_ipv6_address(ip2):\n if Convert.is_ipv6_equal(ip1, ip2):\n return True\n else:\n return ip1 == ip2\n\n return False",
"def test_ipv4_equality_internal_v6(self):\n ip1 = ip_address.IPAddress(\"192.168.178.4\")\n ip1_2 = ip_address.IPAddress(\"192.168.178.4\")\n \n ip2 = ip_address.IPAddress(\"10.168.178.4\")\n ip2_2 = ip_address.IPAddress(\"10.168.178.4\")\n \n assert ip1 == ip1_2\n assert ip2 == ip2_2\n assert ip1 != ip2",
"def compare_ip(self, curr_ip: str, peer: str):\n curr_nums = list(map(int, curr_ip.split(\".\")))\n peer_nums = list(map(int, peer.split(\".\")))\n # IPv4 have four 8-bit fields separated by periods (dotted quad)\n for i in range(4):\n if curr_nums[i] > peer_nums[i]:\n return 1\n elif curr_nums[i] < peer_nums[i]:\n return -1\n return 0",
"def compare_addresses(s1_1, s1_2, s2_1, s2_2):\n\n return ((s1_1 == s2_1) | (s1_2 == s2_2) | (s1_1 == s2_2) | (s1_2 == s2_1)).astype(float)",
"def is_ipv6_equal(ip1, ip2):\n return socket.inet_pton(socket.AF_INET6, ip1) == socket.inet_pton(socket.AF_INET6, ip2)",
"def isInSameNetwork(ip_add1, ip_add2, mask):\n if _check_ip(ip_add1) and _check_ip(ip_add2) and _check_ip(mask) \\\n and isValidMask(mask):\n ip1_num, = unpack(\"!I\", inet_aton(ip_add1))\n ip2_num, = unpack(\"!I\", inet_aton(ip_add2))\n mask_num, = unpack(\"!I\", inet_aton(mask))\n if ip1_num & mask_num != ip2_num & mask_num:\n return False\n else:\n return True",
"def is_ip(self,inputs):\n format = '((?:(?:25[0-5]|2[0-4]\\\\d|[01]?\\\\d?\\\\d)\\\\.){3}(?:25[0-5]|2[0-4]\\\\d|[01]?\\\\d?\\\\d))'\n pattern = re.match(format, inputs)\n if pattern is not None:\n return True\n else:\n return False",
"def matchIP(self, ip):\n return self._ip == ip",
"def is_actual_ip(self, ip_addr):\n try:\n socket.inet_aton(ip_addr)\n return True\n except socket.error:\n return False",
"def is_same_subnet(addr1, addr2, subnet) -> bool:\n\n if ipaddress.ip_network((addr1, subnet), strict=False) == ipaddress.ip_network(\n (addr2, subnet),\n strict=False,\n ):\n return True\n\n return False",
"def isNetConflict(ip_addr1, mask1, ip_addr2, mask2):\n subnet1 = calcSubnet(ip_addr1, mask1)\n if not subnet1:\n return False\n\n subnet2 = calcSubnet(ip_addr2, mask2)\n if not subnet2:\n return False\n\n if subnet1 == subnet2:\n return False",
"def equals(self, other):\n\n isEquals = False\n\n if self.ipv4 is not None:\n isEquals = self.ipv4.equals(other.ipv4) \n\n if isEquals is False:\n if self.ipv6 is not None:\n isEquals = self.ipv6.equals(other.ipv6) \n\n return isEquals",
"def check_if_ip(address):\n address_list = map(lambda x: int(x), address.split('.'))\n\n if len(address_list) != 4:\n return False\n for octet in address_list:\n if not 0 <= octet <= 255:\n return False\n if address_list[0] in [0, 10, 127, 255]:\n return False\n return True",
"def __gt__(self, other):\n return self.start_addr > other.start_addr",
"def isIP(ipToTest):\n \n try:\n socket.inet_aton(ipToTest)\n return True\n except socket.error:\n return False",
"def test_Ints_to_IPs(self):\n someInts = helpers.ints_to_IPs([\n 111239847,\n 167239847,\n 2291809961,\n 67306243,\n 0\n ])\n someIPs = [\n '00000110101000010110001010100111',\n '00001001111101111110000010100111',\n '10001000100110100011111010101001',\n '00000100000000110000001100000011',\n '00000000000000000000000000000000'\n ]\n self.assertEqual(len(someIPs), len(someInts))\n for someIndex in range(len(someInts)):\n self.assertEqual(someIPs[someIndex], someInts[someIndex])",
"def is_reserved(ip):\n if ip_between(ip, \"0.0.0.0\", \"0.255.255.255\"):\n return True\n elif ip_between(ip, \"10.0.0.0\", \"10.255.255.255\"):\n return True\n elif ip_between(ip, \"100.64.0.0\", \"100.127.255.255\"):\n return True\n elif ip_between(ip, \"127.0.0.0\", \"127.255.255.255\"):\n return True\n elif ip_between(ip, \"169.254.0.0\", \"169.254.255.255\"):\n return True\n elif ip_between(ip, \"172.16.0.0\", \"172.31.255.255\"):\n return True\n elif ip_between(ip, \"192.0.0.0\", \"192.0.0.255\"):\n return True\n elif ip_between(ip, \"192.0.2.0\", \"192.0.2.255\"):\n return True\n elif ip_between(ip, \"192.88.99.0\", \"192.88.99.255\"):\n return True\n elif ip_between(ip, \"192.168.0.0\", \"192.168.255.255\"):\n return True\n elif ip_between(ip, \"198.18.0.0\", \"198.19.255.255\"):\n return True\n elif ip_between(ip, \"198.51.100.0\", \"198.51.100.255\"):\n return True\n elif ip_between(ip, \"203.0.113.0\", \"203.0.113.255\"):\n return True\n elif ip_between(ip, \"224.0.0.0\", \"255.255.255.255\"):\n return True\n else:\n return False",
"def test_ipv4_from_binary_internal_v6(self):\n ip1 = ip_address.IPAddress(\"192.168.178.4\")\n ip1_2 = ip_address.IPAddress(ip1.bytes, binary=True)\n assert ip1 == ip1_2",
"def test_IPs_to_ints(self):\n self.assertEqual(helpers.int_to_IPv4(0), '0.0.0.0')\n self.assertEqual(helpers.int_to_IPv4(2291809961), '136.154.62.169')\n someInts = [\n 111239847,\n 167239847,\n 2291809961,\n 67306243,\n 0\n ]\n someIPs = helpers.IPs_to_ints([\n '00000110101000010110001010100111',\n '00001001111101111110000010100111',\n '10001000100110100011111010101001',\n '00000100000000110000001100000011',\n '00000000000000000000000000000000'\n ])\n self.assertEqual(len(someIPs), len(someInts))\n for someIndex in range(len(someInts)):\n self.assertEqual(someIPs[someIndex], someInts[someIndex])",
"def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")",
"def match(self, _ip):\n try:\n return bool(ip_address(_ip) in self.network)\n except ValueError:\n return False",
"def ip_between(ip, start, finish):\n\n if is_IPv4Address(ip) and is_IPv4Address(start) and is_IPv4Address(finish):\n return IPAddress(ip) in IPRange(start, finish)\n else:\n return False",
"def is_ip(self) -> bool:\n return self.typ == ETH_P_IP",
"def isValidIP(ip_add):\n if _check_ip(ip_add):\n return True\n return False",
"def is_ip(value):\n try:\n IP(value)\n except ValueError:\n return False\n return True",
"def __eq__(self, other):\n return self.storage_ip == other.storage_ip and self.client_port == other.client_port",
"def __lt__(self, other):\n return self.start_addr < other.start_addr",
"def test_ipv4_from_binary(self):\n ip1 = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n ip1_2 = ip_address.IPAddress(ip1.bytes, binary=True, force_v4=True)\n assert ip1 == ip1_2",
"def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255"
] | [
"0.78469706",
"0.7376112",
"0.73338",
"0.67397344",
"0.6569595",
"0.647767",
"0.641742",
"0.6412459",
"0.6405167",
"0.63363826",
"0.6331186",
"0.6294796",
"0.62394756",
"0.6137863",
"0.61093587",
"0.6021489",
"0.5972934",
"0.59692293",
"0.59515405",
"0.59456486",
"0.593293",
"0.5925398",
"0.5911506",
"0.5900736",
"0.58956635",
"0.58915174",
"0.5875186",
"0.58603966",
"0.58571565",
"0.5855901"
] | 0.8048186 | 0 |
Gets a Deployment Manager. [Arguments] | def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):
return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)",
"def GetManager(self):\r\n\r\n return self.manager",
"def get_manager():\n return __manager__",
"def getManager(self):\n return self._manager",
"def get_deployment_updates_manager(preview=False):\n if preview:\n return current_app.config.setdefault(\n 'deployment_updates_preview_manager',\n DeploymentUpdateManager(get_read_only_storage_manager())\n )\n return current_app.config.setdefault(\n 'deployment_updates_manager',\n DeploymentUpdateManager(get_storage_manager())\n )",
"def _get_package_manager():\n\n cosmos_url = _get_cosmos_url()\n cosmos_manager = cosmospackage.Cosmos(cosmos_url)\n if cosmos_manager.enabled():\n return cosmos_manager\n else:\n msg = (\"This version of the DCOS CLI is not supported for your \"\n \"cluster. Please downgrade the CLI to an older version: \"\n \"https://dcos.io/docs/usage/cli/update/#downgrade\"\n )\n raise DCOSException(msg)",
"def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager",
"def getServiceManager( cHost=\"localhost\", cPort=\"2002\" ):\n global goServiceManager\n global pythonloader\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n\n goServiceManager=oLocalContext.ServiceManager\n\n return goServiceManager",
"def fusion_api_create_deployment_manager(self, body, api=None, headers=None):\n return self.dep_mgr.create(body=body, api=api, headers=headers)",
"def get_manager():\n\n return multiprocessing.Manager()",
"def get_available_package_manager(self):\n for manager in self.package.keys():\n try:\n executable = self.SUPPORTED_PACKAGE_MANAGERS[manager]\n if is_executable_exists(executable):\n return manager\n except KeyError:\n raise NotImplementedError(\"{} is not supported\".format(manager))\n raise NotImplementedError(\"This system doesn't have any of the \"\n 'supported package manager(s): '\n '{}'.format(','.join(self.package.keys())))",
"def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def plugins_get_mgr():\n global pluginmgr\n return pluginmgr",
"def get_parser():\n\n parser = parser.ArgumentParser()\n return parser",
"def getServiceManager( cHost=\"localhost\", cPort=\"8100\" ):\n global goServiceManager\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n oLocalResolver = oLocalContext.ServiceManager.createInstanceWithContext(\n \"com.sun.star.bridge.UnoUrlResolver\", oLocalContext )\n # Connect to the running OpenOffice.org and get its context.\n oContext = oLocalResolver.resolve( \"uno:socket,host=\" + cHost + \",port=\" + cPort + \";urp;StarOffice.ComponentContext\" )\n # Get the ServiceManager object\n goServiceManager = oContext.ServiceManager\n return goServiceManager",
"def get(uuid):\n pmanager = PushManager.query.filter_by(\n uuid=uuid\n ).one_or_none()\n if pmanager is None:\n raise GatlinException(\"App not exist\", 404)\n return pmanager",
"def getAPIsManager(self):\n return self.apisManager",
"def _retrieve_manager(provider_id):\n provider = _retrieve_provider(provider_id)\n MachineManager = provider.get_provider_manager()\n return MachineManager(provider)",
"def get_metadata_manager(config):\n\n context = config.contextualization_type\n metadata_manager_class = '%sMetadataManager' % context\n if not (metadata_manager_class in globals()):\n raise NotImplementedError('Implementation for %s not available' % context)\n return (globals()[metadata_manager_class])(config)",
"def get_podmanager_by_uuid(cls, podmanager_uuid):\n return cls.dbdriver.get_podmanager_by_uuid(podmanager_uuid)",
"def getPlatformMaintainer(self, name, email):\r\n if self.platform_maintainers.has_key(name):\r\n return self.platform_maintainers[name]\r\n else:\r\n self.platform_maintainers[name] = PlatformMaintainer(name, email)\r\n return self.platform_maintainers[name]",
"def getPackageManager(self) -> None:\n\t\tfor pkgmgr in config.SUPPORTED_PACKAGE_MGRS:\n\t\t\tif subprocess.run([\"which\", pkgmgr]).returncode == 0:\n\t\t\t\tself.package_manager = pkgmgr\n\t\t\t\treturn\n\t\tlogger.error(\"Supported package manager not found, aborting.\")\n\t\traise ValueError(\"Package manager unsupported\")",
"def get_task_manager(task_manager=None):\n global _task_manager\n if _task_manager is None:\n if task_manager is None:\n _task_manager = TaskManagerImpl()\n else:\n constructor = dynamic_import(task_manager)\n _task_manager = constructor()\n\n return _task_manager",
"def mgmt_tool(self) -> MgmtClient:\n return self._mgmt_tool",
"def get_entity_manager(self):\n return self.game.entity_manager",
"def get_instance():\n if PersistenceManager._instance is None:\n PersistenceManager._instance = PersistenceManager()\n return PersistenceManager._instance",
"def manager(self):\n if \"manager\" in self._prop_dict:\n if isinstance(self._prop_dict[\"manager\"], OneDriveObjectBase):\n return self._prop_dict[\"manager\"]\n else :\n self._prop_dict[\"manager\"] = DirectoryObject(self._prop_dict[\"manager\"])\n return self._prop_dict[\"manager\"]\n\n return None",
"def get_instance(cls):\n global DNS_MANAGER_API\n if not DNS_MANAGER_API:\n DNS_MANAGER_API = cls()\n return DNS_MANAGER_API",
"def get_device_manager(device_model: str):\n return _get_device_handler_or_manager(device_model, True)",
"def get_extension_manager(self):\n return get_extension_manager()"
] | [
"0.6725439",
"0.64918023",
"0.62597597",
"0.6233775",
"0.61700165",
"0.6023853",
"0.58021766",
"0.57533944",
"0.57072103",
"0.5679756",
"0.5606789",
"0.5581873",
"0.55539227",
"0.5527742",
"0.55214655",
"0.5490343",
"0.54241663",
"0.53941226",
"0.53548354",
"0.5341178",
"0.53381634",
"0.5304182",
"0.52717113",
"0.5266244",
"0.52193284",
"0.5217716",
"0.5213256",
"0.51829773",
"0.51490855",
"0.51478493"
] | 0.7509083 | 0 |
Creates a Deployment Manager. [Arguments] | def fusion_api_create_deployment_manager(self, body, api=None, headers=None):
return self.dep_mgr.create(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_podmanager(cls, values):\n return cls.dbdriver.create_podmanager(values)",
"def create_manager(self, username, tenancy):\n raise NotImplementedError",
"def create_manager(app, db):\n manager = Manager(app)\n\n manager.add_command(\"runserver\", Server())\n\n def make_shell_context():\n return dict(app=app, db=db)\n manager.add_command(\"shell\", Shell(make_context=make_shell_context))\n\n return manager",
"def create_deployment(StackId=None, AppId=None, InstanceIds=None, LayerIds=None, Command=None, Comment=None, CustomJson=None):\n pass",
"def test_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={\"path\": \"manager?project=ProjectTest\", \"action\": \"redirect\", \"status\": \"success\"},\n status=200\n )\n\n self.azk.create(self.project, self.description)",
"def test_create_deployment(self):\n pass",
"def cmd_apps__create(args):\n \n if args.name is None:\n args.name = os.path.basename(os.getcwd())\n\n url = remote.create_project(args.name)\n \n if in_git_repo():\n if get_push_url('tinyserv') is None:\n git(None, 'remote', 'add', 'tinyserv', url)\n print \"Added remote 'tinyserv'.\"\n else:\n print \"This repository is already configured for app '%s'.\" % \\\n _get_current_project_name()\n \n print \"Remote repository URL is %s.\" % url",
"def fusion_api_create_hypervisor_manager(self, body, api=None, headers=None):\n return self.hypervisor_mgr.create(body=body, api=api, headers=headers)",
"def setup_args_create(parser):\n parser.add_argument(\"--domain\", required=False)\n parser.add_argument(\"--ansible\", required=False,\n dest=\"ansible\", action=\"store_true\")\n return parser",
"def create_and_run_deployment(\n project_id: int = Form(...),\n model_id: Text = Form(...),\n version: Text = Form(...),\n model_uri: Text = Form(...),\n type: Text = Form(...) # pylint: disable=redefined-builtin\n) -> JSONResponse:\n\n deploy_manager = DeployManager()\n deployment_id = deploy_manager.create_deployment(\n project_id, model_id, version, model_uri, type\n )\n return JSONResponse({'deployment_id': str(deployment_id)}, HTTPStatus.ACCEPTED)",
"def create_application(name=None, description=None):\n pass",
"def create_plugin_manager():\n plugin_manager = PiPluginManager(hookspecs.hookspec.project_name)\n plugin_manager.add_hookspecs(hookspecs)\n return plugin_manager",
"def createManagedDomain():\n selectCustomTemplate(localTemplate)\n loadTemplates()\n # set the Node Manager listen address and listen port.\n cd('/')\n cd('NMProperties')\n set('ListenAddress', hostname)\n #create the domain\n writeDomain(domainPath)",
"def create_app(StackId=None, Shortname=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass",
"def create_environment(args):\n env.username = args.user\n env.password = args.password\n env.service_url = args.service_url\n env.quiet = args.quiet\n env.verbose = args.verbose\n env.manifest = args.manifest\n env.debug = args.debug\n env.always_confirm = args.yes\n env.args = args\n env.api = ravello.RavelloClient(env.username, env.password, env.service_url)",
"def create(self, dependencies):\n deps = ' '.join([f\"'{_}'\"\n .replace(' >=', '>=')\n .replace(' <=', '<=')\n .replace(' ', '=')\n .replace('*', '')\n for _ in dependencies])\n try:\n utils.run_in_bash(\n f'{CONDA_BIN} create -y -q -n {self.name} {deps}')\n except CalledProcessError as err:\n inform.error(f'Couldn\\'t create environment {self.name}. '\n 'Following error occured:')\n print(err.output.strip().decode('ascii'))\n inform.error('Please check your meta.yaml-file and if '\n 'dependencies are available.')\n inform.critical()",
"async def create_bot_manager(self, guild):\n role_settings = {\"name\": self.manager_role,\n \"permissions\": discord.Permissions.all(),\n \"hoist\": False,\n \"mentionable\": False,\n \"color\": discord.Colour.from_rgb(0, 0, 1)}\n await guild.create_role(**role_settings)",
"def create_manager(\n pdb_hierarchy,\n geometry_restraints_manager,\n fmodel,\n wavelength,\n params,\n resolution_factor = 0.25,\n nproc = Auto,\n verbose = False,\n log = None,\n manager_class=None):\n connectivity = \\\n geometry_restraints_manager.shell_sym_tables[0].full_simple_connectivity()\n if (manager_class is None):\n manager_class = manager\n manager_obj = manager_class(\n fmodel = fmodel,\n pdb_hierarchy = pdb_hierarchy,\n xray_structure = fmodel.xray_structure,\n connectivity = connectivity,\n wavelength = wavelength,\n params = params,\n nproc = nproc,\n verbose = verbose,\n log = log)\n return manager_obj",
"def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv",
"def _create_app_instance(script_info):\n return create_app()",
"def cmd_creator():\n return OpenMayaMPx.asMPxPtr(AzureBatchSetup())",
"def create_manager(self, name, pos, dept):\n self.manager[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'senior': [],\n 'junior': [],\n 'trainee': []\n }\n )",
"def create_app(self):\n raise NotImplementedError",
"def create(self, **kwargs):\n resource = self.resource.create(kwargs)\n if 'admin_token' in kwargs:\n resource.context.authorize('Gem-Application',\n api_token=resource.api_token,\n admin_token=kwargs['admin_token'])\n app = self.wrap(resource)\n return self.add(app)",
"def new_deployment(request, recipe, **_kwargs):\n return create_view(\n request, _(\"Deployment of recipe '%s'\") % recipe, DeploymentForm, recipe=recipe\n )",
"def createManager(firstName, lastName, ssn, salary, title, yearBonus):\n manager = Manager(firstName, lastName, ssn, salary, title, yearBonus)\n if firstName != manager.firstName or \\\n lastName != manager.lastName or \\\n ssn != manager.ssn or \\\n salary != manager.salary or \\\n title != manager.title or \\\n yearBonus != manager.yearBonus:\n raise ValueError(\"Failed to initialize Manager\")\n return manager",
"def create():\n\n return App()",
"def create_model(ModelName=None, PrimaryContainer=None, Containers=None, ExecutionRoleArn=None, Tags=None, VpcConfig=None, EnableNetworkIsolation=None):\n pass",
"def step_create(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console create \"\n '--extended-location name={extendedLocation} type=\"CustomLocation\" --location {location} '\n \"--enabled {enabled} --expiration {expiration} --tags {tags} \"\n \"--ssh-public-key {sshPublicKey} --resource-group {resourceGroup} \"\n \"--virtual-machine-name {virtualMachineName}\",\n checks=checks,\n )",
"def create_stack(Name=None, Description=None, DisplayName=None, StorageConnectors=None, RedirectURL=None, FeedbackURL=None, UserSettings=None, ApplicationSettings=None):\n pass"
] | [
"0.6148503",
"0.6106779",
"0.59578663",
"0.59382886",
"0.5600401",
"0.5570314",
"0.5496767",
"0.5469237",
"0.5436702",
"0.54037696",
"0.53826296",
"0.53341144",
"0.5308474",
"0.53061575",
"0.5288264",
"0.5222923",
"0.51940596",
"0.51562995",
"0.5129845",
"0.51202434",
"0.51106274",
"0.5109671",
"0.51069176",
"0.50966483",
"0.5091802",
"0.50879455",
"0.5075071",
"0.5071832",
"0.50599194",
"0.50590116"
] | 0.72286177 | 0 |
Updates a Deployment Manager. [Arguments] | def fusion_api_update_deployment_manager(self, body=None, uri=None, api=None, headers=None):
return self.dep_mgr.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def manager_update(self, manager, config):\n self.request('/v1.1/managers/configs/%s' % manager, 'POST', body=config)",
"def update_podmanager(cls, podmanager_uuid, values):\n return cls.dbdriver.update_podmanager(podmanager_uuid, values)",
"def fusion_api_update_hypervisor_manager(self, body=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.update(body=body, uri=uri, api=api, headers=headers)",
"def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)",
"def fusion_api_create_deployment_manager(self, body, api=None, headers=None):\n return self.dep_mgr.create(body=body, api=api, headers=headers)",
"def update(self, **kwargs):\n self.manager.update(self, **kwargs)",
"def update(self, **kwargs):\n self.manager.update(self, **kwargs)",
"def upsert(version_manager, request):\n return version_manager.save_version_manager()",
"def update(self, args):\n pass",
"def edit_deployment(request, deployment, **_kwargs):\n pass",
"async def light_manager_update(request: Request, call_next):\n\n logger.debug(\"pre manager.update\")\n busylightapi.manager.update()\n logger.debug(\"post manager.update\")\n return await call_next(request)",
"def update(self, system, environment_input):\n pass",
"def update(*args):",
"def get_deployment_updates_manager(preview=False):\n if preview:\n return current_app.config.setdefault(\n 'deployment_updates_preview_manager',\n DeploymentUpdateManager(get_read_only_storage_manager())\n )\n return current_app.config.setdefault(\n 'deployment_updates_manager',\n DeploymentUpdateManager(get_storage_manager())\n )",
"def update(self, adt=None, url=None, params=None):\n if not self._id_exists():\n abort(404, f\"Application with ID {self.app_id} does not exist\")\n elif not self.engine.app_list:\n abort(404, \"There are no currently running applications\")\n\n path = self._get_path(adt, url)\n tpl, adaps = self._validate(path, params, validate_only=True)\n try:\n self.engine.update(self.app_id, tpl, adaps)\n except Exception as error:\n abort(500, f\"Error while updating: {error}\")\n\n return {\"message\": f\"Application {self.app_id} successfully updated\"}",
"def test_update_deployment(self):\n pass",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)",
"def do_project_update(cs, args):\n raise NotImplementedError",
"def run_update():\n parser = ArgumentParser()\n subparsers = parser.add_subparsers(title=\"Commands\",\n help=\"Use <command> --help for more information about command.\")\n\n parser_result = subparsers.add_parser('result',\n description=\"Changes metadata of result file(s).\",\n help=\"Change result file metadata.\")\n parser_result.add_argument('name',nargs='?',default=None,help=\"Results file or directory with result files\")\n parser_result.add_argument('-a','--arch',help=\"Update result(s): set ARCH\")\n parser_result.add_argument('-p','--person',help=\"Update result(s): set PERSON\")\n parser_result.add_argument('-s','--sequence',type=int,help=\"Update result(s): set SEQUENCE NUMBER\")\n parser_result.set_defaults(func=script_runner.cmd_update_results)\n\n parser_repository = subparsers.add_parser('repository',\n description=\"Update local test repository from Firebird project Subversion repository.\",\n help=\"Update test repository.\")\n parser_repository.set_defaults(func=script_runner.cmd_update_repository)\n\n args = parser.parse_args()\n args.func(args)",
"def update_app(self):\n\n param = self.chose_param_value(\"--app\")\n self._check_path_availability([\"get_project_dir\", \"get_project_dir_to\"])\n if self._check_whether_has_params(param):\n self.updater.update_files(\n self.analizer.get_project_dir(),\n self.analizer.get_project_dir_to(),\n param\n )\n return self.write_debug_message(\"App files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about application files\")",
"async def update(\n app: AppIdentity,\n repo: str,\n id: str,\n name: str,\n):\n repo = RepoName.parse(repo)\n\n action = checks.UpdateRun(\n owner=repo.owner,\n repo=repo.repo,\n run=checks.RunDetails(\n id=id,\n name=name,\n status=checks.Status.in_progress,\n ))\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n\n async with action.execute(sesh) as resp:\n logging.debug(resp)\n\n try:\n resp.raise_for_status()\n except Exception:\n logging.exception((await resp.json())[\"message\"])\n raise\n\n print(await resp.json())",
"def fusion_api_delete_deployment_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dep_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_edit_san_manager(self, body, uri, api=None, headers=None):\n return self.dm.update(body, uri, api, headers)",
"def update(self,\n provider_id,\n provider_deployment_map_id,\n provider_deployment_map,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'provider_deployment_map_id': provider_deployment_map_id,\n 'provider_deployment_map': provider_deployment_map,\n })",
"def update(self, context, id_, update_data):\n run_playbook = update_data.get(\"run_playbook\", True)\n\n try:\n _validate_update(context, self.db_api, update_data, id_,\n eon_const.EON_RESOURCE_MANAGER)\n _resource_mgr_data = _make_response(\n self.db_api.get_resource_manager(context, id_))\n resource_mgr_type = _resource_mgr_data.get('type')\n resource_mgr_driver = driver.load_resource_mgr_driver(\n resource_mgr_type)\n\n if resource_mgr_type == eon_const.EON_RESOURCE_MGR_TYPE_VCENTER:\n name = update_data.get(\"name\")\n if name and name != _resource_mgr_data.get(\"name\"):\n msg = (_(\"vCenter name cannot be updated\"))\n raise exception.UpdateException(msg=msg)\n\n _resource_mgr_data_update = deepcopy(_resource_mgr_data)\n _resource_mgr_data_update.update(update_data)\n LOG.info(\"Updating resource manager : %s\",\n logging.mask_password(_resource_mgr_data_update))\n\n _is_creds_changed = self._is_creds_changed(\n _resource_mgr_data, _resource_mgr_data_update)\n if _is_creds_changed:\n LOG.debug(\"[%s] Validating the updated credentials/Ip \"\n \"address\" % id_)\n resource_mgr_driver.validate_update(_resource_mgr_data_update,\n _resource_mgr_data)\n # Gets the activated resources for the resource manager\n resources_data = self._get_resources(context,\n _resource_mgr_data_update,\n eon_const.EON_RESOURCE_STATE_ACTIVATED)\n\n resource_mgr_driver.update_vc_pass_through(\n context, _resource_mgr_data_update)\n if resources_data and run_playbook:\n self.db_api.update_resource_mgr_property(context,\n \"update_property\",\n id_, key=eon_const.RESOURCE_MGR_STATE_KEY,\n value=eon_const.EON_RESOURCE_MANAGER_STATE_UPDATING)\n eventlet.spawn_n(resource_mgr_driver.update,\n context, id_, resource_inventory=resources_data)\n\n self.db_api.update_resource_manager(context, id_,\n _resource_mgr_data_update)\n props = self.db_api.get_resource_mgr_properties(context,\n id_, key=eon_const.RESOURCE_MGR_STATE_KEY)\n return _make_response(_resource_mgr_data_update,\n property_list=props)\n\n except Exception as e:\n LOG.exception(e)\n msg = (_(\"Updating resource manager failed. Reason: '%s'\")\n % e.message)\n log_msg = ((\"Updating resource manager failed. Reason: '%s'\")\n % e.message)\n LOG.error(log_msg)\n raise exception.UpdateException(msg=msg)",
"def vm_update(args):\n ip1 = args.ip1\n flavor = args.flavor\n numcpus = args.numcpus\n memory = args.memory\n plan = args.plan\n autostart = args.autostart\n noautostart = args.noautostart\n dns = args.dns\n host = args.host\n domain = args.domain\n cloudinit = args.cloudinit\n template = args.template\n net = args.network\n information = args.information\n iso = args.iso\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for name in names:\n if dns:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n if net is not None:\n nets = [net]\n else:\n nets = k.vm_ports(name)\n if nets and domain is None:\n domain = nets[0]\n if not nets:\n return\n else:\n k.reserve_dns(name=name, nets=nets, domain=domain, ip=ip1)\n elif ip1 is not None:\n common.pprint(\"Updating ip of vm %s to %s...\" % (name, ip1))\n k.update_metadata(name, 'ip', ip1)\n elif cloudinit:\n common.pprint(\"Removing cloudinit information of vm %s\" % name)\n k.remove_cloudinit(name)\n return\n elif plan is not None:\n common.pprint(\"Updating plan of vm %s to %s...\" % (name, plan))\n k.update_metadata(name, 'plan', plan)\n elif template is not None:\n common.pprint(\"Updating template of vm %s to %s...\" % (name, template))\n k.update_metadata(name, 'template', template)\n elif memory is not None:\n common.pprint(\"Updating memory of vm %s to %s...\" % (name, memory))\n k.update_memory(name, memory)\n elif numcpus is not None:\n common.pprint(\"Updating numcpus of vm %s to %s...\" % (name, numcpus))\n k.update_cpus(name, numcpus)\n elif autostart:\n common.pprint(\"Setting autostart for vm %s...\" % name)\n k.update_start(name, start=True)\n elif noautostart:\n common.pprint(\"Removing autostart for vm %s...\" % name)\n k.update_start(name, start=False)\n elif information:\n common.pprint(\"Setting information for vm %s...\" % name)\n k.update_descrmation(name, information)\n elif iso is not None:\n common.pprint(\"Switching iso for vm %s to %s...\" % (name, iso))\n k.update_iso(name, iso)\n elif flavor is not None:\n common.pprint(\"Updating flavor of vm %s to %s...\" % (name, flavor))\n k.update_flavor(name, flavor)\n elif host:\n common.pprint(\"Creating Host entry for vm %s...\" % name)\n nets = k.vm_ports(name)\n if not nets:\n return\n if domain is None:\n domain = nets[0]\n k.reserve_host(name, nets, domain)",
"def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def update_app(AppId=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass",
"def update(self, *args, **kwargs):\n pass"
] | [
"0.66454226",
"0.6606417",
"0.5710404",
"0.5449812",
"0.5437378",
"0.5316729",
"0.5316729",
"0.53141195",
"0.52908653",
"0.52317894",
"0.5228793",
"0.520537",
"0.52035236",
"0.5133824",
"0.51167214",
"0.5114726",
"0.5045032",
"0.503883",
"0.5027585",
"0.49912292",
"0.49884212",
"0.49776164",
"0.49753323",
"0.4971789",
"0.49608052",
"0.49212644",
"0.49210563",
"0.4914746",
"0.49125367",
"0.48969024"
] | 0.7084983 | 0 |
Deletes a Deployment Manager. [Arguments] | def fusion_api_delete_deployment_manager(self, name=None, uri=None, api=None, headers=None):
return self.dep_mgr.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_podmanager(cls, podmanager_uuid):\n cls.dbdriver.delete_podmanager(podmanager_uuid)",
"def manager_remove(self, manager):\n self.request('/v1.1/managers/configs/%s' % manager, 'DELETE')",
"def delete_deployment(request, deployment, **_kwargs):\n pass",
"def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self, uuid):\n try:\n pmanager = PushManager.query.filter_by(\n uuid=uuid\n ).one_or_none()\n if pmanager is None:\n raise GatlinException(\"App not exist\", 404)\n self._provider.delete_platform(pmanager.sns_arn)\n pmanager.delete()\n except GatlinException as exception:\n raise exception",
"def delete(args):\n if args.tag is not None:\n tag = str(args.tag)\n interface = DigitalOceanSetup.create_interface()\n # Delete everything matching the tag\n interface.destroy_machines_by_tag(tag)\n elif args.delete_list:\n server_list = read_server_file()\n if len(server_list) == 1:\n interface = DigitalOceanSetup.create_interface()\n droplet_details = server_list[0]\n # Download the save game from the server\n if args.save:\n eprint(\"Running Ansible...\")\n os.environ[\"ANSIBLE_HOST_KEY_CHECKING\"] = \"False\"\n process = subprocess.Popen([\"ansible-playbook\", \"-i\",\n droplet_details[\"name\"] + \",\",\n \"--private-key\", \"~/.ssh/id_rsa\",\n \"save-factorio.yml\"],\n stdout=subprocess.PIPE)\n out, _ = process.communicate()\n eprint(out)\n # Now destory the droplet\n interface.destroy_machine_by_id(droplet_details[\"id\"])\n # Save empty list to file\n save_dict_to_file(\"servers.json\", [])\n else:\n eprint(\"Too many or no items in server list.\")\n else:\n eprint(\"Missing arguments.\")",
"def delete(self):\n self.manager.delete(self)",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def delete_controller(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns cluser delete name\")\n config.delete_object(name=args[0], kind='Controller')",
"def test_delete_deployment(self):\n pass",
"def rm(args):\n args.delete = True\n return remove(args)",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # prefix is explained in the service_create().\n service = 'tripleo_' + container\n\n sysd_unit_f = systemctl.format_name(service)\n sysd_health_f = systemctl.format_name(service + '_healthcheck')\n sysd_timer_f = service + '_healthcheck.timer'\n sysd_health_req_d = sysd_unit_f + '.requires'\n\n for sysd_f in sysd_unit_f, sysd_health_f, sysd_timer_f:\n if os.path.isfile(sysdir + sysd_f):\n log.debug('Stopping and disabling systemd service for %s' %\n service)\n try:\n systemctl.stop(sysd_f)\n systemctl.disable(sysd_f)\n except systemctl.SystemctlException:\n log.exception(\"systemctl failed\")\n raise\n log.debug('Removing systemd unit file %s' % sysd_f)\n os.remove(sysdir + sysd_f)\n else:\n log.info('No systemd unit file was found for %s' % sysd_f)\n\n # Now that the service is removed, we can remove its \".requires\"\n if os.path.exists(os.path.join(sysdir, sysd_health_req_d)):\n log.info('Removing healthcheck require for %s' % service)\n shutil.rmtree(os.path.join(sysdir, sysd_health_req_d))",
"def delete_container(self, container: Container):",
"def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)",
"def delete(self, request, app_id, addon_name):\n addon = Addon.objects.get(app__app_id=app_id, display_name=addon_name)\n provider = get_provider_from_provider_name(addon.provider_name)\n result = provider.deprovision(addon.provider_uuid)\n manager = StateMachineManager()\n with manager.transition(addon.id, AddonEvent.deprovision_success):\n pass\n manager.start_task(addon.id)\n return self.respond({'message': result['message']})",
"def delete_machine(args):\n session = Session()\n # the following is used to help with code completion\n \"\"\"session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\"\"\"\n machine = session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).first()\n if machine is not None:\n print \"Deleting machine with hostname: \" + machine.hostname + \" and with id: \" + str(machine.id)\n session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\n else:\n print \"No machine was found!\"",
"def delete_provider(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n print \"USAGE: molns provider delete name\"\n return\n config.delete_object(name=args[0], kind='Provider')",
"def delete(self, request, m_name):\n machine = Machine.objects.get(name=m_name)\n machine.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete_command(ctx, path, change_set_name, yes):\n context = SceptreContext(\n command_path=path,\n command_params=ctx.params,\n project_path=ctx.obj.get(\"project_path\"),\n user_variables=ctx.obj.get(\"user_variables\"),\n options=ctx.obj.get(\"options\"),\n ignore_dependencies=ctx.obj.get(\"ignore_dependencies\"),\n full_scan=True,\n )\n\n plan = SceptrePlan(context)\n plan.resolve(command=\"delete\", reverse=True)\n\n if change_set_name:\n delete_msg = (\n \"The Change Set will be delete on the following stacks, if applicable:\\n\"\n )\n else:\n delete_msg = \"The following stacks, in the following order, will be deleted:\\n\"\n\n dependencies = \"\"\n for stack in plan:\n dependencies += \"{}{}{}\\n\".format(Fore.YELLOW, stack.name, Style.RESET_ALL)\n\n print(delete_msg + \"{}\".format(dependencies))\n\n confirmation(\n plan.delete.__name__, yes, change_set=change_set_name, command_path=path\n )\n if change_set_name:\n plan.delete_change_set(change_set_name)\n else:\n responses = plan.delete()\n exit(stack_status_exit_code(responses.values()))",
"def test_delete_deployment_run(self):\n pass",
"def del_segm(*args):\n return _ida_segment.del_segm(*args)",
"def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')",
"def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)",
"def delete_container(ContainerName=None):\n pass",
"async def delete_model(\n delete_model_request: DeleteModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/delete_model endpoint\")\n logging.debug(f\"Request: {delete_model_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().delete_model_controller(\n request=delete_model_request\n )\n return ManageModelResponse(**response)\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/delete_model endpoint: {error}\")\n raise error"
] | [
"0.69262546",
"0.66423124",
"0.6501913",
"0.6232213",
"0.5951453",
"0.5951453",
"0.58923715",
"0.5875943",
"0.58508486",
"0.57864344",
"0.5729849",
"0.57091665",
"0.5703853",
"0.56583726",
"0.5641767",
"0.5613142",
"0.5593527",
"0.55735373",
"0.5569031",
"0.55578",
"0.555631",
"0.5546045",
"0.549666",
"0.5492266",
"0.5489849",
"0.5473848",
"0.54641503",
"0.5461764",
"0.546026",
"0.5447403"
] | 0.7267033 | 0 |
Gets a hypervisor Manager. [Arguments] | def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):
return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetManager(self):\r\n\r\n return self.manager",
"def get_manager():\n\n return multiprocessing.Manager()",
"def getManager(self):\n return self._manager",
"def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)",
"def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager",
"def get_manager():\n return __manager__",
"def fusion_api_create_hypervisor_manager(self, body, api=None, headers=None):\n return self.hypervisor_mgr.create(body=body, api=api, headers=headers)",
"def plugins_get_mgr():\n global pluginmgr\n return pluginmgr",
"def getProxyManager(address=None):\n return __mgr_cache__[address]",
"def get_mgr(cls, id):\n assert id in cls.s_memory_mgrs, 'invalid id[%s] for memory managers' % (\n id)\n return cls.s_memory_mgrs[id]",
"def get_device_manager(device_model: str):\n return _get_device_handler_or_manager(device_model, True)",
"def _retrieve_manager(provider_id):\n provider = _retrieve_provider(provider_id)\n MachineManager = provider.get_provider_manager()\n return MachineManager(provider)",
"def getServiceManager( cHost=\"localhost\", cPort=\"2002\" ):\n global goServiceManager\n global pythonloader\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n\n goServiceManager=oLocalContext.ServiceManager\n\n return goServiceManager",
"def get_health_monitor(self):\n return self.manager.get_health_monitor(self)",
"def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info",
"def GetAuiManager(self):\r\n\r\n return self._mgr",
"def get_entity_manager(self):\n return self.game.entity_manager",
"def systems_manager_agent(self) -> Optional['outputs.ImageRecipeSystemsManagerAgent']:\n return pulumi.get(self, \"systems_manager_agent\")",
"def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]",
"def manager():\n pass",
"def get_provider_manager(osid, runtime=None, proxy=None, local=False):\n if runtime is not None and not local:\n try:\n # Try to get the manager from the runtime, if available:\n config = runtime.get_configuration()\n parameter_id = Id('parameter:' + osid.lower() + 'ProviderImpl@mongo')\n impl_name = config.get_value_by_parameter(parameter_id).get_string_value()\n return runtime.get_manager(osid, impl_name) # What about ProxyManagers?\n except (AttributeError, KeyError, NotFound):\n pass\n # Try to return a Manager from this implementation, or raise OperationFailed:\n try:\n module = import_module('dlkit.mongo.' + osid.lower() + '.managers')\n manager = getattr(module, osid.title() + 'Manager')()\n except (ImportError, AttributeError):\n raise OperationFailed()\n if runtime is not None:\n manager.initialize(runtime)\n return manager",
"def getFeatureManager(address=None):\n return __mgr_cache__[address]",
"def getServiceManager( cHost=\"localhost\", cPort=\"8100\" ):\n global goServiceManager\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n oLocalResolver = oLocalContext.ServiceManager.createInstanceWithContext(\n \"com.sun.star.bridge.UnoUrlResolver\", oLocalContext )\n # Connect to the running OpenOffice.org and get its context.\n oContext = oLocalResolver.resolve( \"uno:socket,host=\" + cHost + \",port=\" + cPort + \";urp;StarOffice.ComponentContext\" )\n # Get the ServiceManager object\n goServiceManager = oContext.ServiceManager\n return goServiceManager",
"def _FindPaneManager(self):\n event = aui.AuiManagerEvent(aui.wxEVT_AUI_FIND_MANAGER)\n self.ProcessEvent(event)\n return event.GetManager()",
"def get_managers():\n return {'managers': get_users('managers')}",
"def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body",
"def getProcessManager(self): \n \n return self.procmgr",
"def get_hypervisor(self, graph_db):\n node = neo_resource.get_node_by_property(graph_db,\n self.label,\n property_key='hostname',\n property_value=self.hostname)\n return node",
"def mgmt_tool(self) -> MgmtClient:\n return self._mgmt_tool"
] | [
"0.6979449",
"0.67611295",
"0.66910297",
"0.6666388",
"0.66302234",
"0.65858084",
"0.6492841",
"0.61952776",
"0.61311185",
"0.6104131",
"0.59339416",
"0.58791596",
"0.5862384",
"0.5799686",
"0.57992333",
"0.57717127",
"0.57673347",
"0.5757488",
"0.5752421",
"0.5737294",
"0.57342094",
"0.5718659",
"0.5673235",
"0.5633545",
"0.5576884",
"0.5549024",
"0.5543657",
"0.55370295",
"0.55298835",
"0.5493846"
] | 0.800843 | 0 |
Creates a hypervisor Manager. [Arguments] | def fusion_api_create_hypervisor_manager(self, body, api=None, headers=None):
return self.hypervisor_mgr.create(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_manager(self, username, tenancy):\n raise NotImplementedError",
"def create_manager(\n pdb_hierarchy,\n geometry_restraints_manager,\n fmodel,\n wavelength,\n params,\n resolution_factor = 0.25,\n nproc = Auto,\n verbose = False,\n log = None,\n manager_class=None):\n connectivity = \\\n geometry_restraints_manager.shell_sym_tables[0].full_simple_connectivity()\n if (manager_class is None):\n manager_class = manager\n manager_obj = manager_class(\n fmodel = fmodel,\n pdb_hierarchy = pdb_hierarchy,\n xray_structure = fmodel.xray_structure,\n connectivity = connectivity,\n wavelength = wavelength,\n params = params,\n nproc = nproc,\n verbose = verbose,\n log = log)\n return manager_obj",
"def create_manager(app, db):\n manager = Manager(app)\n\n manager.add_command(\"runserver\", Server())\n\n def make_shell_context():\n return dict(app=app, db=db)\n manager.add_command(\"shell\", Shell(make_context=make_shell_context))\n\n return manager",
"def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def create_plugin_manager():\n plugin_manager = PiPluginManager(hookspecs.hookspec.project_name)\n plugin_manager.add_hookspecs(hookspecs)\n return plugin_manager",
"def create_podmanager(cls, values):\n return cls.dbdriver.create_podmanager(values)",
"def createMachine():\n cd('/')\n machine = create(machineName, 'UnixMachine')\n cd('Machines/'+machineName+'/NodeManager/'+machineName)\n cmo.setName(machineName)\n cmo.setListenAddress(hostname)",
"def create_machine(self, rack, hyp, address, user, password,\n datastore, vswitch):\n log.info(\"Adding %s hypervisor at %s...\" % (hyp, address))\n datacenter = rack.getDatacenter()\n\n # Discover machine info with the Discovery Manager remote service\n machine = datacenter.discoverSingleMachine(address, hyp,\n user, password)\n for ds in machine.getDatastores():\n log.debug(\"Datastore found: %s-%s\" %\n (ds.getName(), ds.getRootPath()))\n\n # Verify that the desired datastore and virtual switch exist\n datastore = machine.findDatastore(datastore)\n nst = datacenter.defaultNetworkServiceType()\n vswitch = machine.findAvailableVirtualSwitch(vswitch)\n\n datastore.setEnabled(True)\n vswitch.setNetworkServiceType(nst)\n machine.setRack(rack)\n\n machine.save()\n\n return machine",
"def test_initialize_hypervisor(self, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar', 'hypervisor': 'baz'})\n libvirt_mock.open.assert_called_with('baz')\n create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)",
"async def create_bot_manager(self, guild):\n role_settings = {\"name\": self.manager_role,\n \"permissions\": discord.Permissions.all(),\n \"hoist\": False,\n \"mentionable\": False,\n \"color\": discord.Colour.from_rgb(0, 0, 1)}\n await guild.create_role(**role_settings)",
"def manager():\n pass",
"def add_manager(self, agent):\n with self.simulation_mutex:\n self.get(\"manager_agents\")[agent.name] = agent",
"def init_manager(params: dict) -> SyslogManager:\n address = params.get('address')\n protocol = params.get('protocol', UDP).lower()\n facility = FACILITY_DICT.get(params.get('facility', 'LOG_SYSLOG'), SysLogHandler.LOG_SYSLOG)\n logging_level = LOGGING_LEVEL_DICT.get(params.get('priority', 'LOG_INFO'), INFO)\n certificate: Optional[str] = (replace_spaces_in_credential(params.get('certificate', {}).get('password'))\n or params.get('certificate', None))\n certificate_path: Optional[str] = None\n default_port: int = DEFAULT_TLS_SYSLOG_PORT if protocol == 'tls' else DEFAULT_TCP_SYSLOG_PORT\n port = arg_to_number(params.get('port'), required=False) or default_port\n self_signed_certificate = params.get('self_signed_certificate', False)\n if not address:\n raise DemistoException('A address must be provided.')\n if port and (port < 0 or MAX_PORT < port):\n raise DemistoException(f'Given port: {port} is not valid and must be between 0-{MAX_PORT}')\n if protocol == 'tls' and not certificate:\n raise DemistoException('A certificate must be provided in TLS protocol.')\n if certificate and protocol == 'tls':\n certificate_path = prepare_certificate_file(certificate)\n return SyslogManager(address, port, protocol, logging_level, facility, certificate_path, self_signed_certificate)",
"def create_manager(self, name, pos, dept):\n self.manager[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'senior': [],\n 'junior': [],\n 'trainee': []\n }\n )",
"def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv",
"def manager_factory(manager_type):\n return {\n 'web': WebManager,\n 'github': GitHubManager,\n 'apkdownloadmirror': ApkDownloadMirrorManager,\n 'apkplz': ApkPlzManager,\n }[manager_type]",
"def launch_instance_manager():\n # Todo: Use name servers in the docker contexct (set up a docker compose?)\n # pyro4-ns\n parser = argparse.ArgumentParser(\"python3 launch_instance_manager.py\")\n parser.add_argument(\"--seeds\", type=str, default=None, \n help=\"The default seed for the environment.\")\n parser.add_argument(\"--seeding_type\", type=str, default=SeedType.CONSTANT, \n help=\"The seeding type for the environment. Defaults to 1 (CONSTANT)\"\n \"if a seed specified, otherwise 0 (NONE): \\n{}\".format(SeedType.__doc__))\n\n \n parser.add_argument(\"--max_instances\", type=int, default=None,\n help=\"The maximum number of instances the instance manager is able to spawn,\"\n \"before an exception is thrown. Defaults to Unlimited.\")\n opts = parser.parse_args()\n\n \n if opts.max_instances is not None:\n assert opts.max_instances > 0, \"Maximum instances must be more than zero!\"\n InstanceManager.MAXINSTANCES = opts.max_instances\n \n\n try:\n print(\"Removing the performance directory!\")\n try:\n shutil.rmtree(InstanceManager.STATUS_DIR)\n except:\n pass\n finally:\n if not os.path.exists(InstanceManager.STATUS_DIR):\n os.makedirs(InstanceManager.STATUS_DIR)\n print(\"autoproxy?\",Pyro4.config.AUTOPROXY)\n InstanceManager.REMOTE = True\n Pyro4.config.COMMTIMEOUT = InstanceManager.KEEP_ALIVE_PYRO_FREQUENCY \n\n # Initialize seeding.\n if opts.seeds is not None:\n InstanceManager._init_seeding(seeds=opts.seeds, seed_type=opts.seeding_type)\n else:\n InstanceManager._init_seeding(seed_type=SeedType.NONE)\n\n \n Pyro4.Daemon.serveSimple(\n {\n InstanceManager: INSTANCE_MANAGER_PYRO\n },\n ns = True)\n \n except Pyro4.errors.NamingError as e:\n print(e)\n print(\"Start the Pyro name server with pyro4-ns and re-run this script.\")",
"def new_manager() -> SyncManager:\n return Manager()",
"def createManagedDomain():\n selectCustomTemplate(localTemplate)\n loadTemplates()\n # set the Node Manager listen address and listen port.\n cd('/')\n cd('NMProperties')\n set('ListenAddress', hostname)\n #create the domain\n writeDomain(domainPath)",
"def createWrapper():\n\n # read properties file and get MANO name and IP\n config = RawConfigParser()\n config.read(\"../../coreMano/coreMano.properties\")\n name = config.get(\"CoreMano\", \"coreMano.name\")\n host_ip = config.get(\"CoreMano\", \"coreMano.ip\")\n\n # instanciate and return the MANO\n if name == \"osm\":\n mano = OsmWrapper(name, host_ip)\n if name == \"cloudify\":\n mano = CloudifyWrapper(name, host_ip)\n return mano",
"def create(addr='127.0.0.1', port=0, options=None):\n if options is None:\n options = {}\n\n backend = MitmProxy(addr, port, options)\n\n t = threading.Thread(name='Selenium Wire Proxy Server', target=backend.serve_forever)\n t.daemon = not options.get('standalone')\n t.start()\n\n addr, port, *_ = backend.address()\n log.info('Created proxy listening on %s:%s', addr, port)\n\n return backend",
"def Hypervisor(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)",
"def makeService_Agent(self, options):\n\n # Don't use memcached initially -- calendar server might take it away\n # at any moment. However, when we run a command through the gateway,\n # it will conditionally set ClientEnabled at that time.\n def agentPostUpdateHook(configDict, reloading=False):\n configDict.Memcached.Pools.Default.ClientEnabled = False\n\n config.addPostUpdateHooks((agentPostUpdateHook,))\n config.reload()\n\n # Verify that server root actually exists and is not phantom\n checkDirectory(\n config.ServerRoot,\n \"Server root\",\n access=W_OK,\n wait=True # Wait in a loop until ServerRoot exists and is not phantom\n )\n\n # These we need to set in order to open the store\n config.EnableCalDAV = config.EnableCardDAV = True\n\n def agentServiceCreator(pool, store, ignored, storageService):\n from calendarserver.tools.agent import makeAgentService\n if storageService is not None:\n # Shut down if DataRoot becomes unavailable\n from twisted.internet import reactor\n dataStoreWatcher = DirectoryChangeListener(\n reactor,\n config.DataRoot,\n DataStoreMonitor(reactor, storageService)\n )\n dataStoreWatcher.startListening()\n if store is not None:\n store.queuer = NonPerformingQueuer()\n return makeAgentService(store)\n\n uid, gid = getSystemIDs(config.UserName, config.GroupName)\n svc = self.storageService(\n agentServiceCreator, None, uid=uid, gid=gid\n )\n agentLoggingService = ErrorLoggingMultiService(\n config.ErrorLogEnabled,\n config.AgentLogFile,\n config.ErrorLogRotateMB * 1024 * 1024,\n config.ErrorLogMaxRotatedFiles,\n config.ErrorLogRotateOnStart,\n )\n svc.setName(\"agent\")\n svc.setServiceParent(agentLoggingService)\n return agentLoggingService",
"def create_parallel_manager(parallel_manager_class=None, **kwargs):\n if parallel_manager_class is not None:\n return parallel_manager_class(**kwargs)\n\n if has_mpi_peer_processes():\n return MPIParallelManager(MPI)\n\n number_of_subprocesses = kwargs.get(\"number_of_subprocesses\", 1)\n if should_fan_out(number_of_subprocesses):\n parallel_backend = kwargs.get(\"parallel_back_end\", \"ConcurrentFutures\")\n if parallel_backend == \"ConcurrentFutures\":\n return ConcurrentFuturesParallelManager(number_of_subprocesses)\n elif parallel_backend == \"MultiProcessing\":\n return MultiprocessingParallelManager(number_of_subprocesses)\n elif parallel_backend == \"RayIo\":\n if ray_avaialble:\n return RayIoParallelManager(number_of_subprocesses)\n else:\n raise ModuleNotFoundError(\"Ray is not available\")\n\n else:\n raise NotImplementedError(\n f\"ParallelManager {parallel_backend} is not yet implemented\"\n )\n\n return SingleProcessParallelManager()",
"def __init__(self, binary_manager, stats):\n self.services = {\n 'get_binaries': self._handle_get_binaries,\n 'binaries_received': self._handle_binary_received,\n 'client_result': self._handle_client_result,\n 'runtime_error': self._handle_runtime_error,\n 'verify_inputs_result': self._handle_verify_inputs,\n 'start_server': self._handle_start_server,\n 'stop_server': self._handle_stop_server\n }\n self.binary_manager = binary_manager\n self.__statistics_updater = stats",
"def get_manager():\n\n return multiprocessing.Manager()",
"def create_executor_plugin_manager() -> pluggy.PluginManager:\n pm = create_plugin_manager()\n pm.add_hookspecs(TaskGraphHooks)\n return pm",
"def fusion_api_create_deployment_manager(self, body, api=None, headers=None):\n return self.dep_mgr.create(body=body, api=api, headers=headers)",
"def create_vm(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_CreateVm', self.handle))",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--getErrors\",\n type=str,\n default=None,\n help=\"get error messages - send \\'yes\\' \")\n parser.add_argument(\"--host\",\n type=str,\n default=\"localhost\",\n help=\"Host of redis. Default : localhost\")\n parser.add_argument(\"--port\",\n type=int,\n default=6379,\n help=\"Port of redis. Default : 6379\")\n parser.add_argument(\"--db\",\n type=int,\n default=0,\n help=\"Db of redis. Default : 0\")\n parser.add_argument(\"--cleanTemp\",\n type=str,\n default=None,\n help=\"clean trash files from db - send \\'yes\\' \")\n return parser"
] | [
"0.6740068",
"0.62326854",
"0.6093036",
"0.60881156",
"0.6008856",
"0.5954897",
"0.59497124",
"0.5865377",
"0.5721835",
"0.56874454",
"0.56452775",
"0.56088275",
"0.56063145",
"0.554333",
"0.54826665",
"0.5461298",
"0.5404432",
"0.5404395",
"0.53953975",
"0.5392871",
"0.53741163",
"0.5346039",
"0.5343136",
"0.53369534",
"0.5321462",
"0.53169394",
"0.52909535",
"0.5283789",
"0.522802",
"0.52228546"
] | 0.7718598 | 0 |
Updates a hypervisor Manager. [Arguments] | def fusion_api_update_hypervisor_manager(self, body=None, uri=None, api=None, headers=None):
return self.hypervisor_mgr.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def manager_update(self, manager, config):\n self.request('/v1.1/managers/configs/%s' % manager, 'POST', body=config)",
"def fusion_api_create_hypervisor_manager(self, body, api=None, headers=None):\n return self.hypervisor_mgr.create(body=body, api=api, headers=headers)",
"async def light_manager_update(request: Request, call_next):\n\n logger.debug(\"pre manager.update\")\n busylightapi.manager.update()\n logger.debug(\"post manager.update\")\n return await call_next(request)",
"def update_podmanager(cls, podmanager_uuid, values):\n return cls.dbdriver.update_podmanager(podmanager_uuid, values)",
"def update(self, **kwargs):\n self.manager.update(self, **kwargs)",
"def update(self, **kwargs):\n self.manager.update(self, **kwargs)",
"def update_monitor(request, **kwargs):\n data = request.DATA\n monitor_id = data['monitor']['id']\n hm_type = data['monitor']['type']\n\n conn = get_sdk_connection(request)\n healthmonitor_kwargs = {\n 'delay': data['monitor'].get('delay'),\n 'timeout': data['monitor'].get('timeout'),\n 'max_retries': data['monitor'].get('max_retries'),\n 'max_retries_down': data['monitor'].get('max_retries_down'),\n 'admin_state_up': data['monitor'].get('admin_state_up'),\n 'name': data['monitor'].get('name')\n }\n if hm_type in ('HTTP', 'HTTPS'):\n healthmonitor_kwargs.update({\n 'http_method': data['monitor'].get('http_method'),\n 'url_path': data['monitor'].get('url_path'),\n 'expected_codes': data['monitor'].get('expected_codes')\n })\n\n healthmonitor = conn.load_balancer.update_health_monitor(\n monitor_id,\n **healthmonitor_kwargs\n )\n\n return _get_sdk_object_dict(healthmonitor)",
"def SetManager(self, mgr):\r\n\r\n self.manager = mgr",
"def put(self, name):\n request_dict = get_json_and_verify_params({\n 'node_instance_id': {'type': unicode},\n 'state': {'type': unicode}\n })\n validate_inputs({'name': name})\n state = request_dict.get('state')\n self._validate_state(state)\n\n try:\n return self._create_agent(name, state, request_dict)\n except manager_exceptions.ConflictError:\n return self._update_agent(name, state)",
"def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def update_health_monitor(self, health_monitor, body=None):\r\n return self.put(self.health_monitor_path % (health_monitor), body=body)",
"def update_manager(self):\n if self.name == \"observable\":\n self.fsm_manager.update_latent()\n else:\n self.fsm_manager.update_observable()",
"def Update(self, controller):\n pass",
"def update(self, args):\n pass",
"def fusion_api_update_deployment_manager(self, body=None, uri=None, api=None, headers=None):\n return self.dep_mgr.update(body=body, uri=uri, api=api, headers=headers)",
"def add_manager(self, agent):\n with self.simulation_mutex:\n self.get(\"manager_agents\")[agent.name] = agent",
"def update(self, *args, **kw):\n pass",
"def manager():\n pass",
"def update(self, signals):\n raise NotImplementedError('Agent is an abstract base class')",
"def fusion_api_edit_san_manager(self, body, uri, api=None, headers=None):\n return self.dm.update(body, uri, api, headers)",
"def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def command_update_hw(self, cmd):\n # TODO\n pass",
"def setManager(self, manager=None):\n self._manager = manager",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def _setManager(self, mgr: \"StrategyManager\") -> None:",
"def update_from_router():\n update_items(router, async_add_entities, tracked)",
"def update(self, *args, **kwargs):",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass"
] | [
"0.66171026",
"0.582097",
"0.5712763",
"0.56886685",
"0.56004804",
"0.56004804",
"0.55416906",
"0.55198437",
"0.53832316",
"0.5369145",
"0.5321215",
"0.5302087",
"0.52570873",
"0.52438563",
"0.52284807",
"0.52227217",
"0.52209795",
"0.5214909",
"0.5200389",
"0.5195968",
"0.51824003",
"0.51620835",
"0.5155655",
"0.5154238",
"0.5129872",
"0.5074644",
"0.50489527",
"0.5042335",
"0.5042335",
"0.5042335"
] | 0.77996296 | 0 |
Deletes a hypervisor Manager. [Arguments] | def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):
return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_podmanager(cls, podmanager_uuid):\n cls.dbdriver.delete_podmanager(podmanager_uuid)",
"def manager_remove(self, manager):\n self.request('/v1.1/managers/configs/%s' % manager, 'DELETE')",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self)",
"def delete_health_monitor(self):\n return self.manager.delete_health_monitor(self)",
"def fusion_api_delete_deployment_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dep_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def delete_agent(self, agent):\r\n return self.delete(self.agent_path % (agent))",
"def delete_health_monitor(self, health_monitor):\r\n return self.delete(self.health_monitor_path % (health_monitor))",
"def delete_router(self, router):\r\n return self.delete(self.router_path % (router))",
"def delete_machine(args):\n session = Session()\n # the following is used to help with code completion\n \"\"\"session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\"\"\"\n machine = session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).first()\n if machine is not None:\n print \"Deleting machine with hostname: \" + machine.hostname + \" and with id: \" + str(machine.id)\n session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\n else:\n print \"No machine was found!\"",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def delete_healthmonitor(self, context, healthmonitor):\n LOG.info(\"Received request 'Delete Pool Health Monitor' for \"\n \"Health monitor:%(hm)s\",\n {'hm': healthmonitor['id']})\n arg_dict = {'context': context,\n lb_const.HEALTHMONITOR: healthmonitor\n }\n self._send_event(lb_const.EVENT_DELETE_HEALTH_MONITOR_V2,\n arg_dict, serialize=True,\n binding_key=healthmonitor[lb_const.POOL][\n 'loadbalancer_id'],\n key=healthmonitor['id'])",
"def delete(self):\n os.system(\"rm \"+self._name)",
"async def delete(self):\r\n try:\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent_to_delete = Agent.filter(Agent.uuid == agent_uuid).first()\r\n sys_id = (\r\n System.select().where(System.agent_uuid == agent_to_delete).execute()\r\n )\r\n if sys_id:\r\n logger.error(\"Agent not deleted\")\r\n return web.Response(text=\"Agent not deleted.\")\r\n else:\r\n agent_to_delete.delete_instance()\r\n logger.info(\"Agent deleted successfully\")\r\n return web.Response(text=\"Agent deleted successfully.\")\r\n except Exception as ex:\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=error_message, status=500)",
"def delete_virtual_machine(self, vm):\n try:\n self.client.delete_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)",
"def delete(self, hDevicesList = consts.PRL_INVALID_HANDLE):\n\t\treturn Job(SDK.PrlVm_Delete(self.handle, conv_handle_arg(hDevicesList))[0])",
"def delete(self):\n self.model.remove_agents(self)",
"def delete(self, request, m_name):\n machine = Machine.objects.get(name=m_name)\n machine.delete()\n return HttpResponse(HTTPStatus.OK)",
"def deleteVirtualMachine(self,node,vmid):\n data = self.connect('delete',\"nodes/%s/qemu/%s\" % (node,vmid),None)\n return data",
"def delete_vm(self, account, vm_id):\n node = Node()\n node.id = vm_id\n self.driver(account).destroy_node(node)",
"def _delete_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def delete_entity(self, context, hm):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, MONITORS_RESOURCE,\n hm.id)\n msg = _(\"NetScaler driver healthmonitor removal: %s\") % hm.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)",
"def delete(self, psvm):\n self._delete('/os-psvm/%s' % (base.getid(psvm)))",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def delete(self, oid):\n path = '%s/routers/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack router: %s' % truncate(res))\n return res[0]",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def delete(args):\n if args.tag is not None:\n tag = str(args.tag)\n interface = DigitalOceanSetup.create_interface()\n # Delete everything matching the tag\n interface.destroy_machines_by_tag(tag)\n elif args.delete_list:\n server_list = read_server_file()\n if len(server_list) == 1:\n interface = DigitalOceanSetup.create_interface()\n droplet_details = server_list[0]\n # Download the save game from the server\n if args.save:\n eprint(\"Running Ansible...\")\n os.environ[\"ANSIBLE_HOST_KEY_CHECKING\"] = \"False\"\n process = subprocess.Popen([\"ansible-playbook\", \"-i\",\n droplet_details[\"name\"] + \",\",\n \"--private-key\", \"~/.ssh/id_rsa\",\n \"save-factorio.yml\"],\n stdout=subprocess.PIPE)\n out, _ = process.communicate()\n eprint(out)\n # Now destory the droplet\n interface.destroy_machine_by_id(droplet_details[\"id\"])\n # Save empty list to file\n save_dict_to_file(\"servers.json\", [])\n else:\n eprint(\"Too many or no items in server list.\")\n else:\n eprint(\"Missing arguments.\")",
"def fusion_api_delete_fabric_manager(self, name, uri=None, api=None, headers=None):\n return self.fabricmanager.delete(name=name, uri=uri, api=api, headers=headers)"
] | [
"0.6937779",
"0.67486507",
"0.63681626",
"0.63681626",
"0.6347231",
"0.6175517",
"0.6060668",
"0.59882313",
"0.59756935",
"0.5936945",
"0.59265184",
"0.58496314",
"0.5802527",
"0.5783574",
"0.57668984",
"0.57509613",
"0.57486266",
"0.57470006",
"0.5669473",
"0.55834955",
"0.55784875",
"0.5569647",
"0.5562652",
"0.55623466",
"0.5559015",
"0.55374545",
"0.5537145",
"0.54972947",
"0.54943854",
"0.54889584"
] | 0.806357 | 0 |
Gets a Hypervisor clusters. [Arguments] | def fusion_api_get_hypervisor_clusters(self, uri=None, param='', api=None, headers=None):
return self.hypervisor_clusters.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)",
"def list_clusters(self, **kwargs):\n return self._get_names('SCVMHostCluster')",
"def get_clusters(self):\r\n\r\n return self.__clusters",
"def _get_cluster_list(self):\n return self.__cluster_list",
"def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))",
"def get_clusters(self):\n return self._clusters",
"def get_clusters(self):\n\n return self.__clusters",
"def clusters(self):\n raise NotImplementedError",
"def Clusters(self):\n return",
"def list_vsan_clusters(self, detail=False, params=None, return_body=False):\n url = 'clusters'\n if detail:\n url += '/detail'\n if params:\n url += '?%s' % self._prepare_params(params)\n\n key = None if return_body else 'clusters'\n return self._ext_get(url, key)",
"def listClusters():\n return [c['name'] for c in pymongo.Connection().clovr.clusters.find()]",
"def show_clusters() -> Dict[str, Cluster]:\n environment = EnvironmentProvider().environment\n return {key: value for key, value in environment.clusters.items()}",
"def ListClusters(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_clusters():\n return objects.ClusterCollection.order_by(\n objects.ClusterCollection.all(),\n 'id'\n )",
"def cluster_list():\n request_debug(r, logger)\n json_body = r.get_json(force=True, silent=True) or {}\n result = cluster_handler.list(filter_data=json_body)\n response_ok[\"data\"] = result\n return jsonify(response_ok), CODE_OK",
"def get_clusters(cluster_name: Optional[str] = None,\n cluster_states: Optional[Sequence[str]] = None,\n cluster_types: Optional[Sequence[str]] = None,\n ids: Optional[Sequence[str]] = None,\n max_results: Optional[int] = None,\n name_regex: Optional[str] = None,\n next_token: Optional[str] = None,\n output_file: Optional[str] = None,\n payment_types: Optional[Sequence[str]] = None,\n resource_group_id: Optional[str] = None,\n tags: Optional[Mapping[str, Any]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClustersResult:\n __args__ = dict()\n __args__['clusterName'] = cluster_name\n __args__['clusterStates'] = cluster_states\n __args__['clusterTypes'] = cluster_types\n __args__['ids'] = ids\n __args__['maxResults'] = max_results\n __args__['nameRegex'] = name_regex\n __args__['nextToken'] = next_token\n __args__['outputFile'] = output_file\n __args__['paymentTypes'] = payment_types\n __args__['resourceGroupId'] = resource_group_id\n __args__['tags'] = tags\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('alicloud:emrv2/getClusters:getClusters', __args__, opts=opts, typ=GetClustersResult).value\n\n return AwaitableGetClustersResult(\n cluster_name=pulumi.get(__ret__, 'cluster_name'),\n cluster_states=pulumi.get(__ret__, 'cluster_states'),\n cluster_types=pulumi.get(__ret__, 'cluster_types'),\n clusters=pulumi.get(__ret__, 'clusters'),\n id=pulumi.get(__ret__, 'id'),\n ids=pulumi.get(__ret__, 'ids'),\n max_results=pulumi.get(__ret__, 'max_results'),\n name_regex=pulumi.get(__ret__, 'name_regex'),\n names=pulumi.get(__ret__, 'names'),\n next_token=pulumi.get(__ret__, 'next_token'),\n output_file=pulumi.get(__ret__, 'output_file'),\n payment_types=pulumi.get(__ret__, 'payment_types'),\n resource_group_id=pulumi.get(__ret__, 'resource_group_id'),\n tags=pulumi.get(__ret__, 'tags'),\n total_count=pulumi.get(__ret__, 'total_count'))",
"def test_get_hyperflex_cluster_list(self):\n pass",
"def list(args, config):\n\n api = config['API']\n headers = {}\n if args.stack_name:\n headers = {'stack-name': args.stack_name} # put stack name in headers\n r = requests.get(api['list'], headers=headers) # send the GET request\n print('\\nThe following clusters exist:\\n{}\\n'.format(r.json()))\n return",
"def clusters(self):\n return self._clusters",
"def get_remote_clusters(cohesity_client):\n remote_cluster_list = cohesity_client.remote_cluster.get_remote_clusters()\n for cluster in remote_cluster_list:\n config_dict[cluster.name] = None\n exported_res_dict[\"Remote Clusters\"].append(cluster.name)\n return remote_cluster_list",
"def list_clusters(ctx, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n clusters = ctx.obj.groups[project.id].clusters.get()\n pprint(clusters.data)",
"def get_clusters() -> List[List[str]]:\n all_users = get_user_ids()\n pass",
"def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }",
"def extract_clusters(self, dictionary=None, autorenaming_option=True):\n cluster_list = self.__dendrogram._extract_clusters_by_color()\n return cluster_list if autorenaming_option is False else self.__autorename_clusters(cluster_list, dictionary, 5)",
"def list_coe_clusters(self):\n return list(self.container_infrastructure_management.clusters())",
"def atlas_clusters():\n pass",
"def get_cluster_list():\n\n cluster_list_command = [\"pcluster\", \"list\",\n \"--region\", AWS_REGION]\n\n cluster_list_returncode, cluster_list_stdout, cluster_list_stderr = run_subprocess_proc(cluster_list_command,\n capture_output=True)\n\n cluster_columns = [\"Name\", \"Status\", \"Version\"]\n\n if cluster_list_stdout is not None and not cluster_list_stdout.strip() == \"\":\n clusters_as_df = pd.DataFrame([row.split()\n for row in cluster_list_stdout.strip().split(\"\\n\")],\n columns=cluster_columns)\n else:\n logger.info(\"No clusters found\")\n sys.exit(0)\n\n return clusters_as_df",
"def list_clusters(_filter=None):\n ecs_clusters = __paginate_call(ecs_client, 'list_clusters', 'clusterArns')\n if _filter:\n ecs_clusters = [cluster for cluster in ecs_clusters if _filter in cluster]\n return sorted(ecs_clusters)",
"def get_clusters(self, email: str) -> Optional[List[str]]:\n\n try:\n response = self.session.get(\n url=self.url + '/list',\n headers=self._get_request_header(),\n timeout=self._timeout_config,\n params={\n 'email': email\n }\n )\n except RequestsConnectionError as conn_err:\n message = str(conn_err)\\\n + ' Connection error, WCS clusters were not fetched.'\n raise type(conn_err)(message).with_traceback(sys.exc_info()[2])\n\n if response.status_code == 200:\n return response.json()['clusterIDs']\n raise UnexpectedStatusCodeException('Checking WCS instance', response)",
"def list_ecs_clusters():\n clusters = ECS_MANAGER.list_ecs_clusters()\n\n print(str_sep)\n\n if clusters:\n print(\"Listing clusters ARNs available in {}\"\n .format(SESSION.region_name.upper()))\n print(str_sep)\n for arn in clusters['clusterArns']:\n print(arn)\n\n print(str_sep)"
] | [
"0.7424538",
"0.7212302",
"0.71992284",
"0.7133369",
"0.7071449",
"0.70424414",
"0.697467",
"0.693924",
"0.69356865",
"0.6861388",
"0.68498963",
"0.68434745",
"0.6812175",
"0.6797095",
"0.67725927",
"0.6652517",
"0.66451806",
"0.6602558",
"0.6587589",
"0.6474094",
"0.6470819",
"0.64399344",
"0.6433191",
"0.64136964",
"0.63922614",
"0.6390031",
"0.63598746",
"0.6328048",
"0.6325203",
"0.6310139"
] | 0.80251724 | 0 |
Gets a Hypervisor cluster profile. [Arguments] | def fusion_api_get_hypervisor_cluster_profile(self, uri=None, param='', api=None, headers=None):
return self.cluster_profile.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cluster(self, profile):\n if self._value.has_option(profile, 'cluster'):\n if self._value.has_option(profile, 'cluster'):\n cluster = self._value.get(profile, 'cluster')\n self.logger.info(\"Connecting to: %s cluster\" % cluster)\n else:\n self.logger.error(\n \"No cluster parameter found\"\n )\n exit(1)\n else:\n self.logger.error(\n \"No profile found. Please define a default profile, \\\n or specify a named profile using `--profile`\"\n )\n exit(1)\n return cluster",
"def aks_cluster_profile(self) -> 'outputs.ClusterPoolResourcePropertiesResponseAksClusterProfile':\n return pulumi.get(self, \"aks_cluster_profile\")",
"def fusion_api_get_hypervisor_host_profile(self, uri=None, param='', api=None, headers=None):\n return self.host_profile.get(uri, api, headers, param)",
"def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()",
"def test_get_hyperflex_cluster_profile_list(self):\n pass",
"def get_cluster_autoscaler_profile(self) -> Union[Dict[str, str], None]:\n return self._get_cluster_autoscaler_profile()",
"def compute_profile(self) -> 'outputs.ClusterPoolResourcePropertiesResponseComputeProfile':\n return pulumi.get(self, \"compute_profile\")",
"def get_network_profile(arn=None):\n pass",
"def fusion_api_create_hypervisor_cluster_profile(self, body, api=None, headers=None):\n return self.cluster_profile.create(body=body, api=api, headers=headers)",
"def show_network_profile(self, profile, **params):\r\n return self.get(self.network_profile_path % (profile), params=params)",
"def _get_profile(self):\n return self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"profile\")\n )",
"def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")",
"def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)",
"def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)",
"def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)",
"def test_get_hyperflex_cluster_profile_by_moid(self):\n pass",
"def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None",
"def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')",
"def get_user_profile(self):\n\t\treturn Job(SDK.PrlSrv_GetUserProfile(self.handle)[0])",
"def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def getprofile(): # real signature unknown; restored from __doc__\n pass",
"def get_profile_output(profile_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProfileResult]:\n ...",
"def get_profile():\n # Create the netCDF file\n nc = make_ctd_file()\n\n # Return a profile object with all available chemicals in the CTD data\n return ambient.Profile(nc, chem_names='all')",
"def profile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"profile\")",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def test_create_hyperflex_cluster_profile(self):\n pass",
"def fusion_api_delete_hypervisor_cluster_profile(self, name=None, uri=None, api=None, headers=None):\n return self.cluster_profile.delete(name, uri, api, headers)",
"def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }",
"def get(cls, client, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tif not name :\n\t\t\t\tobj = lbprofile()\n\t\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\telse :\n\t\t\t\tif type(name) is not list :\n\t\t\t\t\tif type(name) == cls :\n\t\t\t\t\t\traise Exception('Invalid parameter name:{0}'.format(type(name)))\n\t\t\t\t\tobj = lbprofile()\n\t\t\t\t\tobj.lbprofilename = name\n\t\t\t\t\tresponse = obj.get_resource(client, option_)\n\t\t\t\telse :\n\t\t\t\t\tif name and len(name) > 0 :\n\t\t\t\t\t\tif type(name[0]) == cls :\n\t\t\t\t\t\t\traise Exception('Invalid parameter name:{0}'.format(type(name[0])))\n\t\t\t\t\t\tresponse = [lbprofile() for _ in range(len(name))]\n\t\t\t\t\t\tobj = [lbprofile() for _ in range(len(name))]\n\t\t\t\t\t\tfor i in range(len(name)) :\n\t\t\t\t\t\t\tobj[i] = lbprofile()\n\t\t\t\t\t\t\tobj[i].lbprofilename = name[i]\n\t\t\t\t\t\t\tresponse[i] = obj[i].get_resource(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e"
] | [
"0.71974623",
"0.645041",
"0.6425894",
"0.6204044",
"0.6123612",
"0.60758156",
"0.6050714",
"0.59964824",
"0.59913784",
"0.59863085",
"0.5854982",
"0.5842499",
"0.58419126",
"0.57995874",
"0.57753825",
"0.5745835",
"0.57405496",
"0.5696487",
"0.56512314",
"0.56255156",
"0.5617346",
"0.56155586",
"0.5571767",
"0.555851",
"0.55481815",
"0.5524049",
"0.55119675",
"0.5456809",
"0.5455689",
"0.5433061"
] | 0.80410475 | 0 |
Creates a hypervisor cluster profile. [Arguments] | def fusion_api_create_hypervisor_cluster_profile(self, body, api=None, headers=None):
return self.cluster_profile.create(body=body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_hyperflex_cluster_profile(self):\n pass",
"def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))",
"def create_cluster(self, provision_details, project_id=\"\"):\n response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'\n ,body=provision_details)\n return response",
"def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)",
"def create_cluster(self, name, cluster_type, params, ssh_key, *args, **kwargs):\n raise NotImplementedError",
"def _create_profile(self, user, profile_dir):\n log.info(\"Writing IPython cluster config files\")\n self._master.ssh.switch_user(user)\n self._master.ssh.execute(\"rm -rf '%s'\" % profile_dir)\n self._master.ssh.execute('ipython profile create')\n self._master.ssh.switch_user('root')",
"def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))",
"def create(*args, **kwargs):\n\n factory = V2ProfileFactory()\n output = factory.create(export_json=True)\n click.echo(output)",
"def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)",
"def _create_profile(self, user, profile_dir):\n log.info(\"Writing IPython cluster config files\")\n self._master.ssh.switch_user(user)\n self._master.ssh.execute(\"rm -rf '%s'\" % profile_dir)\n self._master.ssh.execute('ipython profile create')\n # Add startup files\n\n self._master.ssh.switch_user('root')",
"def create(self):\n print(\"+ Creating cluster: {}. This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. Cluster: {} created.\".format(self.name_hyphenated))",
"def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output",
"def create_new_profile():\n client_nickname = input('Enter client profile name: ')\n client_username = input('Enter client username: ')\n client_hostname = input('Enter client hostname: ')\n client_port = '-p' + input('Enter client port: ')\n new_profile = SshUsers(client_nickname, client_username, client_hostname, client_port)\n return add_user_to_db(new_profile)",
"def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)",
"def cluster_create(self, cluster_name, license):\n return self.request( \"cluster-create\", {\n 'cluster_name': [ cluster_name, 'cluster-name', [ basestring, 'None' ], False ],\n 'license': [ license, 'license', [ basestring, 'license-code-v2' ], False ],\n }, {\n } )",
"def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)",
"def initialize_cluster(cluster):\n logger.info('Creating a new cluster for %s...', cluster)\n\n configuration = ClusterConfiguration(version=__version__)\n ztransaction = cluster.zookeeper.transaction()\n ztransaction.create(cluster.path, BinaryCodec(ClusterConfiguration).encode(configuration))\n ztransaction.create(cluster.get_set_path())\n commit(ztransaction)",
"def create_network_profile(self, body=None):\r\n return self.post(self.network_profiles_path, body=body)",
"def create_cluster(\n self,\n name: str,\n cluster_type: Union[dto.ClusterType, str],\n params: Mapping[str, Any],\n ssh_key: str\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def create(profile, name):\n # Make sure it doesn't exist already.\n if exists(profile, name):\n msg = \"Instance profile '\" + str(name) + \"' already exists.\"\n raise ResourceAlreadyExists(msg)\n\n # Now we can create it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"create\", params)\n\n # Check that it exists.\n instance_profile_data = polling_fetch(profile, name)\n if not instance_profile_data:\n msg = \"Instance profile '\" + str(name) + \"' not created.\"\n raise ResourceNotCreated(msg)\n\n # Send back the instance profile's info.\n return instance_profile_data",
"def test_create_hyperflex_node_profile(self):\n pass",
"def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])",
"def create_cluster(self, cluster: dict) -> None:\n if self.master_url:\n return\n try:\n self._cluster_client.create_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster': cluster\n })\n _LOGGER.info(\n 'Cluster created successfully: %s',\n self.cluster_metadata.cluster_name)\n self.master_url = self.get_master_url(self.cluster_metadata)\n except Exception as e:\n if e.code == 409:\n _LOGGER.info(\n 'Cluster %s already exists. Continuing...',\n ie.current_env().clusters.default_cluster_name)\n elif e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to create cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot create a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 501:\n _LOGGER.error(\n 'Invalid region provided: %s', self.cluster_metadata.region)\n raise ValueError(\n 'Region {} does not exist!'.format(self.cluster_metadata.region))\n else:\n _LOGGER.error(\n 'Unable to create cluster: %s', self.cluster_metadata.cluster_name)\n raise e",
"def create(profile, cluster, task_definition, started_by=None, count=None):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"taskDefinition\"] = task_definition\n if started_by:\n params[\"startedBy\"] = started_by\n if count:\n params[\"count\"] = count\n return client.run_task(**params)",
"def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?",
"def create_server_profile(profile_obj):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n created = 0\n already_exists = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n logger.info(\"creating a server profile with name '%s' ...\" % profile.name)\n # checking if the profile is already existing\n if not VerifyServerProfile.verify_server_profile_not_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' already exists\" % profile.name)\n already_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Create SP dialog and enter data ...\n CreateServerProfile.click_create_profile_button()\n CreateServerProfile.wait_create_server_profile_dialog_shown()\n\n CreateServerProfile.input_name(profile.name)\n CreateServerProfile.input_select_server_profile_template(profile.prof_temp)\n CreateServerProfile.input_description(getattr(profile, 'desc', ''))\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - power off the server if the it is powered on,\n # - verify the server hardware type of the selected one is refreshed to the type name displayed in the drop-down list\n # for selecting server hardware\n if not CreateServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for creating server profile, may be wrong name, or powered on but failed to power it off. 
\"\n \"test will skip this profile '%s' and continue to create other server profiles\" % (profile.server, profile.name))\n continue\n msg = CreateServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be created successfully: \\n<%s>\" % msg)\n ui_lib.fail_test(msg)\n # input 'Server hardware type', 'Enclosure group'\n # TODO: update Edit Server Profile as well\n if profile.server != 'unassigned':\n # verify if 'Server hardware type' is automatically set by selecting 'Server hardware'\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n if sht_selected == '':\n logger.info(\"'server hardware type' is not selected, select it with name '%s'\" % profile.hardwareType)\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n elif profile.hardwareType not in sht_selected:\n msg = \"selected server hardware type '%s' of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.server, profile.hardwareType)\n logger.warn(msg)\n ui_lib.fail_test(msg)\n else:\n # input 'Enclosure group'\n if hasattr(profile, 'for_server'):\n hardware_type = FusionUIBase.APIMethods().get_server_hardware_type_by_server_hardware_name(profile.for_server)\n logger.info('For server attribute is %s, hardware type is %s' % (profile.for_server, hardware_type))\n CreateServerProfile.input_select_server_hardware_type(hardware_type)\n else:\n CreateServerProfile.input_select_server_hardware_type(profile.hardwareType)\n CreateServerProfile.input_select_enclosure_group(profile.enclgroup) if getattr(profile, 'enclgroup', None) is not None else None\n sht_selected = CreateServerProfile.get_selected_server_hardware_type(profile.server)\n # input 'Affinity' for BL server, or when 'server hardware' == 'unassigned'\n if getattr(profile, 'hardwareType', None) is not None:\n hardware_type = profile.hardwareType\n\n if str(hardware_type)[:2:] == 'BL' or profile.server == 'unassigned':\n if getattr(profile, 'Affinity', None) is not None:\n logger.info(\"test data for 'Affinity' is found: <%s>, start setting Affinity ...\" % profile.Affinity)\n CreateServerProfile.select_affinity_by_text(profile.Affinity)\n\n if getattr(profile, 'Firmware', None) is not None:\n logger.info(\"test data for 'Firmware' is found: <%s>, start setting Firmware Baseline ...\" % profile.Firmware)\n logger.debug(\"test data for 'Firmware' is found: <%s>\" % profile.Firmware, also_console=False)\n # set Firmware Baseline and force-installation option\n CommonOperationServerProfile.Firmware.set(profile.Firmware)\n\n if getattr(profile, 'Connections', None) is not None:\n logger.debug(\"test data for 'Connections' is found: <%s>\" % profile.Connections, also_console=False)\n logger.info(\"test data for 'Connections' is found, start adding connections ...\")\n # add connections\n CommonOperationServerProfile.Connection.set(profile.Connections)\n\n if getattr(profile, 'LocalStorage', None) is not None:\n logger.debug(\"test data for 'Local Storage' is found: <%s>\" % profile.LocalStorage, also_console=False)\n logger.info(\"test data for 'Local Storage' is found, start setting local storage options ... 
\")\n CommonOperationServerProfile.LocalStorage.set(profile.LocalStorage)\n\n if getattr(profile, 'SANStorage', None) is not None:\n logger.debug(\"test data for 'SAN Storage' is found:<%s>\" % profile.SANStorage, also_console=False)\n logger.info(\"test data for 'SAN Storage' is found, start setting SAN storage options and adding volumes ...\")\n # select \"Manage SAN Storage\" checkbox\n CommonOperationServerProfile.SANStorage.set(profile.SANStorage)\n\n if getattr(profile, 'BootSettings', None) is not None:\n logger.debug(\"test data for 'Boot Settings' is found: <%s>\" % profile.BootSettings, also_console=False)\n logger.info(\"test data for 'Boot Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BootSettings.set(profile, server_hardware_type=sht_selected)\n\n # 'BIOSSettings' part is ignored since BIOS setting is complicated to verify the result, therefor\n # might be better to use a dedicated tool to do this part automation separately\n if getattr(profile, 'BIOSSettings', None) is not None:\n logger.debug(\"test data for 'BIOS Settings' is found: <%s>\" % profile.BIOSSettings, also_console=False)\n logger.info(\"test data for 'BIOS Settings' is found, start setting its options ...\")\n CommonOperationServerProfile.BIOSSettings.set(profile.BIOSSettings)\n\n if getattr(profile, 'Advanced', None) is not None:\n logger.debug(\"test data for 'Advanced' is found: <%s>\" % profile.Advanced, also_console=False)\n logger.info(\"test data for 'Advanced' is found, start setting its options ...\")\n # select \"MAC/WWN/Serial/Hide unused FlexNICs\" radio box\n CreateServerProfile.Advanced.set(profile)\n\n CreateServerProfile.click_create_button()\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data of server profile '%s' may be wrongly defined for 'Boot mode', which caused an error that blocks profile being created. \"\n \"test will skip this profile and continue to create other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_DIALOG_CREATE_PROFILE_ERROR_WARNING, PerfConstants.WAIT_UNTIL_CONSTANT):\n logger._warn(\"Profile %s will create with server hardware has health status as WARNING\" % profile.name)\n CreateServerProfile.click_create_button()\n else:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n if CreateServerProfile.wait_create_server_profile_dialog_disappear(timeout=180, fail_if_false=False) is True:\n if getattr(profile, 'wait_complete', \"True\").lower() != \"false\":\n FusionUIBase.show_activity_sidebar()\n timeout = int(getattr(profile, 'timeout', \"3600\"))\n if FusionUIBase.wait_activity_action_ok(profile.name, 'Create', timeout=timeout, fail_if_false=False) is True:\n FusionUIBase.show_activity_sidebar()\n if CommonOperationServerProfile.wait_server_profile_status_ok_or_warn(profile.name, timeout=180, fail_if_false=False) is True:\n logger.info(\"created server profile '%s' successfully\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_server_profile_status_ok_or_warn' = FALSE, skip to next profile ... \")\n continue\n else:\n logger.warn(\"'wait_activity_action_ok' = FALSE, skip to next profile ... 
\")\n FusionUIBase.show_activity_sidebar()\n continue\n else:\n logger.info(\"created server profile '%s' successfully but no need to wait for task complete\" % profile.name)\n created += 1\n else:\n logger.warn(\"'wait_create_server_profile_dialog_disappear' = FALSE, skip to next profile ... \")\n CreateServerProfile.click_cancel_button()\n continue\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - already_exists == 0:\n logger.warn(\"no server profile to create! all %s server profile(s) is already existing, test is considered PASS\" % already_exists)\n return True\n else:\n if created < total:\n logger.warn(\"not all of the server profile(s) is successfully created - %s out of %s created \" % (created, total))\n if created + already_exists == total:\n logger.warn(\"%s already existing server profile(s) is skipped, test is considered PASS\" % already_exists)\n return True\n else:\n ui_lib.fail_test(\"%s already existing server profile(s) is skipped, %s profile(s) left is failed being created \" % (already_exists, total - created - already_exists))\n\n logger.info(\"all of the server profile(s) is successfully created - %s out of %s \" % (created, total))\n return True",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta"
] | [
"0.7299049",
"0.6737419",
"0.6406335",
"0.63813174",
"0.62996036",
"0.6183486",
"0.61396635",
"0.6103186",
"0.6031262",
"0.6022853",
"0.6017192",
"0.5997851",
"0.5964505",
"0.5943418",
"0.5941464",
"0.5923629",
"0.5901515",
"0.5809093",
"0.57562375",
"0.57538897",
"0.5749142",
"0.5738369",
"0.572721",
"0.5694008",
"0.5687158",
"0.5683875",
"0.5683787",
"0.5663614",
"0.5662908",
"0.56313884"
] | 0.78777266 | 0 |
Updates a hypervisor cluster profile. [Arguments] | def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):
return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def fusion_api_update_hypervisor_host_profile(self, uri=None, body=None, api=None, headers=None):\n return self.host_profile.update(body, uri, api, headers)",
"def fusion_api_create_hypervisor_cluster_profile(self, body, api=None, headers=None):\n return self.cluster_profile.create(body=body, api=api, headers=headers)",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def test_patch_hyperflex_cluster_profile(self):\n pass",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)",
"def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })",
"def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })",
"def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)",
"def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)",
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def fusion_api_delete_hypervisor_cluster_profile(self, name=None, uri=None, api=None, headers=None):\n return self.cluster_profile.delete(name, uri, api, headers)",
"def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])",
"def test_update_hyperflex_node_profile(self):\n pass",
"def update(id, body: Body):\n\n cluster = clusters.get_by_id(id)\n\n if cluster is None:\n raise HTTPException(status_code=404, detail=\"Cluster not found for ID: {0}\".format(id))\n\n cluster.update(body.dict())\n cluster = clusters.update(cluster)\n\n return cluster.export()",
"def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def fusion_api_get_hypervisor_cluster_profile(self, uri=None, param='', api=None, headers=None):\n return self.cluster_profile.get(uri=uri, api=api, headers=headers, param=param)",
"def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)",
"def update_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def main():\n\n parser = cli.Parser()\n parser.add_required_arguments(cli.Argument.CLUSTER_NAME)\n parser.add_custom_argument('--key', required=True, action='store',\n help='Name of ESXi Advanced Setting to update')\n parser.add_custom_argument('--value', required=True, action='store',\n help='Value of the ESXi Advanced Setting to update')\n args = parser.get_args()\n try:\n si = service_instance.connect(args)\n\n content = si.RetrieveContent()\n\n cluster = pchelper.get_obj(content, [vim.ClusterComputeResource], args.cluster_name)\n\n hosts = cluster.host\n for host in hosts:\n option_manager = host.configManager.advancedOption\n option = vim.option.OptionValue(key=args.key,\n value=int(args.value))\n print(\"Updating %s on ESXi host %s \"\n \"with value of %s\" % (args.key, host.name, args.value))\n if option_manager.UpdateOptions(changedValue=[option]):\n print(\"Settings updated!\")\n\n except vmodl.MethodFault as ex:\n print(\"Caught vmodl fault : \" + ex.msg)\n return -1\n except Exception as ex:\n print(\"Caught exception : \" + str(ex))\n return -1\n\n return 0",
"def update_profile(username):\n\n description = request.json.get('description')\n token = request.headers.get('token')\n\n if description is None:\n return jsonify({'message': 'New description not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"You may not edit others profiles\"}), 404\n\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n Profiles[username]['description'] = description\n return Profiles[username]",
"def do_update(self):\n params = self.inputs\n new_profile_id = params.get('new_profile_id', None)\n if new_profile_id and new_profile_id == self.entity.profile_id:\n params.pop('new_profile_id')\n\n if not params:\n return self.RES_OK, 'No property to update.'\n\n res = self.entity.do_update(self.context, params)\n if res:\n return self.RES_OK, 'Node updated successfully.'\n else:\n return self.RES_ERROR, 'Node update failed.'"
] | [
"0.69869",
"0.65845144",
"0.6579093",
"0.6464687",
"0.6215387",
"0.60455185",
"0.6026187",
"0.60101837",
"0.59323263",
"0.5911774",
"0.5902939",
"0.58916545",
"0.5850991",
"0.57385135",
"0.5716251",
"0.5682264",
"0.567401",
"0.56732166",
"0.56196904",
"0.55932254",
"0.5577131",
"0.5551176",
"0.5525782",
"0.5517768",
"0.54490584",
"0.54251266",
"0.54070693",
"0.54042876",
"0.54016256",
"0.5370111"
] | 0.8152496 | 0 |
Deletes a hypervisor cluster profile. [Arguments] | def fusion_api_delete_hypervisor_cluster_profile(self, name=None, uri=None, api=None, headers=None):
return self.cluster_profile.delete(name, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_delete_hyperflex_cluster_profile(self):\n pass",
"def delete_network_profile(arn=None):\n pass",
"def delete(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.delete_instance_profile(**params)",
"def delete_network_profile(self, profile):\r\n return self.delete(self.network_profile_path % profile)",
"def delete_cluster(self):",
"def delete(self,\n tunnel_profile_id,\n ):\n return self._invoke('delete',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n })",
"def delete_cluster(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n ctx.obj.groups[project.id].clusters[cluster_name].delete().data\n click.echo(\"DONE!\")",
"def delete_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError",
"def remove_vpn_profile(**kwargs):\n proxy = kwargs['proxy']\n session_token = kwargs['sessiontoken']\n display_name = kwargs['display_name']\n profile_type = kwargs['profile_type']\n\n match profile_type:\n case \"ike\":\n profile = \"ipsec-vpn-ike-profiles\"\n case \"ipsec\":\n profile = \"ipsec-vpn-tunnel-profiles\"\n case \"dpd\":\n profile = \"ipsec-vpn-dpd-profiles\"\n case other:\n print(\"Invalid profile type\")\n sys.exit(1)\n\n json_response_status_code = delete_vpn_profile(proxy, session_token, display_name, profile)\n if json_response_status_code == 200:\n sys.exit(f\"Tier-1 VPN service {display_name} was deleted successfully\")\n else:\n print(f\"There was an error deleting Tier1 VPN service {display_name}\")\n sys.exit(1)",
"def test_delete_profile(self):\n self.cim.delete_profile(customer_profile_id=u\"123\")",
"def delete(profile, name):\n # Make sure the instance profile exists.\n if not exists(profile, name):\n msg = \"No instance profile '\" + str(name) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Now try to delete it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"delete\", params)\n\n # Check that it was, in fact, deleted.\n if exists(profile, name):\n msg = \"The instance profile '\" + str(name) + \"' was not deleted.\"\n raise ResourceNotDeleted(msg)",
"def delete(stack, region, profile):\n ini_data = {}\n environment = {}\n\n environment['stack_name'] = stack\n if region:\n environment['region'] = region\n else:\n environment['region'] = find_myself()\n\n if profile:\n environment['profile'] = profile\n\n ini_data['environment'] = environment\n\n if start_smash(ini_data):\n sys.exit(0)\n else:\n sys.exit(1)",
"def delete(name: str):\n profiles = prefect.settings.load_profiles()\n if name not in profiles:\n exit_with_error(f\"Profile {name!r} not found.\")\n\n current_profile = prefect.context.get_settings_context().profile\n if current_profile.name == name:\n exit_with_error(\n f\"Profile {name!r} is the active profile. You must switch profiles before\"\n \" it can be deleted.\"\n )\n\n profiles.remove_profile(name)\n\n verb = \"Removed\"\n if name == \"default\":\n verb = \"Reset\"\n\n prefect.settings.save_profiles(profiles)\n exit_with_success(f\"{verb} profile {name!r}.\")",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def test_delete_hyperflex_node_profile(self):\n pass",
"def delete(self, request, flavor_profile_id):\n conn = get_sdk_connection(request)\n conn.load_balancer.delete_flavor_profile(flavor_profile_id,\n ignore_missing=True)",
"def delete(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n ):\n return self._invoke('delete',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n })",
"def fusion_api_delete_server_profile(self, name=None, uri=None, param='', api=None, headers=None):\n return self.profile.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def delprofile(variable, account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n json_metadata = Profile(acc[\"json_metadata\"])\n\n for var in variable:\n json_metadata.remove(var)\n\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)",
"def people_delete(self, profiles=None, query_params=None, timezone_offset=None, ignore_alias=True, backup=True,\n backup_file=None):\n return self.people_operation('$delete', '', profiles=profiles, query_params=query_params,\n timezone_offset=timezone_offset, ignore_alias=ignore_alias, backup=backup,\n backup_file=backup_file)",
"def delete_profile(profile_id):\n \n profile = mongo.db.profiles\n profile.delete_one({'_id': ObjectId(profile_id)})\n flash('Your profile has been deleted.', 'success')\n return redirect(url_for('dashboard'))",
"def cluster_destroy(extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"destroy\"]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster destroy: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)",
"def delete_cluster(self):\n cf_namespace_id = self.create_or_fetch_namespace()\n self.delete_action(cf_namespace_id)\n self.create_action(cf_namespace_id)\n self.invoke_action(cf_namespace_id)",
"def delete_test_cluster(ctx, spec_test_file, org_name, project_name,\n cluster_name_salt):\n # Step-1: determine the cluster name for the given test.\n cluster_name = get_cluster_name(get_test_name_from_spec_file(\n spec_test_file), cluster_name_salt)\n\n # Step-2: delete the cluster.\n organization = cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name)\n project = cmd.ensure_project(\n client=ctx.obj, project_name=project_name, organization_id=organization.id)\n try:\n ctx.obj.groups[project.id].clusters[cluster_name].delete()\n except AtlasApiBaseError:\n pass",
"def delete_cluster(cluster_id: str, sg_id: str = None):\n print(\"INFO: Deleting cluster %s\" % cluster_id)\n emr = get_emr_client()\n emr.terminate_job_flows(JobFlowIds=[cluster_id])\n print(\"INFO: Cluster deleted.\")\n\n print(\"INFO: Waiting before deleting SG. . .\")\n sleep(300)\n if sg_id is not None:\n delete_sg(sg_id)\n\n os.remove(META_FILE)\n os.remove(\"connection.bash\")",
"def fusion_api_delete_server_profile_template(self, name=None, uri=None, api=None, headers=None):\n return self.profile_template.delete(name, uri, api, headers)",
"def delete(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tdeleteresource = lbprofile()\n\t\t\t\tif type(resource) != type(deleteresource):\n\t\t\t\t\tdeleteresource.lbprofilename = resource\n\t\t\t\telse :\n\t\t\t\t\tdeleteresource.lbprofilename = resource.lbprofilename\n\t\t\t\treturn deleteresource.delete_resource(client)\n\t\t\telse :\n\t\t\t\tif type(resource[0]) != cls :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tdeleteresources = [ lbprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tdeleteresources[i].lbprofilename = resource[i]\n\t\t\t\telse :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tdeleteresources = [ lbprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tdeleteresources[i].lbprofilename = resource[i].lbprofilename\n\t\t\t\tresult = cls.delete_bulk_request(client, deleteresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e",
"def delete(self):\n logger.info(\"/cluster action=\" + r.method)\n # request_data = r.get_json(force=True, silent=True)\n # if r.form:\n # cluster_id = r.form[\"id\"]\n # col_name = r.form[\"col_name\"]\n # else:\n # cluster_id = request_data.get(\"id\")\n # col_name = request_data.get(\"col_name\")\n # request_debug(r, logger)\n args = cluster_delete_parser.parse_args()\n cluster_id = args.get('cluster_id')\n # col_name = args.get('state')\n if not cluster_id:\n error_msg = \"缺少参数\"\n logger.warning(error_msg)\n return make_fail_resp(error=error_msg)\n else:\n logger.debug(\"cluster delete with id={0}\".format(\n cluster_id))\n try:\n cluster = ClusterModel.objects.get(id=cluster_id)\n except Exception as e:\n logger.error(e)\n return {'stat': 400, 'msg': '不存在'}\n # status = cluster.state\n delete_cluster(cluster_id=cluster_id, status='active')\n return make_ok_resp()",
"def delete_machine(args):\n session = Session()\n # the following is used to help with code completion\n \"\"\"session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\"\"\"\n machine = session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).first()\n if machine is not None:\n print \"Deleting machine with hostname: \" + machine.hostname + \" and with id: \" + str(machine.id)\n session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\n else:\n print \"No machine was found!\"",
"def delete_profile(subscription_key, profile_id):\r\n\r\n helper = VerificationServiceHttpClientHelper.VerificationServiceHttpClientHelper(subscription_key)\r\n\r\n helper.delete_profile(profile_id)\r\n\r\n print('Profile {0} has been successfully deleted.'.format(profile_id))"
] | [
"0.73555297",
"0.7025996",
"0.6983594",
"0.69076455",
"0.64352804",
"0.643093",
"0.6422486",
"0.64189655",
"0.6414927",
"0.63980794",
"0.6384168",
"0.6366012",
"0.62055856",
"0.61796516",
"0.6123568",
"0.6111072",
"0.60962975",
"0.6093105",
"0.6040947",
"0.5955083",
"0.5925367",
"0.59209293",
"0.5864404",
"0.58526397",
"0.58497393",
"0.5846671",
"0.5824111",
"0.58160985",
"0.57988596",
"0.5794592"
] | 0.8240659 | 0 |
Creates a virtual switch layout. [Arguments] | def fusion_api_create_virtual_switch_layout(self, body, api=None, headers=None):
return self.cluster_profile.create(body=body, api=api, headers=headers, param='/virtualswitch-layout') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_layout( self ):",
"def create_logical_router(self, switch):\n if self.nb_api is None:\n self.nb_api = api_nb.NbApi.get_instance(False)\n\n # TODO: lswitch from nb api\n router_ports = []\n dpid = str(switch.dp.id)\n\n for port in switch.ports:\n # network = \"192.168.33.1/24\",\n network = None\n ip = None\n if dpid == '1':\n if port.port_no == 1:\n network = SUBNET1\n ip = DP1_PORT1_GATEWAY_IP\n else:\n network = SUBNET2\n ip = DP1_PORT2_GATEWAY_IP\n elif dpid == '2':\n if port.port_no == 1:\n network = SUBNET2\n ip = DP2_PORT1_GATEWAY_IP\n else:\n network = SUBNET3\n ip = DP2_PORT2_GATEWAY_IP\n elif dpid == '3':\n if port.port_no == 1:\n network = SUBNET3\n ip = DP3_PORT1_GATEWAY_IP\n else:\n network = SUBNET4\n ip = DP3_PORT2_GATEWAY_IP\n else:\n print \"Datapath {} not supported. Router not created!\".format(dpid)\n return\n if network and ip:\n router_port = l3.LogicalRouterPort(lswitch=\"{}\".format(switch.dp.id),\n topic=\"fake_tenant1\",\n network=network,\n gateway_ip=ip,\n mac=\"{}\".format(port.hw_addr),\n port_no=str(port.port_no),\n unique_key=4,\n id=\"{}:{}\".format(switch.dp.id, port.port_no))\n router_ports.append(router_port)\n\n router = l3.LogicalRouter(name=\"router_of_{}\".format(switch.dp.id),\n topic=\"fake_tenant1\",\n version=10,\n id=\"{}\".format(switch.dp.id),\n unique_key=5,\n ports=router_ports)\n self.nb_api.create(router)",
"def create_svs(self, svs_name, vmnic, num_ports=8):\n\n svs = vim.host.VirtualSwitch.Specification()\n svs.numPorts = num_ports\n svs.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[vmnic])\n host_network_obj = self.host_obj.configManager.networkSystem\n host_network_obj.AddVirtualSwitch(vswitchName=svs_name, spec=svs)",
"def setupVolumeNodeViewLayout(self):\n layoutNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLLayoutNode')\n layoutNodes.SetReferenceCount(layoutNodes.GetReferenceCount()-1)\n layoutNodes.InitTraversal()\n layoutNode = layoutNodes.GetNextItemAsObject()\n layoutNode.SetViewArrangement(slicer.vtkMRMLLayoutNode.SlicerLayoutTwoOverTwoView)",
"def switch(n, m):\r\n out = []\r\n\r\n # Convert to stupid letters for 1 and 2\r\n name_letters = {1: \"S\", 2: \"D\"}\r\n name_n = name_letters[n] if n in name_letters else str(n)\r\n name_m = name_letters[m] if m in name_letters else str(m)\r\n\r\n # Number of pins on the right is n*m, plus one per pole for spacing,\r\n # minus the final spacing (n starts at 1), rounded up to nearest odd\r\n # number so that half the height is on the 100mil grid.\r\n n_pins_right = n * m + n - 1\r\n if n_pins_right % 2 == 0:\r\n n_pins_right += 1\r\n height = 100 * (n_pins_right - 1)\r\n hheight = height // 2\r\n\r\n # Ref goes at the top, 100 above the top pin, unless only one throw\r\n # in which case we also need to clear the switch graphic\r\n refheight = hheight + 100\r\n if m == 1:\r\n refheight += 50\r\n\r\n # Value/name goes below, unless m is even, in which case the bottom spacer\r\n # isn't there so needs to be ignored\r\n valheight = -(hheight + 100)\r\n if n % 2 == 1 and m % 2 == 0:\r\n valheight += 100\r\n\r\n # Output component header\r\n name = \"SWITCH_{}P{}T\".format(name_n, name_m)\r\n out.append(\"#\\n# {}\\n#\".format(name))\r\n out.append('DEF {} SW 0 1 Y N 1 F N'.format(name))\r\n out.append('F0 \"SW\" 0 {} 50 H V C CNN'.format(refheight))\r\n out.append('F1 \"{}\" 0 {} 50 H V C CNN'.format(name, valheight))\r\n out.append('F2 \"\" 0 0 50 H I C CNN')\r\n out.append('F3 \"\" 0 0 50 H I C CNN')\r\n out.append('DRAW')\r\n\r\n # Output drawing\r\n pole_top = hheight\r\n for pole in range(n):\r\n # Draw pole\r\n pole_num = pole*(m+1) + 2\r\n pole_y = pole_top - (100 * (m - 1))//2\r\n if m % 2 == 0:\r\n pole_y -= 50\r\n out.append('X \"~\" {} -100 {} 40 R 50 50 1 1 P'\r\n .format(pole_num, pole_y))\r\n out.append('C -50 {} 10 1 1 0 N'.format(pole_y))\r\n out.append('P 2 1 1 0 -50 {} 50 {} N'\r\n .format(pole_y + 10, pole_y + 90))\r\n\r\n for throw in range(m):\r\n # Draw throws\r\n throw_num = pole_num + throw - 1\r\n throw_y = pole_top - 100 * throw\r\n if throw > 0:\r\n throw_num += 1\r\n out.append('X \"~\" {} 100 {} 40 L 50 50 1 1 P'\r\n .format(throw_num, throw_y))\r\n out.append('C 50 {} 10 1 1 0 N'.format(throw_y))\r\n\r\n # Move down for next pole\r\n pole_top -= 100 * (m + 1)\r\n\r\n # Draw connecting dashed line\r\n if n > 1:\r\n pole_y = hheight - (100 * (m - 1))//2 + 50\r\n if m % 2 == 0:\r\n pole_y -= 50\r\n for _ in range(5*(m+1)*(n-1)):\r\n out.append('P 2 1 1 0 0 {} 0 {} N'\r\n .format(pole_y, pole_y - 5))\r\n pole_y -= 20\r\n\r\n # Done\r\n out.append('ENDDRAW\\nENDDEF\\n')\r\n\r\n return out",
"def new_vm():\n\tcfg_path = input(\"\\n\\nInsert the ClickOS .cfg file absolute path:\\n\")\n\n\tbridge_name = get_bridge_name(cfg_path)\n\tif len(bridge_name) == 0:\n\t\tprint(\"Couldnt find the bridge name.\")\n\t\treturn 0\n\n\tcreate_bridge(bridge_name)\n\n\tboot_vm(cfg_path)\n\n\treturn 1",
"def __createLayout(self):\r\n self.__createCanvas()\r\n self.__createButton()\r\n self.__createInputFunction()\r\n self.__createLimits()\r\n self.__styleLayout()",
"def _generate_layout(self):\n\n pass",
"def _create_switch(knx_module: XKNX, config: ConfigType) -> XknxSwitch:\n return XknxSwitch(\n knx_module,\n name=config[CONF_NAME],\n group_address=config[CONF_ADDRESS],\n group_address_state=config.get(SwitchSchema.CONF_STATE_ADDRESS),\n )",
"def draw_layout(self):\n\n if not self.fg2d_s_short:\n raise ValueError('This template current only works if source wires of fg2d are shorted.')\n\n threshold = self.params['threshold']\n draw_boundaries = self.params['draw_boundaries']\n num_blk = self.params['num_blk']\n show_pins = self.params['show_pins']\n\n row_list = ['ptap', 'nch', 'pch', 'ntap']\n orient_list = ['R0', 'MX', 'R0', 'MX']\n thres_list = [threshold] * 4\n\n # compute number of tracks\n # note: because we're using thick wires, we need to compute space needed to\n # satisfy DRC rules\n hm_layer = self.conn_layer + 1\n num_g_tracks = [0, 0, 0, 0]\n num_gb_tracks = [0, 0, 0, 0]\n num_ds_tracks = [0, 0, 0, 0]\n\n # to draw special stack driver primitive, we need to enable dual_gate options.\n options = dict(dual_gate=True)\n row_kwargs = [{}, options, options, {}]\n if draw_boundaries:\n end_mode = 15\n else:\n end_mode = 0\n\n # specify row types\n self.set_row_types(row_list, orient_list, thres_list, draw_boundaries, end_mode,\n num_g_tracks, num_gb_tracks, num_ds_tracks, guard_ring_nf=0,\n row_kwargs=row_kwargs)\n\n # determine total number of blocks\n # draw nwell tap\n row_idx = 3\n nw_tap = self.add_laygo_primitive('sub', loc=(0, row_idx), nx=num_blk, spx=1)\n\n # draw pmos row\n row_idx = 2\n pmos = self.add_laygo_primitive('dual_stack2s', loc=(0, row_idx), nx=num_blk, spx=1)\n\n # draw nmos row\n row_idx = 1\n nmos = self.add_laygo_primitive('dual_stack2s', loc=(0, row_idx), nx=num_blk, spx=1)\n\n # draw pwell tap\n row_idx = 0\n pw_tap = self.add_laygo_primitive('sub', loc=(0, row_idx), nx=num_blk, spx=1)\n\n # compute overall block size\n self.set_laygo_size(num_col=num_blk + 4)\n self.fill_space()\n # draw boundaries and get guard ring power rail tracks\n self.draw_boundary_cells()",
"def build_switches(self , level, last_level , parent_sws = [] , total_sw_count = 0):\n if level == last_level : return parent_sws\n sws = []\n # cantidad de switches en este nivel\n sw_count = 2**level\n lower_bound = total_sw_count\n upper_bound = lower_bound + sw_count\n for i in range(lower_bound , upper_bound):\n sw_id = i + 1\n # creo un switch\n sw = self.addSwitch('s%s' % sw_id)\n if level == 0 : self.root_sw = sw\n sws.append(sw)\n # conecto el nuevo switch con todos los switches padre\n for parent_sw in parent_sws:\n self.addLink(sw, parent_sw)\n # los switches creados en este nivel seran los padres del nivel siguiente\n return self.build_switches(level + 1 , last_level , sws , total_sw_count + sw_count)",
"def _draw_switch(self, instance: Switch, shape: pymunk.Shape,\n view: tk.Canvas, offset: Tuple[int, int]) -> List[int]:\n if instance.get_pressed(): # if switch is pressed\n image = self.load_image(\"switch_pressed\")\n else:\n image = self.load_image(\"switch\")\n\n return [view.create_image(shape.bb.center().x + offset[0], shape.bb.center().y,\n image=image, tags=\"block\")]",
"def logical_switch_create(client_session, transport_zone, logical_switch_name, control_plane_mode=None):\n vdn_scope_id, vdn_scope = get_scope(client_session, transport_zone)\n assert vdn_scope_id, 'The Transport Zone you defined could not be found'\n if not control_plane_mode:\n control_plane_mode = vdn_scope['controlPlaneMode']\n\n # get a template dict for the lswitch create\n lswitch_create_dict = client_session.extract_resource_body_example('logicalSwitches', 'create')\n\n # fill the details for the new lswitch in the body dict\n lswitch_create_dict['virtualWireCreateSpec']['controlPlaneMode'] = control_plane_mode\n lswitch_create_dict['virtualWireCreateSpec']['name'] = logical_switch_name\n lswitch_create_dict['virtualWireCreateSpec']['tenantId'] = ''\n\n # create new lswitch\n new_ls = client_session.create('logicalSwitches', uri_parameters={'scopeId': vdn_scope_id},\n request_body_dict=lswitch_create_dict)\n return new_ls['body'], new_ls['location']",
"def test_make_layout():\n raw = hcp.read_raw(data_type='rest', **hcp_params).crop(0, 1).load_data()\n raw.pick_types()\n lout = make_hcp_bti_layout(raw.info)\n assert_equal(lout.names, raw.info['ch_names'])",
"def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)",
"def switchshow(obj, content, append_buf=''):\n switch_obj, proj_obj = None, obj.r_project_obj()\n\n for buf in content:\n if 'switchWwn:' in buf:\n k, v = _split_parm(buf)\n switch_obj = proj_obj.s_add_switch(v + append_buf)\n break\n if switch_obj is None:\n brcdapi_log.exception('Could not find switchWwn in', echo=True)\n return switch_obj\n\n # Get the basic switch information\n i = 0\n while len(content) > i:\n buf = content[i]\n if len(buf) > len('Index') and buf[0: len('Index')] == 'Index' or 'LS Attributes:' in buf:\n break\n k, v = _split_parm(buf)\n if k == 'switchId':\n v = '0x' + v\n elif k == 'switchDomain':\n v = int(v.replace(' (unconfirmed)', ''))\n if k in _switchshow_tbl:\n brcddb_util.add_to_obj(switch_obj, _switchshow_tbl[k], v)\n elif k == 'switchRole':\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_principal, 1 if 'Principal' in v else 0)\n elif k == 'switchState':\n if v == 'Online':\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_op_status, 2)\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_enabled_state, True)\n else:\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_op_status, 3)\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_enabled_state, False)\n elif k in _switch_attributes_T_F.keys():\n brcddb_util.add_to_obj(switch_obj, _switch_attributes_T_F[k], False if 'OFF' in v.upper() else True)\n elif k in _switch_0_1_boolean_off_on.keys():\n brcddb_util.add_to_obj(switch_obj, _switch_0_1_boolean_off_on[k], 0 if 'OFF' in v.upper() else 1)\n elif k in _switch_0_1_boolean_yes_no.keys():\n brcddb_util.add_to_obj(switch_obj, _switch_0_1_boolean_yes_no[k], 0 if 'NO' in v.upper() else 1)\n i += 1\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_sw_user_name, switch_obj.r_get(brcdapi_util.bfs_sw_user_name))\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfs_did, switch_obj.r_get(brcdapi_util.bfs_did))\n\n # Get the logical switch attributes. Note that these are formated on a single line rather than in a list as the\n # other switch attributes are displayed.\n if 'LS Attributes:' in buf:\n for t_buf in buf[len('LS Attributes:'):].replace('[', '').replace(']', '').replace('\\t', '').strip().split(','):\n cl = [c.strip() for c in t_buf.split(':')]\n if len(cl) == 1 and 'Address Mode' in cl[0]:\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfc_area_mode, int(cl[0].split(' ')[2]))\n elif len(cl) == 2 and cl[0] in _switch_0_1_boolean_off_on.keys():\n brcddb_util.add_to_obj(switch_obj,\n _switch_0_1_boolean_off_on[cl[0]],\n 0 if 'OFF' in cl[1].upper() else 1)\n elif len(cl) == 2 and cl[0] in _switch_0_1_boolean_yes_no.keys():\n brcddb_util.add_to_obj(switch_obj, _switch_0_1_boolean_yes_no[cl[0]], 0 if 'NO' in cl[1].upper() else 1)\n i += 1\n\n # Figure out where the indices are for the port parameters. Note that they are different for bladed vs. 
fixed port\n # switches and ge ports do not have an index\n port_index = dict()\n while len(content) > i:\n buf = content[i]\n if 'Index' in buf and 'Media' in buf:\n cl = gen_util.remove_duplicate_char(buf, ' ').strip().split(' ')\n for x in range(0, len(cl)):\n port_index.update({cl[x]: x})\n break\n i += 1\n\n # Now get the port information\n switch_port_list = list()\n brcddb_util.add_to_obj(switch_obj, brcdapi_util.bfc_area_mode, switch_port_list)\n i += 2 # Skip the line just below it that has ================ in it\n while len(content) > i:\n buf = content[i].replace('\\t', ' ').strip()\n cl = gen_util.remove_duplicate_char(buf, ' ').split(' ')\n if len(cl) < 6:\n break\n if 'ge' in cl[0]:\n cl.insert(1, None) # It's a fixed port switch. ge ports do not have an FC address\n cl.insert(0, None) # ge ports do not have an index\n elif 'ge' in cl[1]:\n cl.insert(2, None) # It's a director. ge ports do not have an FC address\n cl.insert(0, None) # ge ports do not have an index or an FC address\n else:\n cl[port_index['Index']] = int(cl[port_index['Index']])\n cl[port_index['Address']] = '0x' + cl[port_index['Address']]\n\n proto = cl[port_index['Proto']]\n if proto == 'FC' or proto == 'VE' or proto == 'FCIP':\n port_desc = ' '.join(cl[port_index['Proto']:])\n port_num = '0' if port_index.get('Slot') is None else cl[port_index.get('Slot')]\n port_num += '/' + cl[port_index['Port']]\n physical_state = _physical_port_state.get(cl[port_index['State']])\n try:\n speed = int(gen_util.non_decimal.sub('', cl[port_index['Speed']])) * 1000000000\n except ValueError:\n speed = 32000000000\n port_d = {\n 'name': port_num,\n 'index': cl[port_index['Index']],\n 'fcid-hex': cl[port_index['Address']],\n 'auto-negotiate': 1 if 'N' in cl[port_index['Speed']] else 0,\n 'speed': speed,\n 'operational-status': 2 if 'Online' in cl[port_index['State']] else 3,\n 'is-enabled-state': False if 'Disabled' in port_desc or 'license not assigned' in port_desc else True,\n 'physical-state': 'unknown' if physical_state is None else physical_state,\n 'neighbor': dict(wwn=list()),\n }\n for k, v in _physical_pbs_port_type.items():\n if k in port_desc:\n port_d.update(({'port-type': v}))\n break\n if port_d.get('port-type') is None:\n port_d.update({'port-type': brcddb_common.PORT_TYPE_U}) # Typical of an offline port\n switch_port_list.append(port_num)\n port_obj = switch_obj.s_add_port(port_num) if proto == 'FC' \\\n else switch_obj.s_add_ve_port(port_num) if proto == 'VE' \\\n else switch_obj.s_add_ge_port(port_num) if proto == 'FCIP' \\\n else None\n if port_obj is None:\n brcdapi_log.exception('Unexpected error in: ' + buf, echo=True)\n port_obj.s_new_key('fibrechannel', port_d)\n i += 1\n\n return switch_obj, i",
"def createSpSwConstraint(parents, target, enumNames, niceNames=['Space'],constrType='parent',constrTarget=''):\n if constrTarget == '':\n if target.endswith('_CTRL'):\n stripName=target.rpartition('_')\n constrTarget=stripName[0]+'Ctrl_ROOT'\n else:\n constrTarget=target\n\n if niceNames <= 1:\n niceName=niceNames\n else:\n niceName=''\n for i,x in enumerate(niceNames):\n if i < len(niceNames)-1:\n niceName=niceName+x+' / '\n else:\n niceName=niceName+x\n\n existingAttr=cmds.listAttr(target)\n constr=eval('cmds.'+constrType+'Constraint(parents,constrTarget,mo=True)')\n if 'spSwSep' not in existingAttr:\n cmds.addAttr(target, ln='spSwSep', nn='___ Space Switching', at='enum', en='___', k=True)\n cmds.addAttr(target, ln='spaceSwitch', nn=niceName+' Switch', at='enum', en=enumNames, k=True)\n for i,x in enumerate(parents):\n if not i == 1:\n rev=cmds.createNode('reverse', n=target+'spaceSwitch_REV')\n cmds.connectAttr(target+'.spaceSwitch',rev+'.inputX')\n cmds.connectAttr(rev+'.outputX', constr[0]+'.'+x+'W'+str(i))\n else:\n cmds.connectAttr(target+'.spaceSwitch', constr[0]+'.'+x+'W'+str(i))",
"def create_and_bridge_iperf_virtual_interface(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current bridge and interface configuration\n print(\"\\nThis the current bridge configuration:\")\n ifaces = VPPUtil.show_bridge(node)\n question = \"\\nWould you like to keep this configuration [Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n self._sockfilename = \"/var/run/vpp/{}.sock\".format(\n ifaces[0][\"name\"].replace(\"/\", \"_\")\n )\n if os.path.exists(self._sockfilename):\n continue\n\n # Create a script that builds a bridge configuration with\n # physical interfaces and virtual interfaces\n ints_with_vints = self._iperf_vm_questions(node)\n content = \"\"\n for intf in ints_with_vints:\n vhoststr = \"\\n\".join(\n [\n \"comment { The following command creates the socket }\",\n \"comment { and returns a virtual interface }\",\n \"comment {{ create vhost-user socket \"\n \"/var/run/vpp/sock{}.sock server }}\\n\".format(intf[\"bridge\"]),\n ]\n )\n\n setintdnstr = \"set interface state {} down\\n\".format(intf[\"name\"])\n\n setintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"name\"], intf[\"bridge\"]\n )\n setvintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"virtualinterface\"], intf[\"bridge\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 up\n setintvststr = \"set interface state {} up\\n\".format(\n intf[\"virtualinterface\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 down\n setintupstr = \"set interface state {} up\\n\".format(intf[\"name\"])\n\n content += (\n vhoststr\n + setintdnstr\n + setintbrstr\n + setvintbrstr\n + setintvststr\n + setintupstr\n )\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/create_iperf_vm\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def _setup_layout(self):\r\n\t\tbtn_toggle_server = Button(self, \\\r\n\t\t\ttext = \"啟動伺服器\", command = self._toggle_server, \\\r\n\t\t\tname = \"btn_toggle_server\")\r\n\t\tbtn_toggle_server.pack(side = LEFT)\r\n\r\n\t\tlabel_IP = Label(self, text = \"IP: \", anchor = W)\r\n\t\tlabel_IP.pack(side = LEFT)\r\n\t\tentry_IP = Entry(self, width = 15, name = \"entry_IP\")\r\n\t\tentry_IP.pack(side = LEFT)\r\n\t\tlabel_port = Label(self, text = \"Port: \", anchor = W)\r\n\t\tlabel_port.pack(side = LEFT)\r\n\t\tentry_port = Entry(self, width = 5, name = \"entry_port\")\r\n\t\tentry_port.pack(side = LEFT)\r\n\r\n\t\tlabel_connections = Label(self, text = \"連接數: -/-\", \\\r\n\t\t\tname = \"label_connections\")\r\n\t\tlabel_connections.pack(side = LEFT)",
"def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)",
"def create_vlan(module, switch, vlan_id, untagged_ports=None):\n global CHANGED_FLAG\n output = ''\n new_vlan = False\n\n cli = pn_cli(module)\n cli += ' vlan-show format id no-show-headers '\n existing_vlans = run_cli(module, cli)\n\n if existing_vlans is not None:\n existing_vlans = existing_vlans.split()\n if vlan_id not in existing_vlans:\n new_vlan = True\n\n if new_vlan or existing_vlans is None:\n cli = pn_cli(module)\n cli += ' vlan-create id %s scope fabric ' % vlan_id\n\n if untagged_ports is not None:\n cli += ' untagged-ports %s ' % untagged_ports\n\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created vlan with id %s\\n' % (switch, vlan_id)\n\n return output",
"def _add_switch(self, switchdesc):\n # Check switch definition parameters\n switch_attributes = list(switchdesc.keys())\n if not set(switch_attributes).issubset(self.switch_attributes):\n raise ValueError(\n \"Switch definition: '{0}' defined in '{1}' is not supported. \"\n \"Supported switch parameters are '{2}'.\".format(\n json.dumps(switchdesc, indent=2), self._xmlfile,\n self.switch_attributes))\n for mandatory_parameter in self.switch_attributes[:2]:\n if mandatory_parameter not in switch_attributes:\n raise ValueError(\n \"A '{0}' parameter is required in switch definition: \"\n \"'{1}' defined in '{2}'.\".format(\n mandatory_parameter, json.dumps(switchdesc, indent=2),\n self._xmlfile))\n\n # Check the name of the switch is not already reserved\n switch_name = switchdesc[self.switch_attributes[0]][0]\n if switch_name in self._switches:\n raise ValueError(\n \"The switch name '{0}' defined in '{1}' is \"\n \"already used.\".format(switch_name, self._xmlfile))\n\n # Create the switch control\n switch_paths = {}\n for pathdesc in switchdesc[self.switch_attributes[1]]:\n path_name = pathdesc[self.switch_path[0]][0]\n path_boxes = [box[self.unit_attributes[0]]\n for box in pathdesc[self.switch_path[1]]]\n switch_paths[path_name] = path_boxes\n switch_keys = list(switch_paths.keys())\n control = controls[\"Enum\"](\n choices=tuple(switch_keys),\n switch_name=switch_name,\n desc=(\"Switch between paths '{0}:{1}' defined in pipeline '{2}'\"\n \".\".format(switch_name, \"-\".join(switch_keys), self.id)))\n setattr(self.inputs, switch_name, control)\n self._switches[switch_name] = switch_paths\n control.add_observer(\"value\", self._update_activation)\n control.value = switch_keys[0]",
"def create_layout() -> None:\n\n st.sidebar.title(\"Menu\")\n app_mode = st.sidebar.selectbox(\"Please select a page\", [' I. Homepage',\n \"II. Download data\" ,\n \"III. Statistic Data\",\n ' IV. AGF Indices',\n ' V. Notes',\n \" VI. Rank of patient\" ])\n \n if app_mode == ' I. Homepage':\n load_homepage() \n elif app_mode == \"III. Statistic Data\":\n leyer.leyer() \n elif app_mode == ' IV. AGF Indices':\n single.AGF_indices() \n elif app_mode == \"II. Download data\":\n download_data.download_data() \n elif app_mode == ' V. Notes':\n text_input.text_input()\n elif app_mode == \" VI. Rank of patient\":\n rank_of_patient.rank_of_patient()",
"def createLayout(dash_instance):\n\t\n\tcreateInitialChildren(dash_instance)\n\tdash_instance.createDashboardLayout()",
"def _create_switchport(self, device, intf_type, intf_name, trunk_no_default_native):\n\n try:\n device.interface.switchport(int_type=intf_type, name=intf_name)\n if trunk_no_default_native:\n self.logger.info('Configuring switchport mode as `trunk_no_default_native`'\n ' on the interface %s', intf_name)\n device.interface.trunk_mode(int_type=intf_type, name=intf_name,\n mode='trunk-no-default-native')\n else:\n self.logger.info('Configuring switchport mode as `trunk`'\n ' on the interface %s', intf_name)\n device.interface.trunk_mode(int_type=intf_type,\n name=intf_name, mode='trunk')\n except ValueError as e:\n self.logger.exception(\"Configuring Switch port trunk failed due to %s\"\n % (e.message))\n raise ValueError(\"Configuring Switch port trunk failed\")\n\n return True",
"def create_and_bridge_virtual_interfaces(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current bridge and interface configuration\n print(\"\\nThis the current bridge configuration:\")\n VPPUtil.show_bridge(node)\n question = \"\\nWould you like to keep this configuration [Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n\n # Create a script that builds a bridge configuration with\n # physical interfaces and virtual interfaces\n ints_with_vints = self._create_vints_questions(node)\n content = \"\"\n for intf in ints_with_vints:\n vhoststr = \"\\n\".join(\n [\n \"comment { The following command creates the socket }\",\n \"comment { and returns a virtual interface }\",\n \"comment {{ create vhost-user socket \"\n \"/var/run/vpp/sock{}.sock server }}\\n\".format(intf[\"bridge\"]),\n ]\n )\n\n setintdnstr = \"set interface state {} down\\n\".format(intf[\"name\"])\n\n setintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"name\"], intf[\"bridge\"]\n )\n setvintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"virtualinterface\"], intf[\"bridge\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 up\n setintvststr = \"set interface state {} up\\n\".format(\n intf[\"virtualinterface\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 down\n setintupstr = \"set interface state {} up\\n\".format(intf[\"name\"])\n\n content += (\n vhoststr\n + setintdnstr\n + setintbrstr\n + setvintbrstr\n + setintvststr\n + setintupstr\n )\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def add_vport(self, switch_name):\n # Create tap devices for the VM\n tap_name = 'tap' + str(self._vport_id)\n self._vport_id += 1\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tap_name, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger,\n 'Creating tap device...', False)\n\n tap_cmd_list = ['sudo', 'ip', 'tuntap', 'add', tap_name, 'mode', 'tap']\n # let's assume, that all VMs have NIC QUEUES enabled or disabled\n # at the same time\n if int(settings.getValue('GUEST_NIC_QUEUES')[0]):\n tap_cmd_list += ['multi_queue']\n tasks.run_task(tap_cmd_list, self._logger,\n 'Creating tap device...', False)\n if settings.getValue('VSWITCH_JUMBO_FRAMES_ENABLED'):\n tasks.run_task(['ifconfig', tap_name, 'mtu',\n str(settings.getValue('VSWITCH_JUMBO_FRAMES_SIZE'))],\n self._logger, 'Setting mtu size', False)\n\n tasks.run_task(['sudo', 'ip', 'addr', 'flush', 'dev', tap_name],\n self._logger, 'Remove IP', False)\n tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', tap_name, 'up'],\n self._logger, 'Bring up ' + tap_name, False)\n\n bridge = self._bridges[switch_name]\n of_port = bridge.add_port(tap_name, [])\n return (tap_name, of_port)",
"def create_switch():\n connection = MagicMock()\n connection.address = 'addr'\n connection.port = 'port'\n connection.protocol.version = 0x04\n switch = Switch('00:00:00:00:00:00:00:01', connection)\n switch._enabled = True\n return switch",
"def build_view(frame, box, _view):\n\n\tif isinstance(_view, view.Switch):\n\t\tfor action in _view.get_actions():\n\t\t\tbutton = ActionButton(action, _view)\n\t\t\tbox.pack_start(button.make(frame), False, False, 0)",
"def createLayout(self):\n return _libsbml.LayoutModelPlugin_createLayout(self)"
] | [
"0.6015329",
"0.57742697",
"0.56836456",
"0.5482166",
"0.54532725",
"0.5418142",
"0.53833425",
"0.53744787",
"0.5351992",
"0.53280634",
"0.53216034",
"0.53180486",
"0.5310005",
"0.53014076",
"0.52938443",
"0.524624",
"0.5244449",
"0.5193772",
"0.51780003",
"0.5163845",
"0.51432645",
"0.51365906",
"0.5109504",
"0.5081305",
"0.5066193",
"0.50623393",
"0.50518054",
"0.5044208",
"0.5042881",
"0.49738672"
] | 0.67626137 | 0 |
Gets a Hypervisor host profile. [Arguments] | def fusion_api_get_hypervisor_host_profile(self, uri=None, param='', api=None, headers=None):
return self.host_profile.get(uri, api, headers, param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_hypervisor_cluster_profile(self, uri=None, param='', api=None, headers=None):\n return self.cluster_profile.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_hypervisor_host(self, uri=None, param='', api=None, headers=None): # pylint: disable=unused-argument\n return self.hypervisor_host.get(uri, api, headers, param='')",
"def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)",
"def hosting_environment_profile(self) -> pulumi.Output[Optional['outputs.HostingEnvironmentProfileResponse']]:\n return pulumi.get(self, \"hosting_environment_profile\")",
"def hosting_environment_profile(self) -> pulumi.Output[Optional['outputs.HostingEnvironmentProfileResponse']]:\n return pulumi.get(self, \"hosting_environment_profile\")",
"def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)",
"def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]",
"def get_user_profile(self):\n\t\treturn Job(SDK.PrlSrv_GetUserProfile(self.handle)[0])",
"def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')",
"def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)",
"def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()",
"def getprofile(): # real signature unknown; restored from __doc__\n pass",
"def get_profile(request):\n p_obj = Profile.objects.filter(hashid=request.session.get('profile', '-'))\n if len(p_obj):\n return p_obj[0]\n else:\n return None",
"def get_hypervisor_info(self):\n try:\n req = Request(self.compute_url +\n \"/os-hypervisors/detail\" )\n self._upgrade_to_authenticated_request(req)\n resp = urlopen(req)\n content = resp.read().decode('utf-8')\n encoded = json.loads(content)\n resp.close()\n except URLError as e:\n return {}\n except Exception as e:\n raise Exception(\"Unable to process compute reponse: %s\" % e)\n\n return encoded['hypervisors']",
"def show_network_profile(self, profile, **params):\r\n return self.get(self.network_profile_path % (profile), params=params)",
"def ex_get_hypervisor_hostname(self):\n hostname = self.connection.getHostname()\n return hostname",
"def compute_profile(self) -> 'outputs.ClusterPoolResourcePropertiesResponseComputeProfile':\n return pulumi.get(self, \"compute_profile\")",
"def get_supervisor_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_SUPERVISOR_INFO)",
"def _get_profile(self):\n return self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"profile\")\n )",
"def get_hypervisor(self, graph_db):\n node = neo_resource.get_node_by_property(graph_db,\n self.label,\n property_key='hostname',\n property_value=self.hostname)\n return node",
"def profile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"profile\")",
"def get_host_info(self, args, get_all=False):\n return None",
"def get_profile(profile_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProfileResult:\n __args__ = dict()\n __args__['profileName'] = profile_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:network/v20220401preview:getProfile', __args__, opts=opts, typ=GetProfileResult).value\n\n return AwaitableGetProfileResult(\n allowed_endpoint_record_types=pulumi.get(__ret__, 'allowed_endpoint_record_types'),\n dns_config=pulumi.get(__ret__, 'dns_config'),\n endpoints=pulumi.get(__ret__, 'endpoints'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n max_return=pulumi.get(__ret__, 'max_return'),\n monitor_config=pulumi.get(__ret__, 'monitor_config'),\n name=pulumi.get(__ret__, 'name'),\n profile_status=pulumi.get(__ret__, 'profile_status'),\n tags=pulumi.get(__ret__, 'tags'),\n traffic_routing_method=pulumi.get(__ret__, 'traffic_routing_method'),\n traffic_view_enrollment_status=pulumi.get(__ret__, 'traffic_view_enrollment_status'),\n type=pulumi.get(__ret__, 'type'))",
"def profile(self):\n return self._profile",
"def profile(self):\n return self.__profile",
"def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")",
"def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")",
"def get_supervisor_stats(hass):\n return hass.data.get(DATA_SUPERVISOR_STATS)",
"def get_profile_stats():\n return p_stats",
"def get_profile():\n if environ['DB_INSTANCE'] in request.url_root:\n profile_id = request.form['id']\n profile = ndb.Key(Profile, profile_id).get()\n if profile is not None:\n activity_data = json.loads(profile.activity_data)\n items = activity_data.get('items', [])\n item = items[0]\n return json.dumps(item)\n \n # else (not DB_INSTANCE)\n return ''"
] | [
"0.6291275",
"0.62387",
"0.59896106",
"0.5946244",
"0.5946244",
"0.5942801",
"0.5914829",
"0.58072954",
"0.5784481",
"0.5778",
"0.57352436",
"0.5682452",
"0.5610289",
"0.5591172",
"0.55779195",
"0.5567231",
"0.5545119",
"0.5533411",
"0.55181134",
"0.5480795",
"0.5428293",
"0.54232204",
"0.5408108",
"0.5398334",
"0.53844804",
"0.5368962",
"0.5368962",
"0.53324777",
"0.53305846",
"0.532681"
] | 0.80805653 | 0 |
Updates a hypervisor host profile. [Arguments] | def fusion_api_update_hypervisor_host_profile(self, uri=None, body=None, api=None, headers=None):
return self.host_profile.update(body, uri, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass",
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)",
"def update(self,\n tunnel_profile_id,\n ip_sec_vpn_tunnel_profile,\n ):\n return self._invoke('update',\n {\n 'tunnel_profile_id': tunnel_profile_id,\n 'ip_sec_vpn_tunnel_profile': ip_sec_vpn_tunnel_profile,\n })",
"def update_flavor_profile(request, **kwargs):\n data = request.DATA\n flavor_profile_id = data['flavor_profile']['id']\n\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.update_flavor_profile(\n flavor_profile_id,\n name=data['flavor_profile'].get('name'),\n provider_name=data['flavor_profile'].get('provider_name'),\n flavor_data=data['flavor_profile'].get('flavor_data'),\n )\n\n return _get_sdk_object_dict(flavor_profile)",
"def update_my_user_profile(SshPublicKey=None):\n pass",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)",
"def test_update_hyperflex_node_profile(self):\n pass",
"def update(self, **kwargs):\n\n host = self.get()\n if not host:\n self.raiseNotFoundError()\n return host.update(**kwargs)",
"def _update(self, host):\n pass",
"def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)",
"def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })",
"def update_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def update_profile(username):\n\n description = request.json.get('description')\n token = request.headers.get('token')\n\n if description is None:\n return jsonify({'message': 'New description not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != username:\n return jsonify({'message': \"You may not edit others profiles\"}), 404\n\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n Profiles[username]['description'] = description\n return Profiles[username]",
"def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass",
"def fusion_api_edit_server_profile_template(self, body, uri, api=None, headers=None):\n return self.profile_template.update(body, uri, api, headers)",
"def put(self, request, flavor_profile_id):\n update_flavor_profile(request)",
"def _edit_server_hardware(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n for profile in profile_obj:\n if not selenium2lib._is_element_present(FusionServerHardwarePage.ID_PAGE_LABEL):\n base_page.navigate_base(FusionServerHardwarePage.ID_PAGE_LABEL,\n FusionUIBaseElements.ID_MENU_LINK_SERVER_HARDWARE, \"css=span.hp-page-item-count\")\n if not serverhardware.power_off_server_by_name(profile.server):\n logger._warn(\"Failed to powerOff the server %s\" % profile.server)\n logger._warn(\"Can't proceed with server profile creation on server %s\" % profile.server)\n continue\n # Navigating to Server profile page\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n ui_lib.wait_for_element(FusionUIBaseElements.ID_MAIN_MENU_CONTROL, PerfConstants.DEFAULT_SYNC_TIME)\n navigate()\n\n profile_list = [el.text for el in selenium2lib._element_find(FusionServerProfilesPage.ID_PROFILE_LIST_NAMES, False, False)]\n if profile.profilename not in profile_list:\n logger._warn(\"Profile '%s' does not exist\" % profile.profilename)\n continue\n if profile.server == \"\":\n logger._warn(\"Mandatory fields to edit server hardware can't be empty\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_MAIN_ACTION)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_MENU_ACTION_EDIT)\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._log_to_console_and_log_file(\"Server is not powered off, and switching off now\")\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_POWER_PRESS_AND_HOLD)\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_SERVER_POWER_OFF_VALIDATE, PerfConstants.SERVER_POWER_OFF)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_DROPDOWN_SEARCH_SERVER_HARDWARE)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_EDIT_SEARCH_HARDWARE)\n if profile.unassign == \"unassigned\":\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.unassign)\n logger._log_to_console_and_log_file(\"Unassigning the server profile\")\n else:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_UPDATE_SERVER_PROFILE)\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_OFF_ERROR):\n logger._warn(\"Failed to power off the server %s\" % profile.server)\n else:\n logger._log_to_console_and_log_file(\"Successfully server %s 
is powered off\" % profile.server)\n\n ui_lib.wait_for_element(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION)\n # New Code\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION):\n errMsg = selenium2lib._get_text(FusionServerProfilesPage.ID_ADD_PROFILE_NOTIFICATION_CONTENT)\n logger._warn(errMsg)\n logger._warn(\"Unable to edit profile server hardware %s\" % profile.profilename)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CANCEL_SERVER_PROFILE)\n continue\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n strTimeStamp = selenium2lib._get_text(FusionServerProfilesPage.ID_UPDATE_PROFILE_TIMESTAMP)\n logger._log_to_console_and_log_file(strTimeStamp)\n\n # Verify profile server hardware updation status in server profile page (Under Activity tab)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_OVERVIEW)\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_ACTIVITY)\n\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp), PerfConstants.CREATE_SERVER_PROFILE_TIME)\n\n if selenium2lib._is_element_present(FusionServerProfilesPage.ID_PROFILE_CREATION_STATUS % (\"Update\", strTimeStamp)):\n logger._log_to_console_and_log_file(\"Server profile '%s' is edited successfully\" % profile.profilename)\n else:\n logger._warn(\"Failed to edit server profile '%s' hardware\" % profile.profilename)",
"def edit_server_profile_for_dl(profile_obj):\n # This keyword is deprecated, please do not use.\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n\n total = len(profile_obj)\n not_exists = 0\n edited = 0\n\n for n, profile in enumerate(profile_obj):\n logger.info(\"{2} No: {0} --- Total: {1} {2}\".format((n + 1), total, '-' * 14))\n\n logger.info(\"editing a server profile with name '%s' ...\" % profile.name)\n if not VerifyServerProfile.verify_server_profile_exist(profile.name, fail_if_false=False):\n logger.warn(\"server profile '%s' does not exist\" % profile.name)\n not_exists += 1\n continue\n # - Prep the auto_power_off switch\n # - By default, this keyword will power off the server if it's powered on -- unless the attribute 'auto_power_off' is explicitly set to 'false'\n auto_power_off = False if getattr(profile, 'auto_power_off', '').lower() == 'false' else True\n # open Edit SP dialog and enter data ...\n CommonOperationServerProfile.click_server_profile(profile.name)\n EditServerProfile.select_action_edit()\n EditServerProfile.wait_edit_server_profile_dialog_shown()\n\n EditServerProfile.input_name(profile.newName)\n EditServerProfile.input_description(profile.desc)\n # Input 'Server hardware'\n # - input server name,\n # - select option from the popped out drop-down list,\n # - verify the server hardware is refreshed to the type name displayed in the drop-down list for selecting server hardware\n if not EditServerProfile.input_select_server_hardware(profile.server, auto_power_off=auto_power_off):\n logger.warn(\"server hardware '%s' is not selected for editing server profile, may be wrong name, or powered on but failed to power it off. \"\n \"test will skip this profile '%s' and continue to edit other server profiles\" % (profile.server, profile.name))\n continue\n msg = EditServerProfile.get_error_message_from_server_hardware()\n if msg is not None:\n logger.warn(\"error occurred, server profile can not be edited successfully\")\n ui_lib.fail_test(msg)\n sht_selected = EditServerProfile.get_selected_server_hardware_type(profile.server)\n if profile.hardwaretype not in sht_selected:\n logger.warn(\"the server hardware type of server '%s' is NOT consistent with test data '%s'\" % (sht_selected, profile.hardwaretype))\n # set boot mode if attribute 'manageBootMode' is true - only for Gen 9 (or later) server:\n FusionUIBase.select_view_by_name('Boot Settings')\n if 'gen9' in sht_selected.lower():\n logger.info(\"setting 'Boot mode' for Gen 9 specially ...\")\n if getattr(profile, 'manageBootMode', '').lower() == 'true':\n CommonOperationServerProfile.BootSettings.tick_manage_boot_mode()\n CommonOperationServerProfile.BootSettings.select_boot_mode_by_text(profile.bootMode) if hasattr(profile, 'bootMode') else None\n if getattr(profile, 'bootMode', '').lower() == 'legacy bios':\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n else:\n CommonOperationServerProfile.BootSettings.set_non_legacy_bios_mode_boot_order(profile, hardware_type=sht_selected)\n else:\n CommonOperationServerProfile.BootSettings.untick_manage_boot_mode()\n else:\n CommonOperationServerProfile.BootSettings.set_legacy_bios_mode_boot_order(profile)\n\n EditServerProfile.click_ok_button()\n # if EditServerProfile.get_error_message_from_boot_mode() is not None:\n if CommonOperationServerProfile.BootSettings.get_error_message_from_boot_mode() is not None:\n logger.warn(\"test data may be wrongly defined for 'Boot mode', which caused an error that 
blocks profile being edited. \"\n \"Test will skip this profile '%s' and continue to edit other server profiles\" % profile.name)\n continue\n\n status, msg = FusionUIBase.get_error_message_from_dialog(timeout=10)\n if status is True:\n logger.warn(\"unexpected error occurred: %s\" % msg)\n ui_lib.fail_test(msg)\n\n EditServerProfile.wait_edit_server_profile_dialog_disappear(timeout=180)\n FusionUIBase.show_activity_sidebar()\n FusionUIBase.wait_activity_action_ok(profile.newName, 'Update', timeout=300, fail_if_false=False)\n FusionUIBase.show_activity_sidebar()\n CommonOperationServerProfile.wait_server_profile_status_ok(profile.newName, timeout=180, fail_if_false=False)\n logger.info(\"edited server profile '%s' successfully\" % profile.newName)\n edited += 1\n\n logger.info(\"{0} == Summary == {0}\".format('-' * 14))\n if total - not_exists == 0:\n logger.warn(\"no server profile to edit! all %s server profile(s) is NOT existing, hence test is considered PASS\" % not_exists)\n return True\n else:\n if edited < total:\n logger.warn(\"not all of the server profile(s) is successfully edited - %s out of %s edited \" % (edited, total))\n if edited + not_exists == total:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, hence test is considered PASS\" % not_exists)\n return True\n else:\n logger.warn(\"%s non-existing server profile(s) is skipped being edited, but %s profile(s) left is failed being edited \" % (not_exists, total - edited - not_exists))\n return False\n\n logger.info(\"all of the server profile(s) is successfully edited - %s out of %s \" % (edited, total))\n return True",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def setprofile(variable, value, account, pair):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n keys = []\n values = []\n if pair:\n for p in pair:\n key, value = p.split(\"=\")\n keys.append(key)\n values.append(value)\n if variable and value:\n keys.append(variable)\n values.append(value)\n\n profile = Profile(keys, values)\n\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n\n json_metadata = Profile(acc[\"json_metadata\"] if acc[\"json_metadata\"] else {})\n json_metadata.update(profile)\n tx = acc.update_account_profile(json_metadata)\n tx = json.dumps(tx, indent=4)\n print(tx)",
"def update_apero_profile(params: Dict[str, Any], profile: int) -> Any:\n # deal with profile 1 or profile 2\n if profile == 1:\n profile_path = params['profile1']\n install_path = params.get('apero install 1', None)\n elif profile == 2:\n profile_path = params['profile2']\n install_path = params.get('apero install 2', None)\n else:\n emsg = 'profile must be 1 or 2'\n raise AperoCopyError(emsg)\n # use os to add DRS_UCONFIG to the path\n os.environ['DRS_UCONFIG'] = profile_path\n # allow getting apero\n if install_path is not None:\n sys.path.append(install_path)\n # load apero modules\n from apero.base import base\n from apero.core import constants\n from apero.core.constants import param_functions\n from apero.core.utils import drs_startup\n # reload DPARAMS and IPARAMS\n base.DPARAMS = base.load_database_yaml()\n base.IPARAMS = base.load_install_yaml()\n # ------------------------------------------------------------------\n apero_params = constants.load(cache=False)\n # invalidate cache\n param_functions.CONFIG_CACHE = dict()\n # set apero pid\n apero_params['PID'], apero_params['DATE_NOW'] = drs_startup.assign_pid()\n # no inputs\n apero_params['INPUTS'] = dict()\n apero_params['OBS_DIR'] = None\n # make sure parameters is reloaded (and not cached)\n return apero_params",
"def update(self, host, values):\n body = dict(host=values)\n return self._update(\"/os-hosts/%s\" % host, body, response_key='host')",
"def set_profile_version(context, profile_id, version):\n\n check_profile_id(profile_id)\n ps = getToolByName(context, 'portal_setup')\n\n ps.setLastVersionForProfile(profile_id, unicode(version))\n assert(ps.getLastVersionForProfile(profile_id) == (version, ))\n print \"Set version for '%s' to '%s'.\" % (profile_id, version)",
"def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200"
] | [
"0.6738024",
"0.63721013",
"0.6303127",
"0.6171435",
"0.6132946",
"0.6038028",
"0.59906864",
"0.5918259",
"0.5901422",
"0.58302",
"0.5811974",
"0.5763593",
"0.57516897",
"0.5698796",
"0.56739473",
"0.567078",
"0.5663646",
"0.56575376",
"0.56191176",
"0.55845803",
"0.5581878",
"0.5513214",
"0.54661506",
"0.5463933",
"0.54479927",
"0.54298484",
"0.5416579",
"0.5414281",
"0.5396372",
"0.53786075"
] | 0.80823314 | 0 |
Edit the remote support to initiate remote support registration [Example] ${resp} = Fusion Api Edit Remote Support | | | | def fusion_api_edit_remote_support(self, body, api=None, headers=None):
return self.remote_support.update(body, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit(cm_response, **data):\n return cm_response",
"def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)",
"def setResponse(self, response):\n if(Debug_Level==2):\n print'response =',response \n #removing the end line and splitting \n words = response.replace('\\'','').strip().split(',') #Stripping and Splitting \n\n if(len(words)>1):\n self.RC_COM = int(words[1])\n words2 = words[2].split(':')\n self.TrID = int(words2[0])\n self.RC = int(words2[1])\n self.parameters = words[3:len(words)]\n if(self.RC!=0 and Debug_Level==1):\n print 'Problem, Error code:', self.RC",
"def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)",
"def setup_response(self, system, location, definition, descriptor):\r\n pass",
"def do_response(data):\n def on_done(i):\n if i == -1:\n return\n\n cite_key = data[i][2]\n view = sublime.active_window().active_view()\n view.run_command(\"dblp_insert_result\", {\"text\": cite_key})\n\n sublime.active_window().show_quick_panel(data, on_done)",
"def update_response(self, response):\r\n self.stri.update_response(response)",
"def update_response(self, response):\r\n self.stri.update_response(response)",
"def respond(self, response):\n self.response = response",
"async def support(self, ctx):\n await ctx.send('Support server:\\nhttps://discord.gg/dU39sjq')",
"def update():\n return 'update api in put'",
"def getUpdaterResponse(ip, port):\n template = \"\"\"<HTML>\n <BODY>\n Default updater response\n </BODY>\n {1.2.3.4}\"\"\"\n\n ip_section = \"\"\n port_section = \"\"\n\n if ip:\n ip_section = \"<IP>%s</IP>\" % ExplosiveEnc.encode_conf(ip)\n\n if port:\n port_section = \"<PORT>%s</PORT>\" % ExplosiveEnc.encode_conf(port)\n\n template += \"\\n %s \\n %s\" % (ip_section, port_section)\n template += \"\\n</HTML>\\n\\n\"\n\n return template",
"def respond(self, obj):\r\n url = '{0}/{1}'.format(self.get_url(), 'respond')\r\n request = http.Request('PUT', url, {'response': obj})\r\n\r\n return request, parsers.parse_json",
"def ProcessRemoteCommandsRequest(self):\n return (200, '')",
"async def register_completions(ls: RobotFrameworkLanguageServer, *args):\n params = RegistrationParams([Registration(str(uuid.uuid4()), COMPLETION, {\"triggerCharacters\": \"[':']\"})])\n response = await ls.register_capability_async(params)\n if response is None:\n ls.show_message(\"Successfully registered completions method\")\n else:\n ls.show_message(\"Error happened during completions registration.\", MessageType.Error)",
"def api():\n\treturn \"The API call\"",
"def get_initial_response():\n # Message to the user\n message = {\n 'apiVersion': 'v1.1',\n 'status': 'Online',\n 'message': 'Welcome to the Space Object Registry API. Refer to the documentation on https://github.com/wdelenclos/messier-registry.',\n 'sources' : sources\n }\n # Making the message looks good\n resp = jsonify(message)\n # Returning the object\n return resp",
"async def support(self, ctx: commands.Context) -> None:\n embed = (\n discord.Embed(\n title=\"Supported Services\",\n color=0x00FFCC,\n url=\"https://d.chulte.de\",\n )\n .add_field(\n name=\"YouTube\",\n value=\"Video Urls\\nVideo Search Terms\\nPlaylist Urls\",\n )\n .add_field(\n name=\"Spotify\",\n value=\"Track Links\\nAlbum Links\\nArtist Top-Tracks\\nPlaylists\",\n )\n )\n await ctx.send(embed=embed)",
"async def support(self, ctx):\n await ctx.send('Join the support server here: https://discord.gg/bAq8Ec5JPQ')",
"def register(args, config):\n\n api = config['API']\n r = Request(api['register'], method='GET')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n print('UH OH!')\n return\n # read in the template we got from the server\n jsn = json.loads(resp.read().decode())\n out = {}\n reqs = jsn.get('RegistrationRequirements')\n w = \"\"\"\n| *---------------------------------------------------------------* |\n| Welcome to FLEET, new user! Please follow the prompt to register. |\n| *---------------------------------------------------------------* |\n\"\"\"\n print(w)\n print('\\nPlease provide the following information: \\n')\n for k, v in reqs.items(): # prompt and assign to out\n m = '{} (Requirements: {}): '.format(k, v)\n if k.lower() == 'password':\n out[k.lower()] = getpass.getpass(m) # make keys lowercase\n else:\n out[k.lower()] = input(m)\n r = Request(\n api['register'], data=urlencode({'RegistrationInfo': out}).encode(), \n method='POST'\n )\n try:\n resp = urlopen(r)\n jsn = json.loads(resp.read().decode())\n except HTTPError as e:\n print('Something went wrong processing your request to register')\n return\n if jsn.get('errors'):\n print('Some errors were found. Please fix the following and retry:\\n')\n for e in jsn.get('errors'):\n print(e)\n else:\n info = jsn.get('registered')\n print('You have been successfully registered:\\n{}'.format(info))",
"def register_server():\n (code, message) = rest_api.register_server(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def fhir_enquiry(request, context_override={}):\n\n state = get_state(CLIENT_ID,AUTH_URL)\n code = get_code(CLIENT_ID,AUTH_URL)\n\n # set default context\n context = {}\n context['template'] = \"result.html\"\n context['get_fmt'] = \"json\"\n context['display'] = \"Me\"\n context['code'] = code\n context['state'] = state\n context['ask'] = \"/api/v1/me?_format=json\"\n context['url'] = settings.OAUTH_TEST_INFO['BASE']\n context['headers'] = {'content-type': 'application/x-www-form-urlencoded',\n 'Authorization': \"Bearer \"+ get_code(CLIENT_ID, AUTH_URL)},\n\n # add / overwrite anything in context_override\n context = update_dict(context, context_override)\n\n data = {'code': code,\n 'grant_type': 'authorization_code',\n 'key': 'access_token',\n #'key': 'refresh_token',\n 'access_token': get_access(state),\n 'refresh_token': get_refresh(state),\n 'redirect_uri': REDIRECT_URI}\n\n if settings.DEBUG:\n print(\"Context after update:\", context)\n print(\"Data:\", data)\n\n print(\"SERVICE:\", SERVICE )\n\n # Get access_token\n headers = {}\n print('Context Headers:', dict(context['headers'][0]))\n #headers = {'headers': update_dict(headers, context_override=dict(context['headers'][0]))}\n headers = update_dict(headers, context_override=dict(context['headers'][0]))\n print(\"Headers:\", headers)\n\n kw_to_send = {'data': data, 'headers': headers}\n\n #session = SERVICE.get_auth_session(method=\"POST\",**kw_to_send)\n #session = SERVICE.get_session(get_access(state))\n #session = SERVICE.get_raw_access_token(method=\"POST\", **kw_to_send)\n session = SERVICE.get_raw_access_token(data=data)\n\n #response = SERVICE.get_access_token(method=\"POST\")\n # response = SERVICE.get_auth_session(data=data)\n print(\"Auth Session\", session)\n #response = SERVICE.get_raw_access_token(data=data, **headers)\n\n get_text = session.json()\n\n if 'access_token' in get_text:\n print(\"got an access token\")\n access = save_tokens(state,\n get_text['access_token'],\n get_text['refresh_token'])\n\n print(\"RESPONSE:\", get_text)\n # RESPONSE: {\"expires_in\": 36000,\n # \"access_token\": \"h1vY5eDu69JKfV4nPpdu8xEan63hKl\",\n # \"scope\": \"patient/*.read write_consent\",\n # \"token_type\": \"Bearer\",\n # \"refresh_token\": \"6HZnSwhfsGvfr9Aguw5n0e5CoGr8CQ\"}\n\n\n sesn = SERVICE.get_session(get_text['access_token'])\n print(\"SESSION:\", sesn)\n\n r = sesn.get(context['url'] + context['ask'])\n\n if settings.DEBUG:\n print(\"R:\", r.content)\n\n return r",
"def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"",
"def registr(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"Bio of %s: %s\", user.first_name, update.message.text)\n update.message.reply_text('REGISTRAZIONE DA IMPLEMENTARE, alla prossima.')\n\n return ConversationHandler.END",
"def _post_researcher(current_time, res=None):\n\n if res is None:\n res = api_types.Researcher(\n f\"Test-{current_time}\",\n \"Last Name\",\n f\"{LIMS_API.tools.api.host}labs/1\",\n \"[email protected]\",\n \"\")\n\n template_path = (os.path.join(\n os.path.split(__file__)[0], \"post_researcher_template.xml\"))\n with open(template_path, 'r') as file:\n template = Template(file.read())\n response_xml = template.render(\n first_name=res.first_name,\n last_name=res.last_name,\n lab_uri=res.lab_type,\n email=res.email)\n url = f\"{LIMS_API.tools.api.host}researchers\"\n return LIMS_API.tools.api.post(url, response_xml)",
"def set_response(self, response_str):\r\n input_css = \"textarea.short-form-response\"\r\n self.q(css=input_css).fill(response_str)",
"def _help(update, context):\n message = '''This bot will fetch data from some public APIs, insert fetched data into \\\nGoogle spreadsheets and send url of the spreadsheet to the user. \\n\n/fetch - fetch data and return url of spreadsheet.\n/help - return help message'''\n update.message.reply_text(message)",
"def send_to_regist():\n\treturn render_template(\"/registration.html\")",
"def updateResourceDef(url, user, pWd, resourceName, resJson):\n \n print(\"\\tupdating resource for catalog:-\" + url + \" resource=\" + \n resourceName + ' user=' + user)\n print(\"\\t\" + json.dumps(resJson))\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"} \n tResp = requests.put(apiURL, data=json.dumps(resJson), headers=header, \n auth=HTTPBasicAuth(user, pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n print(\"\\tyay - update resource worked...\")\n print(tResp)\n return tResp.status_code\n else:\n # not valid\n print(\"\\tdarn - update resource failed...\")\n print(tResp)\n return tResp.status_code",
"def define(update, context):\n word = update.message.text\n output = make_output(word)\n if output:\n response_message = output\n else:\n response_message = 'Sorry, I was unable to complete that request.'\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=response_message)"
] | [
"0.56080025",
"0.54728335",
"0.5336991",
"0.53315264",
"0.52677256",
"0.5175285",
"0.51725364",
"0.51725364",
"0.5153644",
"0.512804",
"0.5109952",
"0.50710225",
"0.5068613",
"0.50233895",
"0.50152797",
"0.5004218",
"0.49749124",
"0.49616578",
"0.49452564",
"0.49318555",
"0.49236748",
"0.49102622",
"0.48343807",
"0.48319653",
"0.4818849",
"0.48187453",
"0.48142958",
"0.48131743",
"0.48022628",
"0.47808266"
] | 0.5913376 | 0 |
Get the remote support configuration details [Example] ${resp} = Fusion Api Get Configuration | | | | def fusion_api_get_configuration(self, uri=None, param='', api=None, headers=None):
return self.configuration.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config(req):\n #try:\n # user_id = req.user\n #except KeyError as e:\n # msg = req.get_error_msg(e)\n # return send_error_response(msg)\n try:\n config = tools_config_get_config(req)\n except Exception:\n raise http_exc.HTTPClientError()\n else:\n return Response(json_body=json.dumps(config), content_type='application/json')",
"def get(self, request, format=None):\n return Response({k: getattr(config, k) for k in list(dir(config))})",
"def processGetConfig(self, msg):\r\n resp = MsgHelper.createResponse(Messages.RSP_GET_CONFIG, msg)\r\n resp[RunInto] = self.runInto\r\n resp[ExecDelay] = self.execDelay\r\n resp[ByStep] = self.stepByStep\r\n return resp",
"def get_config(site='self'):\n path='/sites/%s/configuration' % (site)\n return _api_request('GET', path)",
"def _get_config():\n resp = requests.get(TRAEFIK_API_URL)\n if not resp.ok:\n raise Exception(\n \"Bad traefik response: %s %s\" % (resp.status_code, resp.text)\n )\n return resp.json()",
"def configuration_info(self) -> Optional['outputs.ConfigurationInfoResponse']:\n return pulumi.get(self, \"configuration_info\")",
"def config_get():\n server_config = db.get().server_config_get()\n\n if not server_config:\n return flask.jsonify({\n \"message\": \"Netmet server has not been setup yet\"}), 404\n\n return flask.jsonify(server_config), 200",
"def config(self) -> 'outputs.DeviceConfigResponse':\n return pulumi.get(self, \"config\")",
"def getCampaignConfig(docName, url=reqmgr_url):\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"application/json\"}\n conn = make_x509_conn(url)\n url = '/reqmgr2/data/campaignconfig/%s' % docName\n conn.request(\"GET\", url, headers=headers)\n r2 = conn.getresponse()\n data = json.loads(r2.read())\n return data['result']",
"def config(self) -> pulumi.Output['outputs.ConfigResponse']:\n return pulumi.get(self, \"config\")",
"def get(url, **_):\n # Checks input parameters\n assert '/configuration/%s' % dummy_id in url\n\n # Returns fake response\n response = requests.Response()\n response._content = response_json\n response.status_code = 200\n return response",
"def get_config():\n return {'address': ADDRESS, 'https': HTTPS == 'https',\n 'password': PASSWORD, 'username': USERNAME,\n 'port': PORT, 'version': VERSION}",
"async def test_api_get_config(hass: HomeAssistant, mock_api_client: TestClient) -> None:\n resp = await mock_api_client.get(const.URL_API_CONFIG)\n result = await resp.json()\n if \"components\" in result:\n result[\"components\"] = set(result[\"components\"])\n if \"whitelist_external_dirs\" in result:\n result[\"whitelist_external_dirs\"] = set(result[\"whitelist_external_dirs\"])\n if \"allowlist_external_dirs\" in result:\n result[\"allowlist_external_dirs\"] = set(result[\"allowlist_external_dirs\"])\n if \"allowlist_external_urls\" in result:\n result[\"allowlist_external_urls\"] = set(result[\"allowlist_external_urls\"])\n\n assert hass.config.as_dict() == result",
"def get(self, session: Session = None) -> Response:\n return jsonify(self.manager.config)",
"def get_api_config():\n\n try:\n base_url = os.environ['FLEXNOW_API_BASE_URL']\n except KeyError:\n base_url = \"https://flexnow-uat.eu.flextrade.com/api/v3\"\n\n try:\n client_id = os.environ['FLEXNOW_API_CLIENT_ID']\n secret_token = os.environ['FLEXNOW_API_SECRET_TOKEN']\n except KeyError as e:\n print(f\"Environment {e} must be set\")\n sys.exit(1)\n\n return {\n \"base_url\": base_url,\n \"client_id\": client_id,\n \"secret_token\": secret_token\n }",
"def GetConfig(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def config():\n return {\n \"CLEAN_OUTBOX\": \"TRUE\",\n \"COMPONENT_NAME\": \"testing-unpacker\",\n \"DEST_SITE\": \"WIPAC\",\n \"FILE_CATALOG_REST_TOKEN\": \"fake-file-catalog-token\",\n \"FILE_CATALOG_REST_URL\": \"http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"unpacking\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"completed\",\n \"PATH_MAP_JSON\": \"/tmp/lta/testing/path_map.json\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"NERSC\",\n \"UNPACKER_OUTBOX_PATH\": \"/tmp/lta/testing/unpacker/outbox\",\n \"UNPACKER_WORKBOX_PATH\": \"/tmp/lta/testing/unpacker/workbox\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }",
"def get_config():\n return CONFIG",
"def get_cfg (conn, url):\n\n res = []\n\n try:\n csr = conn.cursor()\n\n cmd = \"SELECT * FROM {tbl} WHERE {col1} = \\\"{val1}\\\";\".\\\n format(tbl = _tbl_config,\n col1 = _tbl_config_col1, val1 = url)\n print(cmd)\n\n csr.execute(cmd)\n\n for row in csr:\n res.append(row)\n\n csr.close()\n\n except Exception as ex:\n print(\"Error - get_cfg: {0}\".format(ex))\n rc_err = ex.args[0]\n return rc_err\n\n return rc_ok, res",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get(self, session: Session = None) -> Response:\n with open(self.manager.config_path, encoding='utf-8') as f:\n raw_config = base64.b64encode(f.read().encode(\"utf-8\"))\n return jsonify(raw_config=raw_config.decode('utf-8'))",
"def output_config() -> Response:\n c = dict(config)\n c['password'] = \"*********\"\n return jsonify(c)",
"def get(request_url, **_):\n # Checks input parameters\n assert '/configuration' in request_url\n\n # Returns fake response\n response = requests.Response()\n response._content = response_json\n return response",
"def _config_path(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n return res['path']",
"def settings_config(session, return_type=None, **kwargs):\n path = '/api/return_type.json'\n return session.get_api(path=path, return_type=return_type, **kwargs)",
"def get_vpsa_config(session, return_type=None, **kwargs):\n path = '/api/config.json'\n\n return session.get_api(path=path, return_type=return_type, **kwargs)",
"def get_ha_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/high-availability\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get_config():\n\n return json.loads(CONFIG_FILE.read_text())",
"def fusion_api_get_email_config(self, api=None, headers=None, param=''):\n return self.email.get(api=api, headers=headers, param=param)",
"def getConfig(self, cfg_path, var_path=''):\n return self.ce_proxy.getConfig(self.userName, cfg_path, var_path)"
] | [
"0.66445255",
"0.61464477",
"0.614004",
"0.6097132",
"0.6057521",
"0.6036706",
"0.6006283",
"0.5937391",
"0.5936521",
"0.590213",
"0.5879762",
"0.5877101",
"0.5839141",
"0.58137393",
"0.5808278",
"0.57864505",
"0.5757012",
"0.5752552",
"0.57210034",
"0.5696712",
"0.5687088",
"0.5680289",
"0.56747484",
"0.56558675",
"0.5635165",
"0.5633432",
"0.56289595",
"0.56124777",
"0.5611068",
"0.56088144"
] | 0.62298936 | 1 |
Create OS Deployment Server. [Arguments] | def fusion_api_create_os_deploymentserver(self, body, api=None, headers=None):
return self.osds.create(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_server(DisableAutomatedBackup=None, Engine=None, EngineModel=None, EngineVersion=None, EngineAttributes=None, BackupRetentionCount=None, ServerName=None, InstanceProfileArn=None, InstanceType=None, KeyPair=None, PreferredMaintenanceWindow=None, PreferredBackupWindow=None, SecurityGroupIds=None, ServiceRoleArn=None, SubnetIds=None, BackupId=None):\n pass",
"def create_new_server(flavor=None, image=None, key_name=None, name=None, size=100):\r\n #server = nova.servers.create(name=name, flavor=flavor.id,\r\n # image=image.id, key_name=key_name)\r\n kwargs = {}\r\n if flavor.disk == 0:\r\n\tblock_device_mapping_v2 = [{\r\n 'boot_index': '0',\r\n 'delete_on_termination': True,\r\n 'destination_type': 'volume',\r\n 'uuid': image.id,\r\n 'source_type': 'image',\r\n 'volume_size': str(size),\r\n }]\r\n\tkwargs['block_device_mapping_v2'] = block_device_mapping_v2\r\n\timage = None\r\n \r\n server = nova.servers.create(name, image, flavor, key_name=key_name, **kwargs)\r\n \r\n print 'Building, {0} please wait...'.format(name)\r\n\r\n # wait for server create to be complete\r\n pyrax.utils.wait_until(server, \"status\", \"ACTIVE\", interval=3, attempts=0,verbose=True)\r\n print 'Building, {0} please wait...'.format(name)\r\n\r\n # wait for server create to be complete\r\n while server.status == 'BUILD':\r\n time.sleep(5)\r\n server = nova.servers.get(server.id) # refresh server\r\n \r\n # check for errors\r\n if server.status != 'ACTIVE':\r\n raise RuntimeError('Server did not boot, status=' + server.status)\r\n \r\n # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address\r\n ip_address = None\r\n for network in server.networks['public']:\r\n if re.match('\\d+\\.\\d+\\.\\d+\\.\\d+', network):\r\n ip_address = network\r\n break\r\n if ip_address is None:\r\n raise RuntimeError('No IP address assigned!')\r\n print 'Server is running at IP address ' + ip_address\r\n return ip_address",
"def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv",
"def cmd_apps__create(args):\n \n if args.name is None:\n args.name = os.path.basename(os.getcwd())\n\n url = remote.create_project(args.name)\n \n if in_git_repo():\n if get_push_url('tinyserv') is None:\n git(None, 'remote', 'add', 'tinyserv', url)\n print \"Added remote 'tinyserv'.\"\n else:\n print \"This repository is already configured for app '%s'.\" % \\\n _get_current_project_name()\n \n print \"Remote repository URL is %s.\" % url",
"def create_deployment(StackId=None, AppId=None, InstanceIds=None, LayerIds=None, Command=None, Comment=None, CustomJson=None):\n pass",
"def setup_server():\n\n require('environment', provided_by=env.environments)\n upgrade_packages()\n # Install required system packages for deployment, plus some extras\n # Install pip, and use it to install virtualenv\n install_packages()\n sudo(\"easy_install -i http://d.pypi.python.org/simple -U pip\")\n sudo(\"pip install -i http://d.pypi.python.org/simple -U virtualenv\")\n create_postgis_template()\n create_db_user()\n create_db()\n create_webserver_user()",
"def create_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Creating an instance of tcServer in %s\" % tcserver_dir)\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-instance.sh\", \"create\", instance_name])\n popdir()",
"def create_service(server: Arma3Server):\n file_name = get_service_file_name(server.id)\n user = Settings.local_steam_user\n\n content = \"[Unit]\\nDescription=Arma 3 Server\\n\\n[Service]\\nUser=\"\n content += user\n content += \"\\nGroup=\" + user\n content += \"\\nWorkingDirectory=/home/\" + user\n content += \"\\nExecStart=/bin/bash \" + get_startup_script_file_name(server.id)\n content += \"\\nRestart=always\\n\\n[Install]\\nWantedBy=multi-user.target\\n\"\n\n with open(file_name, 'w') as f:\n f.write(content)\n\n if Settings.debug_windows:\n logger.info(\"windows create service dummy\")\n return\n\n subprocess.check_call([\"sudo\", \"systemctl\", \"daemon-reload\"])",
"def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)",
"def provision_server(self, body):\n if not body:\n raise AssertionError(\"Payload cannot be empty\")\n\n self.nodes = len(body.get('nodes')) if body.get('os') else 1\n\n _cmd = 'mktemp -d'\n workspace = self._remote_cmd(_cmd).get('output')\n xml = self._pre_tasks(body, workspace)\n log = workspace + '/' + 'rg_cpt_deploy.log'\n\n _bin = '/usr/bin/nohup /usr/bin/l2add'\n _cmd = '{} -f {} -c y -r > {} 2>&1 &'.format(_bin, xml, log)\n\n if self._remote_cmd(_cmd, block=False).get('output') is None:\n raise AssertionError(\"Error encountered during provisioning\")\n\n return log",
"def create_server(self, image_name, flavor_name, net_name,\n server_name, **kwargs):\n\n LOG_OBJ.debug(\"Launching server...\")\n\n net_ids = kwargs.get(\"net_ids\", [])\n if not net_ids:\n net_id = self.get_net_id(net_name)\n if not isinstance(net_id, unicode):\n LOG_OBJ.error(\"Problem while getting net_id corresponding\"\n \" to net:%s\" % net_name)\n return\n net_ids.append(net_id)\n\n if not isinstance(net_ids, list):\n net_ids = [net_ids]\n LOG_OBJ.debug(\"Net Name: %s or NetID: %s\" % (net_name, net_ids))\n\n host = kwargs.get('host_name', \"\")\n if host != \"\":\n host = \"nova:\" + host\n\n port_ids = kwargs.get('port_ids', [])\n if not port_ids:\n for net_id in net_ids:\n port_name = server_name + \"_\" + str(net_id)[:5] + \"_port\"\n port_id = self.create_port(net_name, port_name,\n net_id=net_id)\n LOG_OBJ.debug(\"portId is %s\" % port_id)\n if not isinstance(port_id, unicode):\n return\n port_ids.append(port_id)\n\n if not isinstance(port_ids, list):\n port_ids = [port_ids]\n\n boot_nic = []\n for port_id, net_id in zip(port_ids, net_ids):\n boot_nic.append({\"uuid\": net_id, \"port\": port_id})\n\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.project_info[\"project_id\"] + \"/servers\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n # Get the image id.\n image_id = self.get_image_id(image_name)\n if not isinstance(image_id, unicode):\n LOG_OBJ.error(\"Problem while getting image_id corresponding\"\n \" to imageName:%s\" % image_name)\n return\n # GEt the flavor id\n flavor_id = self.get_flavor_id(flavor_name)\n if not isinstance(flavor_id, unicode):\n LOG_OBJ.error(\"Problem while getting flavor_id corresponding\"\n \" to flavorName:%s\" % flavor_name)\n return\n\n _server_info = {\"server\": {\n \"name\": server_name,\n \"imageRef\": image_id,\n \"flavorRef\": flavor_id,\n \"max_count\": 1,\n # \"availability_zone\": host,\n \"min_count\": 1,\n \"networks\": boot_nic\n }}\n\n if host:\n _server_info['server']['availability_zone'] = host\n\n _body = json.dumps(_server_info)\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\n \"Unable to get the response from server while creating VM\")\n return\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create Server Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Server details : %s \" % output)\n\n server_id = output['server']['id']\n LOG_OBJ.debug(\"Server Details: %s\" % output['server'])\n # Default is poll on the server status.\n if kwargs.get('poll_on_status', True):\n out = self.poll_on_server_boot_up(server_id)\n LOG_OBJ.info(\"-> Out: %s, type= %s\" % (out, type(out)))\n if not isinstance(out, unicode):\n return out\n # Default is \"do not return the details\"\n if kwargs.get('return_details', False):\n return output['server']\n\n return server_id",
"def deploy(config, args):\n log = logging.getLogger('kraftwerk.deploy')\n \n # TODO better way to detect new, or maybe move to dedicated command\n stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)\n new = bool(stderr) or args.override\n \n # Sync codebase over with the web user\n destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)\n stdout, stderr = args.project.rsync(destination)\n if stderr:\n log.error(\"Sync error: %s\" % stderr)\n sys.exit(stderr)\n \n # Copy requirements\n args.project.copy(args.node, 'requirements.txt')\n \n # Put together the setup script\n cmd = config.template(\"scripts/project_setup.sh\", \n project=args.project, new=new, \n upgrade_packages=args.upgrade_packages)\n stdout, stderr = args.node.ssh(cmd, pipe=True)\n if stderr:\n print stderr\n \n # TODO detect new services\n if not args.no_service_setup and new:\n for service in args.project.services():\n args.node.ssh(service.setup_script)\n \n print u\"%s live at %r\" % (args.project.canonical_domain(), args.node.hostname)",
"def createManagedDomain():\n selectCustomTemplate(localTemplate)\n loadTemplates()\n # set the Node Manager listen address and listen port.\n cd('/')\n cd('NMProperties')\n set('ListenAddress', hostname)\n #create the domain\n writeDomain(domainPath)",
"def create(ctx, nova_client, **kwargs):\n\n # For possible changes by _maybe_transform_userdata()\n\n server = {\n 'name': ctx.node_id\n }\n server.update(copy.deepcopy(ctx.properties['server']))\n\n ctx.logger.debug(\n \"server.create() server before transformations: {0}\".format(server))\n\n if server.get('nics'):\n raise ValueError(\"Parameter with name 'nics' must not be passed to\"\n \" openstack provisioner (under host's \"\n \"properties.nova.instance)\".format(k))\n\n _maybe_transform_userdata(server)\n\n if ('management_network_name' in ctx.properties) and ctx.properties['management_network_name']:\n nc = os_common.NeutronClient().get(config=ctx.properties.get('neutron_config'))\n managemenet_network_id = nc.cosmo_get_named('network', ctx.properties['management_network_name'])['id']\n server['nics'] = [{'net-id': managemenet_network_id}]\n else:\n managemenet_network_id = None\n # print(server['nics'])\n\n # Sugar\n if 'image_name' in server:\n server['image'] = nova_client.images.find(name=server['image_name']).id\n del server['image_name']\n if 'flavor_name' in server:\n server['flavor'] = nova_client.flavors.find(name=server['flavor_name']).id\n del server['flavor_name']\n\n _fail_on_missing_required_parameters(\n server,\n ('name', 'flavor', 'image', 'key_name'),\n 'server')\n\n # Multi-NIC by networks - start\n network_nodes_runtime_properties = ctx.capabilities.get_all().values()\n if network_nodes_runtime_properties and 'management_network_name' not in ctx.properties:\n # Known limitation\n raise RuntimeError(\"Nova server with multi-NIC requires 'management_network_name' which was not supplied\")\n nics = [\n {'net-id': n['external_id']}\n for n in network_nodes_runtime_properties\n if neutron_client.cosmo_is_network(n['external_id'])\n ]\n if nics:\n server['nics'] = server.get('nics', []) + nics\n # Multi-NIC by networks - end\n\n # Multi-NIC by ports - start\n port_nodes_runtime_properties = ctx.capabilities.get_all().values()\n if port_nodes_runtime_properties and 'management_network_name' not in ctx.properties:\n # Known limitation\n raise RuntimeError(\"Nova server with multi-NIC requires 'management_network_name' which was not supplied\")\n nics = [\n {'port-id': n['external_id']}\n for n in port_nodes_runtime_properties\n if neutron_client.cosmo_is_port(n['external_id'])\n ]\n if nics:\n server['nics'] = server.get('nics', []) + nics\n # Multi-NIC by ports - end\n\n ctx.logger.debug(\n \"server.create() server after transformations: {0}\".format(server))\n\n # First parameter is 'self', skipping\n params_names = inspect.getargspec(nova_client.servers.create).args[1:]\n\n params_default_values = inspect.getargspec(\n nova_client.servers.create).defaults\n params = dict(itertools.izip(params_names, params_default_values))\n\n # Fail on unsupported parameters\n for k in server:\n if k not in params:\n raise ValueError(\"Parameter with name '{0}' must not be passed to\"\n \" openstack provisioner (under host's \"\n \"properties.nova.instance)\".format(k))\n\n for k in params:\n if k in server:\n params[k] = server[k]\n\n if not params['meta']:\n params['meta'] = dict({})\n params['meta']['cloudify_id'] = ctx.node_id\n params['meta']['cloudify_management_network_id'] = managemenet_network_id\n params['meta']['cloudify_management_network_name'] = ctx.properties.get('management_network_name')\n\n ctx.logger.info(\"Asking Nova to create server.\"\n \"Parameters: {0}\".format(str(params)))\n ctx.logger.debug(\"Asking Nova to create server. 
All possible parameters are: \"\n \"{0})\".format(','.join(params.keys())))\n\n try:\n s = nova_client.servers.create(**params)\n except nova_exceptions.BadRequest as e:\n # ctx.logger.error(e)\n if str(e).startswith(MUST_SPECIFY_NETWORK_EXCEPTION_TEXT):\n raise RuntimeError(\n \"Can not provision server: management_network_name is not \"\n \"specified but there are several networks that the server \"\n \"can be connected to.\"\n )\n raise RuntimeError(\"Nova bad request error: \" + str(e))\n # os.system(\"nova show \" + s.id)\n ctx['external_id'] = s.id",
"def create_xml_server(self, server, dev_list, server_metadata={}):\n \n #get if operating system is Windows \n windows_os = False\n os_type = server_metadata.get('os_type', None)\n if os_type == None and 'metadata' in dev_list[0]:\n os_type = dev_list[0]['metadata'].get('os_type', None)\n if os_type != None and os_type.lower() == \"windows\":\n windows_os = True\n #get type of hard disk bus \n bus_ide = True if windows_os else False \n bus = server_metadata.get('bus', None)\n if bus == None and 'metadata' in dev_list[0]:\n bus = dev_list[0]['metadata'].get('bus', None)\n if bus != None:\n bus_ide = True if bus=='ide' else False\n \n self.xml_level = 0\n\n text = \"<domain type='kvm'>\"\n #get topology\n topo = server_metadata.get('topology', None)\n if topo == None and 'metadata' in dev_list[0]:\n topo = dev_list[0]['metadata'].get('topology', None)\n #name\n name = server.get('name','') + \"_\" + server['uuid']\n name = name[:58] #qemu impose a length limit of 59 chars or not start. Using 58\n text += self.inc_tab() + \"<name>\" + name+ \"</name>\"\n #uuid\n text += self.tab() + \"<uuid>\" + server['uuid'] + \"</uuid>\" \n \n numa={}\n if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:\n numa = server['extended']['numas'][0]\n #memory\n use_huge = False\n memory = int(numa.get('memory',0))*1024*1024 #in KiB\n if memory==0:\n memory = int(server['ram'])*1024;\n else:\n if not self.develop_mode:\n use_huge = True\n if memory==0:\n return -1, 'No memory assigned to instance'\n memory = str(memory)\n text += self.tab() + \"<memory unit='KiB'>\" +memory+\"</memory>\" \n text += self.tab() + \"<currentMemory unit='KiB'>\" +memory+ \"</currentMemory>\"\n if use_huge:\n text += self.tab()+'<memoryBacking>'+ \\\n self.inc_tab() + '<hugepages/>'+ \\\n self.dec_tab()+ '</memoryBacking>'\n\n #cpu\n use_cpu_pinning=False\n vcpus = int(server.get(\"vcpus\",0))\n cpu_pinning = []\n if 'cores-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['cores-source'])):\n cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )\n vcpus += 1\n if 'threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['threads-source'])):\n cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )\n vcpus += 1\n if 'paired-threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['paired-threads-source'])):\n cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )\n cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )\n vcpus += 2\n \n if use_cpu_pinning and not self.develop_mode:\n text += self.tab()+\"<vcpu placement='static'>\" +str(len(cpu_pinning)) +\"</vcpu>\" + \\\n self.tab()+'<cputune>'\n self.xml_level += 1\n for i in range(0, len(cpu_pinning)):\n text += self.tab() + \"<vcpupin vcpu='\" +str(cpu_pinning[i][0])+ \"' cpuset='\" +str(cpu_pinning[i][1]) +\"'/>\"\n text += self.dec_tab()+'</cputune>'+ \\\n self.tab() + '<numatune>' +\\\n self.inc_tab() + \"<memory mode='strict' nodeset='\" +str(numa['source'])+ \"'/>\" +\\\n self.dec_tab() + '</numatune>'\n else:\n if vcpus==0:\n return -1, \"Instance without number of cpus\"\n text += self.tab()+\"<vcpu>\" + str(vcpus) + \"</vcpu>\"\n\n #boot\n boot_cdrom = False\n for dev in dev_list:\n if dev['type']=='cdrom' :\n boot_cdrom = True\n break\n text += self.tab()+ '<os>' + \\\n self.inc_tab() + \"<type arch='x86_64' 
machine='pc'>hvm</type>\"\n if boot_cdrom:\n text += self.tab() + \"<boot dev='cdrom'/>\" \n text += self.tab() + \"<boot dev='hd'/>\" + \\\n self.dec_tab()+'</os>'\n #features\n text += self.tab()+'<features>'+\\\n self.inc_tab()+'<acpi/>' +\\\n self.tab()+'<apic/>' +\\\n self.tab()+'<pae/>'+ \\\n self.dec_tab() +'</features>'\n if windows_os or topo==\"oneSocket\":\n text += self.tab() + \"<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>\"% vcpus\n else:\n text += self.tab() + \"<cpu mode='host-model'></cpu>\"\n text += self.tab() + \"<clock offset='utc'/>\" +\\\n self.tab() + \"<on_poweroff>preserve</on_poweroff>\" + \\\n self.tab() + \"<on_reboot>restart</on_reboot>\" + \\\n self.tab() + \"<on_crash>restart</on_crash>\"\n text += self.tab() + \"<devices>\" + \\\n self.inc_tab() + \"<emulator>/usr/libexec/qemu-kvm</emulator>\" + \\\n self.tab() + \"<serial type='pty'>\" +\\\n self.inc_tab() + \"<target port='0'/>\" + \\\n self.dec_tab() + \"</serial>\" +\\\n self.tab() + \"<console type='pty'>\" + \\\n self.inc_tab()+ \"<target type='serial' port='0'/>\" + \\\n self.dec_tab()+'</console>'\n if windows_os:\n text += self.tab() + \"<controller type='usb' index='0'/>\" + \\\n self.tab() + \"<controller type='ide' index='0'/>\" + \\\n self.tab() + \"<input type='mouse' bus='ps2'/>\" + \\\n self.tab() + \"<sound model='ich6'/>\" + \\\n self.tab() + \"<video>\" + \\\n self.inc_tab() + \"<model type='cirrus' vram='9216' heads='1'/>\" + \\\n self.dec_tab() + \"</video>\" + \\\n self.tab() + \"<memballoon model='virtio'/>\" + \\\n self.tab() + \"<input type='tablet' bus='usb'/>\" #TODO revisar\n\n#> self.tab()+'<alias name=\\'hostdev0\\'/>\\n' +\\\n#> self.dec_tab()+'</hostdev>\\n' +\\\n#> self.tab()+'<input type=\\'tablet\\' bus=\\'usb\\'/>\\n'\n if windows_os:\n text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes'/>\"\n else:\n #If image contains 'GRAPH' include graphics\n #if 'GRAPH' in image:\n text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>\" +\\\n self.inc_tab() + \"<listen type='address' address='0.0.0.0'/>\" +\\\n self.dec_tab() + \"</graphics>\"\n\n vd_index = 'a'\n for dev in dev_list:\n bus_ide_dev = bus_ide\n if dev['type']=='cdrom' or dev['type']=='disk':\n if dev['type']=='cdrom':\n bus_ide_dev = True\n text += self.tab() + \"<disk type='file' device='\"+dev['type']+\"'>\"\n if 'file format' in dev:\n text += self.inc_tab() + \"<driver name='qemu' type='\" +dev['file format']+ \"' cache='none'/>\"\n if 'source file' in dev:\n text += self.tab() + \"<source file='\" +dev['source file']+ \"'/>\"\n #elif v['type'] == 'block':\n # text += self.tab() + \"<source dev='\" + v['source'] + \"'/>\"\n #else:\n # return -1, 'Unknown disk type ' + v['type']\n vpci = dev.get('vpci',None)\n if vpci == None:\n vpci = dev['metadata'].get('vpci',None)\n text += self.pci2xml(vpci)\n \n if bus_ide_dev:\n text += self.tab() + \"<target dev='hd\" +vd_index+ \"' bus='ide'/>\" #TODO allows several type of disks\n else:\n text += self.tab() + \"<target dev='vd\" +vd_index+ \"' bus='virtio'/>\" \n text += self.dec_tab() + '</disk>'\n vd_index = chr(ord(vd_index)+1)\n elif dev['type']=='xml':\n dev_text = dev['xml']\n if 'vpci' in dev:\n dev_text = dev_text.replace('__vpci__', dev['vpci'])\n if 'source file' in dev:\n dev_text = dev_text.replace('__file__', dev['source file'])\n if 'file format' in dev:\n dev_text = dev_text.replace('__format__', dev['source file'])\n if '__dev__' in dev_text:\n dev_text = 
dev_text.replace('__dev__', vd_index)\n vd_index = chr(ord(vd_index)+1)\n text += dev_text\n else:\n return -1, 'Unknown device type ' + dev['type']\n\n net_nb=0\n bridge_interfaces = server.get('networks', [])\n for v in bridge_interfaces:\n #Get the brifge name\n self.db_lock.acquire()\n result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )\n self.db_lock.release()\n if result <= 0:\n print \"create_xml_server ERROR getting nets\",result, content\n return -1, content\n #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM\n #I know it is not secure \n #for v in sorted(desc['network interfaces'].itervalues()):\n model = v.get(\"model\", None)\n if content[0]['provider']=='default':\n text += self.tab() + \"<interface type='network'>\" + \\\n self.inc_tab() + \"<source network='\" +content[0]['provider']+ \"'/>\"\n elif content[0]['provider'][0:7]=='macvtap':\n text += self.tab()+\"<interface type='direct'>\" + \\\n self.inc_tab() + \"<source dev='\" + self.get_local_iface_name(content[0]['provider'][8:]) + \"' mode='bridge'/>\" + \\\n self.tab() + \"<target dev='macvtap0'/>\"\n if windows_os:\n text += self.tab() + \"<alias name='net\" + str(net_nb) + \"'/>\"\n elif model==None:\n model = \"virtio\"\n elif content[0]['provider'][0:6]=='bridge':\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.get_local_iface_name(content[0]['provider'][7:])+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n elif model==None:\n model = \"virtio\"\n else:\n return -1, 'Unknown Bridge net provider ' + content[0]['provider']\n if model!=None:\n text += self.tab() + \"<model type='\" +model+ \"'/>\"\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n \n net_nb += 1\n\n interfaces = numa.get('interfaces', [])\n\n net_nb=0\n for v in interfaces:\n if self.develop_mode: #map these interfaces to bridges\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.develop_bridge_iface+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n else:\n text += self.tab() + \"<model type='e1000'/>\" #e1000 is more probable to be supported than 'virtio'\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n continue\n \n if v['dedicated'] == 'yes': #passthrought\n text += self.tab() + \"<hostdev mode='subsystem' type='pci' managed='yes'>\" + \\\n self.inc_tab() + \"<source>\"\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</hostdev>'\n net_nb += 1\n else: #sriov_interfaces\n #skip not connected interfaces\n if v.get(\"net_id\") == None:\n continue\n text += self.tab() + \"<interface type='hostdev' managed='yes'>\"\n self.inc_tab()\n if v.get('mac_address', None) != None:\n text+= self.tab() + \"<mac address='\" +v['mac_address']+ \"'/>\"\n text+= 
self.tab()+'<source>'\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n if v.get('vlan',None) != None:\n text += self.tab() + \"<vlan> <tag id='\" + str(v['vlan']) + \"'/> </vlan>\"\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</interface>'\n\n \n text += self.dec_tab()+'</devices>'+\\\n self.dec_tab()+'</domain>'\n return 0, text",
"def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))",
"def deploy(ctx, domain, dns_option,docker):\n global DOMAIN, USR, CUR_LOC\n usr = getpass.getuser()\n loc = os.path.join(os.getcwd(), domain)\n DOMAIN, USR, CUR_LOC = domain, usr, loc\n\n if not os.path.exists(CUR_LOC):\n try:\n os.makedirs(CUR_LOC)\n except:\n if click.confirm(\"You have no privilege of current location Would you like to own it?\"):\n subprocess.call(['sudo', 'chown', '-R', usr+\":\"+usr, './'])\n os.makedirs(loc)\n else:\n click.echo(\"You have no previlege!!!\")\n return\n\n uwsgi_file_gen(DOMAIN, USR, CUR_LOC)\n nginx_file_gen(DOMAIN, USR, CUR_LOC)\n service_file_gen(DOMAIN, USR, CUR_LOC)\n\n if not docker:\n if not click.confirm('Do you have database already?'):\n docker_file_gen(DOMAIN, USR, CUR_LOC)\n if not dns_option:\n if not click.confirm('Do you have SSL certification?'):\n try:\n # miss_tmp()\n miss_ssl()\n except JumpOutFuckingClick:\n click.echo(\"<_@,@_<\")\n else:\n click.echo(\"ss\"+dns_option)\n if(str(dns_option)==\"1\"):\n try:\n op_cf()\n except JumpOutFuckingClick2:\n click.echo(\"<_@,@_<2\")\n if(str(dns_option)==\"2\"):\n try:\n op_ali()\n except JumpOutFuckingClick2:\n click.echo(\"<_@,@_<2\")\n \n click.echo(\"It's deployed. Fake\")",
"def create_server(self, *server_args, **server_kwargs):\n server_kwargs.setdefault('lease', self)\n server = Server(self.session, *server_args, **server_kwargs)\n self.servers.append(server)\n return server",
"def create_app(StackId=None, Shortname=None, Name=None, Description=None, DataSources=None, Type=None, AppSource=None, Domains=None, EnableSsl=None, SslConfiguration=None, Attributes=None, Environment=None):\n pass",
"def create(self,\n name=None,\n image=None,\n network=None,\n size=None,\n location=None,\n timeout=360,\n key=None,\n secgroup=None,\n ip=None,\n user=None,\n public=True,\n group=None,\n metadata=None,\n cloud=None,\n label=None,\n **kwargs):\n image_use = None\n flavor_use = None\n\n # keyname = Config()[\"cloudmesh\"][\"profile\"][\"user\"]\n # ex_keyname has to be the registered keypair name in cloud\n\n \"\"\"\n https://docs.openstack.org/openstacksdk/latest/user/connection.html#openstack.connection.Connection.create_server\n\n \"\"\"\n\n if 'flavor' in kwargs and size is None:\n size = kwargs['flavor']\n\n if network is not None:\n pass\n elif 'network' in kwargs:\n network = kwargs['network']\n elif 'network' in self.default:\n network = self.default['network']\n\n # Guess user name\n\n if user is None:\n user = Image.guess_username(image)\n # image_name = image.lower()\n # if image_name.startswith(\"cc-\"):\n # user = \"cc\"\n # if \"centos\" in image_name:\n # user = \"centos\"\n # elif \"ubuntu\" in image_name:\n # user = \"ubuntu\"\n\n # get IP\n\n if not ip and public:\n ip = self.find_available_public_ip()\n # pprint(entry)\n\n elif ip is not None:\n entry = self.list_public_ips(ip=ip, available=True)\n if len(entry) == 0:\n print(\"ip not available\")\n raise ValueError(f\"The ip can not be assigned {ip}\")\n\n if type(group) == str:\n groups = Parameter.expand(group)\n\n vm_label = label or name\n\n\n banner(\"Create Server\")\n Console.msg(f\" Cloud: {self.cloud}\")\n Console.msg(f\" Name: {name}\")\n Console.msg(f\" Label: {vm_label}\")\n Console.msg(f\" User: {user}\")\n Console.msg(f\" IP: {ip}\")\n Console.msg(f\" Image: {image}\")\n Console.msg(f\" Size: {size}\")\n Console.msg(f\" Network: {network}\")\n Console.msg(f\" Public: {public}\")\n Console.msg(f\" Key: {key}\")\n Console.msg(f\" Location: {location}\")\n Console.msg(f\" Timeout: {timeout}\")\n Console.msg(f\" Secgroup: {secgroup}\")\n Console.msg(f\" Group: {group}\")\n Console.msg(f\" Groups: {groups}\")\n Console.msg(\"\")\n\n # image = self.cloudman.compute.find_image(image)\n # flavor = self.cloudman.compute.find_flavor(size)\n # network = self.cloudman.network.find_network(network)\n\n try:\n server = self.cloudman.create_server(name,\n network=network,\n flavor=size,\n image=image,\n key_name=key,\n security_groups=[secgroup],\n timeout=timeout\n # tags=groups,\n # wait=True\n )\n\n \"\"\"\n server = self.cloudman.create_server(name,\n networks=[\n {\"uuid\": \"0fa8824d-8a3f-4890-90e1-c3596b3511c6\"}],\n flavor=size,\n image=image,\n key_name=key,\n security_groups=[secgroup],\n timeout=timeout\n # tags=groups,\n # wait=True\n )\n \"\"\"\n server['user'] = user\n server = self.cloudman.wait_for_server(server)\n server = self.cloudman.add_ips_to_server(server, ips=ip)\n variables = Variables()\n variables['vm'] = name\n if metadata is None:\n metadata = {}\n\n #\n # due to metadata limitation in openstack do not add the creation time\n #\n\n if 'created' in metadata:\n del metadata['created']\n\n metadata['image'] = image\n metadata['flavor'] = size\n metadata['label'] = vm_label\n\n self.cloudman.set_server_metadata(server, metadata)\n\n self.add_secgroup(name=secgroup)\n\n # server = self.cloudman.compute.wait_for_server(server)\n\n # print(\"ssh -i {key} root@{ip}\".format(\n # key=PRIVATE_KEYPAIR_FILE,\n # ip=server.access_ipv4))\n\n except openstack.exceptions.ResourceTimeout:\n Console.error(\"Problem starting vm in time.\")\n raise TimeoutError\n\n except Exception as e:\n 
Console.error(\"Problem starting vm\", traceflag=True)\n print(e)\n raise RuntimeError\n\n return self.update_dict(server, kind=\"vm\")[0]",
"def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\", help=\"The app local directory\")\n parser.add_option(\"-r\", \"--remote_dir\", dest=\"remote_dir\", help=\"The app remote directory\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"The django app name\")\n parser.add_option(\"-f\", \"--full\", help=\"Provision before deploy\", default=False)\n parser.add_option(\"-o\", \"--no_files\", help=\"Don't copy the app files\", default=False)\n\n (options, args) = parser.parse_args()\n\n execute(deploy, **options.__dict__)",
"def test_create_deployment(self):\n pass",
"def create_environment(args):\n env.username = args.user\n env.password = args.password\n env.service_url = args.service_url\n env.quiet = args.quiet\n env.verbose = args.verbose\n env.manifest = args.manifest\n env.debug = args.debug\n env.always_confirm = args.yes\n env.args = args\n env.api = ravello.RavelloClient(env.username, env.password, env.service_url)",
"def create_and_run_deployment(\n project_id: int = Form(...),\n model_id: Text = Form(...),\n version: Text = Form(...),\n model_uri: Text = Form(...),\n type: Text = Form(...) # pylint: disable=redefined-builtin\n) -> JSONResponse:\n\n deploy_manager = DeployManager()\n deployment_id = deploy_manager.create_deployment(\n project_id, model_id, version, model_uri, type\n )\n return JSONResponse({'deployment_id': str(deployment_id)}, HTTPStatus.ACCEPTED)",
"def deploy():",
"def create_machine(request):\n\n params = params_from_request(request)\n cloud_id = request.matchdict['cloud']\n\n for key in ('name', 'size'):\n if key not in params:\n raise RequiredParameterMissingError(key)\n\n key_id = params.get('key')\n machine_name = params['name']\n location_id = params.get('location', None)\n image_id = params.get('image')\n if not image_id:\n raise RequiredParameterMissingError(\"image\")\n # this is used in libvirt\n disk_size = int(params.get('libvirt_disk_size', 4))\n disk_path = params.get('libvirt_disk_path', '')\n size_id = params['size']\n # deploy_script received as unicode, but ScriptDeployment wants str\n script = str(params.get('script', ''))\n # these are required only for Linode/GCE, passing them anyway\n image_extra = params.get('image_extra', None)\n disk = params.get('disk', None)\n image_name = params.get('image_name', None)\n size_name = params.get('size_name', None)\n location_name = params.get('location_name', None)\n ips = params.get('ips', None)\n monitoring = params.get('monitoring', False)\n networks = params.get('networks', [])\n docker_env = params.get('docker_env', [])\n docker_command = params.get('docker_command', None)\n script_id = params.get('script_id', '')\n script_params = params.get('script_params', '')\n post_script_id = params.get('post_script_id', '')\n post_script_params = params.get('post_script_params', '')\n async = params.get('async', False)\n quantity = params.get('quantity', 1)\n persist = params.get('persist', False)\n docker_port_bindings = params.get('docker_port_bindings', {})\n docker_exposed_ports = params.get('docker_exposed_ports', {})\n azure_port_bindings = params.get('azure_port_bindings', '')\n # hostname: if provided it will be attempted to assign a DNS name\n hostname = params.get('hostname', '')\n plugins = params.get('plugins')\n cloud_init = params.get('cloud_init', '')\n associate_floating_ip = params.get('associate_floating_ip', False)\n associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',\n None)\n project_id = params.get('project', None)\n bare_metal = params.get('bare_metal', False)\n # bare_metal True creates a hardware server in SoftLayer,\n # whule bare_metal False creates a virtual cloud server\n # hourly True is the default setting for SoftLayer hardware\n # servers, while False means the server has montly pricing\n softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)\n hourly = params.get('billing', True)\n job_id = params.get('job_id')\n job_id = params.get('job_id')\n # The `job` variable points to the event that started the job. If a job_id\n # is not provided, then it means that this is the beginning of a new story\n # that starts with a `create_machine` event. If a job_id is provided that\n # means that the current event will be part of already existing, unknown\n # story. 
TODO: Provide the `job` in the request's params or query it.\n if not job_id:\n job = 'create_machine'\n job_id = uuid.uuid4().hex\n else:\n job = None\n\n # these are needed for OnApp\n size_ram = params.get('size_ram', 256)\n size_cpu = params.get('size_cpu', 1)\n size_disk_primary = params.get('size_disk_primary', 5)\n size_disk_swap = params.get('size_disk_swap', 1)\n boot = params.get('boot', True)\n build = params.get('build', True)\n cpu_priority = params.get('cpu_priority', 1)\n cpu_sockets = params.get('cpu_sockets', 1)\n cpu_threads = params.get('cpu_threads', 1)\n port_speed = params.get('port_speed', 0)\n hypervisor_group_id = params.get('hypervisor_group_id')\n\n auth_context = auth_context_from_request(request)\n\n try:\n Cloud.objects.get(owner=auth_context.owner,\n id=cloud_id, deleted=None)\n except Cloud.DoesNotExist:\n raise NotFoundError('Cloud does not exist')\n\n # compose schedule as a dict from relative parameters\n if not params.get('schedule_type'):\n schedule = {}\n else:\n if params.get('schedule_type') not in ['crontab',\n 'interval', 'one_off']:\n raise BadRequestError('schedule type must be one of '\n 'these (crontab, interval, one_off)]'\n )\n if params.get('schedule_entry') == {}:\n raise RequiredParameterMissingError('schedule_entry')\n\n schedule = {\n 'name': params.get('name'),\n 'description': params.get('description', ''),\n 'action': params.get('action', ''),\n 'script_id': params.get('schedule_script_id', ''),\n 'schedule_type': params.get('schedule_type'),\n 'schedule_entry': params.get('schedule_entry'),\n 'expires': params.get('expires', ''),\n 'start_after': params.get('start_after', ''),\n 'max_run_count': params.get('max_run_count'),\n 'task_enabled': bool(params.get('task_enabled', True)),\n 'auth_context': auth_context.serialize(),\n }\n\n auth_context.check_perm(\"cloud\", \"read\", cloud_id)\n auth_context.check_perm(\"cloud\", \"create_resources\", cloud_id)\n tags = auth_context.check_perm(\"machine\", \"create\", None) or {}\n if script_id:\n auth_context.check_perm(\"script\", \"run\", script_id)\n if key_id:\n auth_context.check_perm(\"key\", \"read\", key_id)\n\n # Parse tags.\n try:\n mtags = params.get('tags') or {}\n if not isinstance(mtags, dict):\n if not isinstance(mtags, list):\n raise ValueError()\n if not all((isinstance(t, dict) and len(t) is 1 for t in mtags)):\n raise ValueError()\n mtags = {key: val for item in mtags for key, val in item.items()}\n tags.update(mtags)\n except ValueError:\n raise BadRequestError('Invalid tags format. 
Expecting either a '\n 'dictionary of tags or a list of single-item '\n 'dictionaries')\n\n args = (cloud_id, key_id, machine_name,\n location_id, image_id, size_id,\n image_extra, disk, image_name, size_name,\n location_name, ips, monitoring, networks,\n docker_env, docker_command)\n kwargs = {'script_id': script_id,\n 'script_params': script_params, 'script': script, 'job': job,\n 'job_id': job_id, 'docker_port_bindings': docker_port_bindings,\n 'docker_exposed_ports': docker_exposed_ports,\n 'azure_port_bindings': azure_port_bindings,\n 'hostname': hostname, 'plugins': plugins,\n 'post_script_id': post_script_id,\n 'post_script_params': post_script_params,\n 'disk_size': disk_size,\n 'disk_path': disk_path,\n 'cloud_init': cloud_init,\n 'associate_floating_ip': associate_floating_ip,\n 'associate_floating_ip_subnet': associate_floating_ip_subnet,\n 'project_id': project_id,\n 'bare_metal': bare_metal,\n 'tags': tags,\n 'hourly': hourly,\n 'schedule': schedule,\n 'softlayer_backend_vlan_id': softlayer_backend_vlan_id,\n 'size_ram': size_ram,\n 'size_cpu': size_cpu,\n 'size_disk_primary': size_disk_primary,\n 'size_disk_swap': size_disk_swap,\n 'boot': boot,\n 'build': build,\n 'cpu_priority': cpu_priority,\n 'cpu_sockets': cpu_sockets,\n 'cpu_threads': cpu_threads,\n 'port_speed': port_speed,\n 'hypervisor_group_id': hypervisor_group_id}\n if not async:\n ret = methods.create_machine(auth_context.owner, *args, **kwargs)\n else:\n args = (auth_context.owner.id, ) + args\n kwargs.update({'quantity': quantity, 'persist': persist})\n tasks.create_machine_async.apply_async(args, kwargs, countdown=2)\n ret = {'job_id': job_id}\n ret.update({'job': job})\n return ret",
"def main(argv=None):\n config = parseoptions(argv)\n if config.args.version:\n print (devpi_server.__version__)\n return\n\n\n if config.args.gendeploy:\n from devpi_server.gendeploy import gendeploy\n return gendeploy(config)\n\n configure_logging(config)\n xom = XOM(config)\n return bottle_run(xom)",
"def app_create(self, site_name, virt_path, phys_path, pool=None):\n if virt_path[0] != '/':\n virt_path = '/' + virt_path\n self.core.api.os.shell.cmd('{0} add app /site.name:\"{1}\" /path:\"{2}\" /physicalPath:\"{3}\"'.format(\n self.APP_CMD, site_name, virt_path, phys_path\n ))\n if pool:\n self.core.api.os.shell.cmd('{0} set app \"{1}\" /applicationPool:\"{2}\"'.format(\n self.APP_CMD, site_name + virt_path, pool\n ))",
"def create_embedded():\n from .server import create_application\n return create_application()",
"def deploy(args):\n from scrapyd_client import deploy\n\n sys.argv.pop(1)\n deploy.main()"
] | [
"0.6567448",
"0.6424248",
"0.64162976",
"0.63874674",
"0.63142943",
"0.6168334",
"0.61501795",
"0.61205155",
"0.6112681",
"0.6051007",
"0.60456806",
"0.6044642",
"0.6022895",
"0.599268",
"0.59312016",
"0.58959",
"0.582929",
"0.58238673",
"0.58151454",
"0.580947",
"0.5804975",
"0.57690436",
"0.57503456",
"0.57189924",
"0.56987435",
"0.5698714",
"0.56924397",
"0.56815255",
"0.5681236",
"0.567867"
] | 0.70523727 | 0 |
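A minimal, runnable sketch of how the fusion_api_create_os_deploymentserver wrapper in the row above might be driven. The stub client, the api='800' version string, and the payload fields are illustrative assumptions, not part of the scraped library.

class _StubOsds:
    # Stand-in for the real OS Deployment Server sub-client; it simply echoes the call.
    def create(self, body, api, headers):
        return {"status": "created", "body": body, "api": api, "headers": headers}

class FusionClientSketch:
    def __init__(self):
        self.osds = _StubOsds()

    # Same shape as the documented wrapper: forward the request body to the sub-client.
    def fusion_api_create_os_deploymentserver(self, body, api=None, headers=None):
        return self.osds.create(body, api, headers)

client = FusionClientSketch()
payload = {"name": "osds-1", "description": "example OS deployment server"}  # assumed fields
print(client.fusion_api_create_os_deploymentserver(payload, api="800"))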
Delete OS Deployment server. [Arguments] | def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):
return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_server(ServerName=None):\n pass",
"def delete(args):\n if args.tag is not None:\n tag = str(args.tag)\n interface = DigitalOceanSetup.create_interface()\n # Delete everything matching the tag\n interface.destroy_machines_by_tag(tag)\n elif args.delete_list:\n server_list = read_server_file()\n if len(server_list) == 1:\n interface = DigitalOceanSetup.create_interface()\n droplet_details = server_list[0]\n # Download the save game from the server\n if args.save:\n eprint(\"Running Ansible...\")\n os.environ[\"ANSIBLE_HOST_KEY_CHECKING\"] = \"False\"\n process = subprocess.Popen([\"ansible-playbook\", \"-i\",\n droplet_details[\"name\"] + \",\",\n \"--private-key\", \"~/.ssh/id_rsa\",\n \"save-factorio.yml\"],\n stdout=subprocess.PIPE)\n out, _ = process.communicate()\n eprint(out)\n # Now destory the droplet\n interface.destroy_machine_by_id(droplet_details[\"id\"])\n # Save empty list to file\n save_dict_to_file(\"servers.json\", [])\n else:\n eprint(\"Too many or no items in server list.\")\n else:\n eprint(\"Missing arguments.\")",
"def delete_deployment(request, deployment, **_kwargs):\n pass",
"def delete(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack server: %s' % truncate(res))\n return res[0]",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def delete_machine(args):\n session = Session()\n # the following is used to help with code completion\n \"\"\"session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\"\"\"\n machine = session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).first()\n if machine is not None:\n print \"Deleting machine with hostname: \" + machine.hostname + \" and with id: \" + str(machine.id)\n session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\n else:\n print \"No machine was found!\"",
"def DeleteServer(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def DeleteModelVersionsDeployment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_delete_deployment(self):\n pass",
"def delete(self):\n raise NotImplementedError(\"Deleting not supported for servers\")",
"def delete_server(self, request, tenant_id, server_id):\n response_data = delete_server(server_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])",
"def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name",
"def test_delete_hyperflex_server_firmware_version(self):\n pass",
"def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0",
"def test_delete_deployment_run(self):\n pass",
"def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def delete_server(self, server_id, force=False):\n endpoint = 'application/servers/{}'.format(server_id)\n if force:\n endpoint += '/force'\n\n response = self._api_request(endpoint=endpoint, mode='DELETE')\n return response",
"def do_command(self, args):\n vendorops = dbops.Vendors()\n vendorops.delete(args)",
"def site_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete site \"{1}\"'.format(self.APP_CMD, name))",
"def delete(name, config, backend, storage, debug):\n setup_lithops_logger(logging.DEBUG)\n\n verify_runtime_name(name)\n\n if config:\n config = load_yaml_config(config)\n\n setup_lithops_logger(logging.DEBUG)\n\n config_ow = set_config_ow(backend, storage, runtime_name=name)\n config = default_config(config, config_ow)\n\n if config['lithops']['mode'] != SERVERLESS:\n raise Exception('\"lithops runtime delete\" command is only valid for serverless backends')\n\n storage_config = extract_storage_config(config)\n internal_storage = InternalStorage(storage_config)\n compute_config = extract_serverless_config(config)\n compute_handler = ServerlessHandler(compute_config, internal_storage)\n\n runtimes = compute_handler.list_runtimes(name)\n for runtime in runtimes:\n compute_handler.delete_runtime(runtime[0], runtime[1])\n runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])\n internal_storage.delete_runtime_meta(runtime_key)",
"def step_delete(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console delete --resource-group {resourceGroup} \"\n \"--virtual-machine-name {virtualMachineName} --yes\",\n checks=checks,\n )",
"def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.delete(args)",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def delete_server(self, request, server_id):\n group = self.store.get_scaling_group(\n self.log, self.tenant_id, self.scaling_group_id)\n log = self.log.bind(server_id=server_id)\n d = controller.modify_and_trigger(\n self.dispatcher,\n group,\n bound_log_kwargs(log),\n partial(controller.remove_server_from_group,\n self.dispatcher,\n log,\n transaction_id(request), server_id,\n extract_bool_arg(request, 'replace', True),\n extract_bool_arg(request, 'purge', True)),\n modify_state_reason='delete_server')\n return d",
"def delete_syslog_server(client_session):\n\n cfg_result = client_session.delete('systemSyslogServer')\n\n if cfg_result['status'] == 204:\n return True\n else:\n return False",
"def delete_server(self, server_id):\n if server_id:\n logging.debug(\"Deleting {}\".format(server_id))\n response = self._request(\"DELETE\", [ROUTE_SERVERS, server_id])\n return self.verif_response(response)\n\n logging.error(\"No server id specific for delete\")\n return False",
"def fusion_api_delete_deployment_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dep_mgr.delete(name=name, uri=uri, api=api, headers=headers)"
] | [
"0.7311343",
"0.7191963",
"0.698584",
"0.65994453",
"0.64927745",
"0.6424388",
"0.6421005",
"0.63916403",
"0.63363886",
"0.62683016",
"0.6262936",
"0.62379235",
"0.6201516",
"0.6142421",
"0.61166936",
"0.61104494",
"0.6102672",
"0.60981226",
"0.59932476",
"0.598643",
"0.5975752",
"0.595752",
"0.5948545",
"0.5906848",
"0.5866528",
"0.58502233",
"0.58399874",
"0.58244485",
"0.5811842",
"0.579962"
] | 0.758158 | 0 |
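For the delete wrapper documented in the row above, a small self-contained sketch; types.SimpleNamespace stands in for self.osds, and the server name and URI are invented for illustration.

from types import SimpleNamespace

# Stand-in sub-client whose delete() just reports what it was asked to remove.
osds = SimpleNamespace(
    delete=lambda name=None, uri=None, param='', api=None, headers=None: {
        "deleted": name or uri, "param": param
    }
)

def fusion_api_delete_os_deploymentserver(name=None, uri=None, param='', api=None, headers=None):
    # Mirrors the documented wrapper: either a name or a URI identifies the server.
    return osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)

print(fusion_api_delete_os_deploymentserver(name="osds-1"))                       # delete by name (assumed)
print(fusion_api_delete_os_deploymentserver(uri="/rest/deployment-servers/123"))  # delete by URI (assumed path)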
Gets whether the OS Deployment Server is available. [Arguments] | def fusion_api_get_os_deploymentserver(self, uri=None, param='', api=None, headers=None):
return self.osds.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def server(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server\")",
"def get_server():\n pass",
"def config_server(self) -> Optional[pulumi.Input['ConfigServerSettingsArgs']]:\n return pulumi.get(self, \"config_server\")",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetOsVersion', self.handle)",
"def selenium_server(request) -> Optional[str]:\n return (\n # CLI arg takes precedence\n request.config.getoption(\"selenium_server\")\n # Otherwise, we look for a non-empty string\n or os.environ.get('SELENIUM_SERVER', '').strip()\n # If the result is still Falsey, we always return None.\n or None\n )",
"def installed_server_number(self) -> Optional[int]:\n return pulumi.get(self, \"installed_server_number\")",
"def server_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_version\")",
"def server_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_version\")",
"def get_server_version(object = server_status_req):\n try:\n response = urllib2.urlopen(object).read()\n server_connect = json.loads(response)\n return server_connect['GetSeverStatus']\n except URLError, e:\n print 'Error: No Response From Server.'",
"def software_config(self) -> Optional[pulumi.Input['RuntimeSoftwareConfigArgs']]:\n return pulumi.get(self, \"software_config\")",
"def server(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"server\")",
"def server(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"server\")",
"def GetServerHost():\n return GetHostName(True)",
"def get_server():\n\n instance = Ceic._get_instance()\n\n return instance._ceic_configuration.server",
"def snitun_server(self) -> Optional[str]:\n return self._snitun_server",
"def test_server_details_ok(self):\n response = self.call_api('server_details', {}, 200).json\n self.assertEqual(utils.get_app_version(), response['server_version'])",
"def get_host_os_version(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetHostOsVersion', self.handle)",
"def get_os_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_OS_INFO)",
"def GetOSName():\n return Config.osName_",
"def get_server_host(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetServerHost', self.handle)",
"def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)",
"def get_server(self):\n\n pass",
"def get_server_version(self):\n return self.__aceQLHttpApi.get_server_version()",
"def get_version_info(self):\n sys_info_service = self.robot.all_services.get(\"sys_info\")\n if sys_info_service is not None:\n log.info(\"System version info: %s\" % sys_info_service.system_version)\n else:\n log.warning(\"Service get_version_info is not enabled!\")",
"def get_software_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><software><info></info></software></system></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def os_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_version\")",
"def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMinor', self.handle)",
"def remote_info():\n run('uname -a')",
"def LicenseServer(self):\n return self._get_attribute('licenseServer')"
] | [
"0.6122306",
"0.6116507",
"0.60377234",
"0.5933984",
"0.59329784",
"0.59246665",
"0.5922845",
"0.5922845",
"0.58639693",
"0.5771792",
"0.57105774",
"0.57105774",
"0.56729096",
"0.56728786",
"0.5656897",
"0.5648294",
"0.5638455",
"0.56105363",
"0.5601789",
"0.55934906",
"0.5585368",
"0.55712",
"0.5555253",
"0.5549622",
"0.5507962",
"0.55071294",
"0.54981935",
"0.54976034",
"0.5489158",
"0.5485456"
] | 0.659178 | 0 |
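A sketch of exercising the GET wrapper from the row above with unittest.mock, to show which keyword arguments it forwards to self.osds.get; the filter string is a made-up example, not a documented query.

from unittest import mock

class FusionClientSketch:
    def __init__(self):
        self.osds = mock.MagicMock()

    # Same signature as the documented wrapper.
    def fusion_api_get_os_deploymentserver(self, uri=None, param='', api=None, headers=None):
        return self.osds.get(uri=uri, api=api, headers=headers, param=param)

client = FusionClientSketch()
client.fusion_api_get_os_deploymentserver(param="?filter=name='osds-1'")  # assumed filter syntax
# Confirms the wrapper forwards everything as keyword arguments.
client.osds.get.assert_called_once_with(uri=None, api=None, headers=None, param="?filter=name='osds-1'")
print(client.osds.get.call_args)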
Gets the URI of an i3s appliance. [Arguments] | def fusion_api_get_i3sappliance_uri(self, uri=None, param='', api=None, headers=None):
return self.osds.geti3suri(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_current_ami ( s3_infra_conn, region_name, env_type, app_name ) :\n ami_bucket = get_admin_bucket_name( region_name = region_name )\n return retrieve_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = ami_bucket,\n key_name = get_ami_keypath( env_type ) + get_ami_keyname( app_name ) )",
"def get(self, item_type, entry_name):\n armor = ArmorData(name=entry_name)\n if not hasattr(armor, 'level'):\n return {\"Error\": f\"'{entry_name}' not found in {item_type} armor. \"\n f\"Try this: '{NS.armor._path}/{item_type}/search/\"\n f\"{entry_name.replace(' ', '%20')}'\"\n }, 404\n return armor.associative_data()",
"def get_boto3(args=None, logger=None, stats=None):\n return Boto3(**__get_arguments(args, logger, stats))",
"def get_value_for_attribute(attribute):\n path = '/computeMetadata/v1/instance/attributes/%s' % attribute\n try:\n http_response = _issue_http_request(\n HTTP_GET, path, REQUIRED_METADATA_HEADER)\n return http_response.read()\n except (TypeError, ValueError, errors.MetadataServerHttpError):\n LOGGER.exception('Unable to read value for attribute key %s '\n 'from metadata server.', attribute)\n return None",
"async def get_json_argument(self, item, deft=None):\n return self.json.get(item, deft)",
"def getArmor(self):\n return self.av",
"def getValue(self, args):\r\n return self.provider(self.name, args)",
"def get_value(arg):\n if arg in self.args_repository:\n return self.args_repository[arg]\n if arg in self.data_repository:\n return self.data_repository[arg]\n print_error(\"value for mandatory argument '{0}' not available in \"\n \"data_repository/args_repository\".format(args))\n return None",
"def storage_appliance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_appliance_name\")",
"def get_value(self, section_name: str, attr_name: str, env_override: bool = False) -> typing.Optional[str]:\n key = f'{section_name}_{attr_name}'.upper()\n result: typing.Optional[str] = None\n if env_override:\n result = os.getenv(key)\n if result is None:\n response = self._vault_api.read_secret(path=section_name.upper(), mount_point=self.mount_point)\n result = response[\"data\"][\"data\"].get(attr_name.upper())\n return result",
"def cloud_assembly_input(self) -> aws_cdk.aws_codepipeline.Artifact:\n return self._values.get(\"cloud_assembly_input\")",
"def cloud_assembly_input(self) -> aws_cdk.aws_codepipeline.Artifact:\n return self._values.get(\"cloud_assembly_input\")",
"def cloud_assembly_input(self) -> aws_cdk.aws_codepipeline.Artifact:\n return self._values.get(\"cloud_assembly_input\")",
"def cloud_assembly_input(self) -> aws_cdk.aws_codepipeline.Artifact:\n return self._values.get(\"cloud_assembly_input\")",
"def cloud_assembly_input(self) -> aws_cdk.aws_codepipeline.Artifact:\n return self._values.get(\"cloud_assembly_input\")",
"def cloud_assembly_input(self) -> aws_cdk.aws_codepipeline.Artifact:\n return self._values.get(\"cloud_assembly_input\")",
"def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving snapshots\", \"/snapshots\")",
"def do_get_version(self, arg):\n arg = arg\n print(self.phil.if_version)",
"def subcmd_getstorage_main(args, parameter_info):\n \n from get_storage_inventory import get_storage_inventory\n result = get_storage_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])",
"def s3(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"s3\")",
"def get_apk(self):",
"def get_value(value, key, client):\n if client is None:\n return value.__dict__[key]\n elif \"glance\" in str(client):\n return value[key]\n elif \"cinder\" in str(client):\n return value.__dict__[key]\n elif \"nova\" in str(client):\n return value.__dict__[key]",
"def get_boto3_version() -> str:\n return boto3_version",
"def get_bucket_iam_policy_output(bucket: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBucketIamPolicyResult]:\n ...",
"def get_manifest_data(bucket,team, dataset,manifest_key):\n dynamo_config = DynamoConfiguration()\n dynamo_interface = DynamoInterface(dynamo_config)\n s3_interface = S3Interface()\n local_path = s3_interface.download_object(bucket, manifest_key)\n ddb_keys=[]\n items=[]\n with open(local_path, \"r\") as raw_file:\n file_names = [file_name.strip().split(\"/\")[-1]\n for file_name in raw_file]\n for file in file_names:\n ddb_keys.append({\n \"dataset_name\": team+\"-\"+dataset,\n \"manifest_file_name\": manifest_key.split(\"/\")[-1], \"datafile_name\": file\n })\n for ddb_key in ddb_keys:\n try:\n items.append(dynamo_interface.get_item_from_manifests_control_table(\n ddb_key[\"dataset_name\"], ddb_key[\"manifest_file_name\"], ddb_key[\"datafile_name\"]))\n except KeyError:\n logger.error(\"The manifest file has not been processed in Stage A\")\n raise Exception(\"Manifest File has not been processed in Stage A\")\n\n return items",
"def get(self, key):\n return s3.Object(self.bucket.name, key).get()['Body'].read()",
"def getitem(\n self, obj: t.Any, argument: t.Union[str, t.Any]\n ) -> t.Union[t.Any, Undefined]:\n try:\n return obj[argument]\n except (AttributeError, TypeError, LookupError):\n if isinstance(argument, str):\n try:\n attr = str(argument)\n except Exception:\n pass\n else:\n try:\n return getattr(obj, attr)\n except AttributeError:\n pass\n return self.undefined(obj=obj, name=argument)",
"def get(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name=name, check_mode=check_mode, force=force)\n id = ret_obj['data']\n\n if id == {}:\n warnings = [\"STS Chain {0} had no match, skipping retrieval.\".format(name)]\n return isamAppliance.create_return_object(warnings=warnings)\n else:\n return _get(isamAppliance, id)",
"def getApp(self, args):\n \n try:\n return args[2]\n except:\n raise ArgsException, '3rd argument should be the application(path) for creating the sql backup'",
"def amz_main():\n\n product = sys.argv[1:]\n product_key = \"+\".join(product)\n\n source = \"https://amazon.in/s\"\n prod = {\"k\" : product_key.lower()}\n HEADERS = ({\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\"})\n\n req = error_handler(source, prod, HEADERS)\n \n soup = bs(req.text, \"lxml\")\n amz_res = amz_details(soup, product, HEADERS)\n return amz_res"
] | [
"0.6054499",
"0.5129115",
"0.5069351",
"0.50430375",
"0.50374556",
"0.5037109",
"0.49930125",
"0.49121296",
"0.48898426",
"0.48777938",
"0.4867673",
"0.4867673",
"0.4867673",
"0.4867673",
"0.4867673",
"0.4867673",
"0.48612306",
"0.48609614",
"0.48272517",
"0.4798483",
"0.47864127",
"0.4784851",
"0.47174093",
"0.471428",
"0.47116733",
"0.4681452",
"0.4679554",
"0.46769032",
"0.467495",
"0.46347418"
] | 0.5175439 | 1 |
Gets OS Deployment Plan [Arguments] | def fusion_api_get_os_deploymentplan(self, uri=None, param='', api=None, headers=None):
return self.osds.getosdp(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_deployment_parameters(plan_name):\n pass",
"def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan",
"def deploy_plan(plan_name):\n pass",
"def plan(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"plan\")",
"def get_plan(self):\n sub = self.get_subscription()\n return sub.plan",
"def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)",
"def get_plan(self, name: str, json_output: bool = False):\n if not name:\n _exit_if_errors(['--name is required'])\n\n plan, errors = self.rest.get_backup_plan(name)\n _exit_if_errors(errors)\n if json_output:\n print(json.dumps(plan, indent=2))\n else:\n self.human_print_plan(plan)",
"def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'AppServicePlan':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"free_offer_expiration_time\"] = None\n __props__[\"geo_region\"] = None\n __props__[\"hosting_environment_profile\"] = None\n __props__[\"hyper_v\"] = None\n __props__[\"is_spot\"] = None\n __props__[\"is_xenon\"] = None\n __props__[\"kind\"] = None\n __props__[\"location\"] = None\n __props__[\"maximum_elastic_worker_count\"] = None\n __props__[\"maximum_number_of_workers\"] = None\n __props__[\"name\"] = None\n __props__[\"number_of_sites\"] = None\n __props__[\"per_site_scaling\"] = None\n __props__[\"provisioning_state\"] = None\n __props__[\"reserved\"] = None\n __props__[\"resource_group\"] = None\n __props__[\"sku\"] = None\n __props__[\"spot_expiration_time\"] = None\n __props__[\"status\"] = None\n __props__[\"subscription\"] = None\n __props__[\"system_data\"] = None\n __props__[\"tags\"] = None\n __props__[\"target_worker_count\"] = None\n __props__[\"target_worker_size_id\"] = None\n __props__[\"type\"] = None\n __props__[\"worker_tier_name\"] = None\n return AppServicePlan(resource_name, opts=opts, __props__=__props__)",
"def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)",
"def test_get_deployment_runs_in_virtualization_realm(self):\n pass",
"def human_print_plan(plan: object):\n print(f'Name: {plan[\"name\"]}')\n print(f'Description: {plan[\"description\"] if \"description\" in plan else \"N/A\"}')\n print(f'Services: {BackupServicePlan.service_list_to_str(plan[\"services\"])}')\n print(f'Default: {(plan[\"default\"] if \"deafult\" in plan else False)!s}')\n\n # If the are no tasks return\n if not plan[\"tasks\"]:\n return\n\n print()\n print('Tasks:')\n task_name_pad = 5\n schedule_pad = 10\n for task in plan['tasks']:\n if len(task['name']) > task_name_pad:\n task_name_pad = len(task['name'])\n\n task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])\n if len(task['schedule_str']) > schedule_pad:\n schedule_pad = len(task['schedule_str'])\n\n task_name_pad += 1\n schedule_pad += 1\n\n header = f'{\"Name\":<{task_name_pad}} | {\"Schedule\":<{schedule_pad}} | Options'\n print(header)\n print('-' * (len(header) + 5))\n\n for task in plan['tasks']:\n options = BackupServicePlan.format_options(task)\n print(f'{task[\"name\"]:<{task_name_pad}} | {task[\"schedule_str\"]:<{schedule_pad}} | {options}')",
"def pricing_plan(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pricing_plan\")",
"def list(cls):\n return cls().requests.get('plan')",
"def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)",
"def get_deployment_output(account_name: Optional[pulumi.Input[str]] = None,\n deployment_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDeploymentResult]:\n ...",
"def pricing_plan(self) -> str:\n return pulumi.get(self, \"pricing_plan\")",
"def get_migration_plan():\n executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])\n plan = executor.migration_plan(executor.loader.graph.leaf_nodes())\n return plan",
"def get_deployment_roles(plan_name):\n pass",
"def getPlan(self):\n return StripePlan(self.base.get(\"plan\", []))",
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def _get_service_plan(self, service_name, service_plan_name):\n self._assert_space()\n key = ' / '.join([service_name, service_plan_name])\n if key in self._service_plan:\n return self._service_plan[key]\n self._get_service(service_name)\n service_plan_url = self._service['entity']['service_plans_url']\n res = self._cc.request(service_plan_url).get()\n for plan in res.resources:\n if service_plan_name == plan['entity']['name']:\n self._service_plan[key] = plan\n break\n return self._service_plan[key]",
"def test_get_deployment_run(self):\n pass",
"def select(self, req):\n ctx = req.environ['nova.context']\n qs = req.environ['QUERY_STRING']\n param_dict = urlparse.parse_qs(qs)\n param_dict.pop(\"fresh\", None)\n # parse_qs returns a dict where the values are lists,\n # since query strings can have multiple values for the\n # same key. We need to convert that to single values.\n for key in param_dict:\n param_dict[key] = param_dict[key][0]\n build_plan = api.select(ctx, specs=param_dict)\n cooked = self._scrub_build_plan(build_plan)\n return {\"weights\": cooked}",
"def view_deployment(request, deployment, **_kwargs):\n pass",
"def get_deployment_template_resource_types(plan_name):\n pass",
"def choosePlan(prompt='Choose a Plan and press OK', title='Choose a Plan'):\n case = rsl.get_current('Case')\n planNames = [ pln.Name for pln in case.TreatmentPlans ]\n return getChoiceFromList(choiceList=planNames, prompt=prompt, title=title)",
"def get(self, **kwargs):\n _plans = self._plans.query(**kwargs)\n\n if not _plans:\n raise PlanNotFoundError\n\n return _plans",
"def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))",
"def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")"
] | [
"0.68726015",
"0.63036317",
"0.59006155",
"0.5568436",
"0.5565372",
"0.55153424",
"0.5511352",
"0.5364062",
"0.5345589",
"0.53239554",
"0.5298429",
"0.5253463",
"0.52343905",
"0.5222922",
"0.521661",
"0.5208022",
"0.5189935",
"0.5159971",
"0.51265657",
"0.50976896",
"0.5071507",
"0.50607705",
"0.5023047",
"0.49664178",
"0.49494928",
"0.49337485",
"0.49331397",
"0.4919941",
"0.4914892",
"0.49118578"
] | 0.6821857 | 1 |
Adds Fabric Manager to One View [Arguments] | def fusion_api_create_fabric_manager(self, body, api=None, headers=None):
return self.fabricmanager.post(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_view( *args, **kwargs ):",
"def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)",
"def manager():\n pass",
"def add_views(self, *args):\n for view in args:\n self.add_view(view)",
"def manage_afterAdd(self, item, container) :\n item.manage_permission(Permissions.AddPortalContent,\n ['Manager'])\n item.manage_permission(Permissions.AccessContentsInformation,\n ['Member', 'Manager'])\n item.manage_permission(Permissions.View,\n ['Manager',])\n BaseTool.inheritedAttribute('manage_afterAdd')(self, item, container)",
"def admin(self, view):\n view.admin = True\n return view",
"def on_show_view(self):\n self.setup()",
"def on_show_view(self):\n self.setup()",
"def on_show_view(self):\n self.setup()",
"def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return",
"def show_manager(self):\n\n if self.manager.wid.table.rowCount() == 0:\n self.manager.wid.add_row(0)\n\n self.manager.show()",
"def managerMuestra(self):\n self.modelo.managerMuestra(self.buttons, self.buttonReleased)",
"def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)",
"def __init__(self, **manager_commands):\n self.package = manager_commands",
"def show_add_actor(self):\n\t\tformulario = view_form_actor.Form(self)\n\t\tformulario.exec_()\n\t\tself.load_data()",
"def init(self, view, settings):\n if not view:\n return\n current_dir = path.dirname(view.file_name())\n search_scope = SearchScope(\n from_folder=current_dir,\n to_folder=settings.project_base_folder)\n self.flags_manager = FlagsManager(\n use_cmake=settings.generate_flags_with_cmake,\n flags_update_strategy=settings.cmake_flags_priority,\n cmake_prefix_paths=settings.cmake_prefix_paths,\n search_scope=search_scope)",
"def dlgMM(parser, args):\n dlgCompositeManager(parser, args, MasterManager, 'MM', MASTER_DEFAULT_REST_PORT, MasterManagerRestServer)",
"def viewer_setup(self):\n pass",
"def viewer_setup(self):\n pass",
"def _add_view_to_menu(self, view):\n self._add_menu_item(MenuView(view.name, view), view.category)",
"def setup(client):\n client.add_cog(ProcessDisplay(client))",
"def addUpdateUI(call, args=(), kwargs={}, nodeClass='*'):",
"def OnAdd(self, controller):\n pass",
"def view_add(self):\n is_admin = self.request.view_name == \"admin-add\"\n if self.schema_add is None:\n raise HTTPNotFound()\n kwargs = self.request.json\n jsonschema.validate(instance=kwargs, schema=self.schema_add)\n child_view = self.add(**kwargs)\n if is_admin:\n return child_view.admin_tile\n else:\n return child_view.tile",
"def fusion_api_edit_fabric_manager(self, body, uri, api=None, headers=None):\n return self.fabricmanager.put(body=body, uri=uri, api=api, headers=headers)",
"def OnFindManager(self, event):\r\n \r\n event.SetManager(self._owner_mgr)",
"def view_system():\n\n pass",
"def _addView(self, win, fn=None, noName=\"\", addNext=False, indexes=None):\n raise RuntimeError('Not implemented')",
"def fabricshow(obj, content):\n ri, fab_obj, proj_obj = 0, None, obj.r_project_obj()\n\n # Skip to where the fabric list starts (after the '-----------------------')\n for buf in content:\n buf = content[ri]\n ri += 1\n if '-version' in buf or 'no fabric' in buf or 'SS CMD END' in buf:\n return fab_obj, ri\n if '-----------------------' in buf:\n break\n\n brocade_fabric = list()\n while len(content) > ri:\n buf = content[ri]\n ri += 1\n if len(buf) == 0 or 'The Fabric has' in buf or 'Fabric had' in buf or 'SS CMD END' in buf:\n break\n l = gen_util.remove_duplicate_char(buf.strip(), ' ').split(' ')\n if len(l) > 5:\n if l[5][0] == '>': # It's the principal switch\n fab_obj = proj_obj.s_add_fabric(l[2])\n brocade_fabric.append({\n 'domain-id': int(l[0].replace(':', '')),\n 'fcid-hex': '0x' + l[1],\n 'name': l[2],\n 'ip-address': brcdapi_util.mask_ip_addr(l[3]),\n 'fcip-address': brcdapi_util.mask_ip_addr(l[4]),\n 'principal': 1 if '>' in l[5] else 0,\n 'switch-user-friendly-name': l[5].replace('\"', '').replace('>', ''),\n })\n\n if fab_obj is not None:\n brcddb_util.add_to_obj(fab_obj, 'brocade-fabric/fabric-switch', brocade_fabric)\n for d in brocade_fabric:\n fab_obj.s_add_switch(d['name'])\n\n return fab_obj, ri",
"def fusion_api_get_fabric_manager(self, uri=None, param='', api=None, headers=None):\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)"
] | [
"0.59634304",
"0.5662595",
"0.5455031",
"0.5405313",
"0.53803706",
"0.534478",
"0.5262761",
"0.5262761",
"0.5262761",
"0.5087117",
"0.5053838",
"0.504193",
"0.50273764",
"0.50249666",
"0.5023837",
"0.5022318",
"0.4999911",
"0.49980935",
"0.49980935",
"0.49811444",
"0.4964837",
"0.49637058",
"0.4953029",
"0.49514386",
"0.49459106",
"0.49345097",
"0.49219772",
"0.49071342",
"0.490674",
"0.4894984"
] | 0.5848207 | 1 |
Gets the Fabric Manager details for the provided name or list of all Fabric Managers if name is not provided [Arguments] | def fusion_api_get_fabric_manager(self, uri=None, param='', api=None, headers=None):
return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_fabric_manager_tenants(self, uri, name=None, param='', api=None, headers=None):\n param = '/tenants/'\n if name:\n param += '?&filter=\"\\'name\\' == \\'%s\\'\"' % (name)\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)",
"def get_manager(self, name):\n\n if name == \"control\":\n manager = self._control_manager\n elif name == \"alarm\":\n manager = self._alarm_manager\n elif name == \"state\":\n manager = self._machine_manager\n else:\n manager = self._function_manager\n\n return manager",
"def get_managers():\n return {'managers': get_users('managers')}",
"def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body",
"def get_system_managers(only_name: bool = False) -> list[str]:\n\tHasRole = DocType(\"Has Role\")\n\tUser = DocType(\"User\")\n\n\tif only_name:\n\t\tfields = [User.name]\n\telse:\n\t\tfields = [User.full_name, User.name]\n\n\tsystem_managers = (\n\t\tfrappe.qb.from_(User)\n\t\t.join(HasRole)\n\t\t.on(HasRole.parent == User.name)\n\t\t.where(\n\t\t\t(HasRole.parenttype == \"User\")\n\t\t\t& (User.enabled == 1)\n\t\t\t& (HasRole.role == \"System Manager\")\n\t\t\t& (User.docstatus < 2)\n\t\t\t& (User.name.notin(frappe.STANDARD_USERS))\n\t\t)\n\t\t.select(*fields)\n\t\t.orderby(User.creation, order=Order.desc)\n\t\t.run(as_dict=True)\n\t)\n\n\tif only_name:\n\t\treturn [p.name for p in system_managers]\n\telse:\n\t\treturn [formataddr((p.full_name, p.name)) for p in system_managers]",
"def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info",
"def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)",
"def manager_configs_list(self):\n _, body = self.request('/v1.1/managers/configs', 'GET')\n return body",
"def get_admin_by_name(self, uid, name):\n admin_data = self.list_admin_roles(uid)\n for admin in admin_data:\n if admin['name'] == name:\n return ZenossDeviceManagementAdmin(\n self.api_url,\n self.api_headers,\n self.ssl_verify,\n admin\n )\n\n return None",
"def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def GetManager(self):\r\n\r\n return self.manager",
"def get_managers_list(self):\n try:\n role_id = [x[0] for x in self.db_handler.get_roles_list() if x[1] == 'Менеджер'][0]\n staff_by_role = self.db_handler.get_all_staff_by_role_id(role_id)\n\n self.logger.write_to_log('managers list got', 'model')\n\n return staff_by_role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def get_instance_group_manager(self, name, zone):\n return self.call_api('/zones/%s/instanceGroupManagers/%s' % (zone, name))",
"def getPackageManager(self) -> None:\n\t\tfor pkgmgr in config.SUPPORTED_PACKAGE_MGRS:\n\t\t\tif subprocess.run([\"which\", pkgmgr]).returncode == 0:\n\t\t\t\tself.package_manager = pkgmgr\n\t\t\t\treturn\n\t\tlogger.error(\"Supported package manager not found, aborting.\")\n\t\traise ValueError(\"Package manager unsupported\")",
"def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def use_manager(manager_name, environment):\n assert manager_name in environment.managers, (\n 'Manager {selected} was not created by this test run. '\n 'Available managers are: {available}'.format(\n selected=manager_name,\n available=', '.join(environment.managers.keys()),\n )\n )\n\n manager = environment.managers[manager_name]\n\n environment.add_cleanup(\n environment.cfy.profiles.delete,\n kwargs={\n 'profile_name': manager['ip'],\n },\n )\n\n environment.cfy.profiles.use(\n ip=manager['ip'],\n username=manager['username'],\n password=manager['password'],\n rest_certificate=manager['certificate_path'],\n )",
"def manager():\n pass",
"def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body",
"def get_instance_group_managers(self, zone):\n response = self.call_api('/zones/%s/instanceGroupManagers' % zone)\n return {manager['name']: manager for manager in response.get('items', [])}",
"def get_manager():\n return __manager__",
"def get_mgr(cls, id):\n assert id in cls.s_memory_mgrs, 'invalid id[%s] for memory managers' % (\n id)\n return cls.s_memory_mgrs[id]",
"def info(self, name=None):\n data = self.cloudman.list_servers(filters={'name': name})\n\n \"\"\"\n vms = self.list()\n print (\"VMS\", vms)\n data = None\n for entry in vms:\n print (\"FFF\", entry['name'])\n if entry['name'] == name:\n data = entry\n break\n \"\"\"\n\n if data is None:\n raise ValueError(f\"vm not found {name}\")\n\n r = self.update_dict(data, kind=\"vm\")\n return r",
"def get_personnel():\r\n if len(man) == 0:\r\n print(\"There are no managers\")\r\n else:\r\n for i in man:\r\n print(str(i))",
"def get_available_package_manager(self):\n for manager in self.package.keys():\n try:\n executable = self.SUPPORTED_PACKAGE_MANAGERS[manager]\n if is_executable_exists(executable):\n return manager\n except KeyError:\n raise NotImplementedError(\"{} is not supported\".format(manager))\n raise NotImplementedError(\"This system doesn't have any of the \"\n 'supported package manager(s): '\n '{}'.format(','.join(self.package.keys())))",
"def manager_config(self, manager):\n _, body = self.request('/v1.1/managers/configs/%s' % manager, 'GET')\n return body",
"def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None",
"def getFeatureManager(address=None):\n return __mgr_cache__[address]",
"def getManager(self):\n return self._manager",
"def fusion_api_get_fabric_manager_report(self, uri, api=None, headers=None):\n param = '/report/'\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)",
"def list_podmanager(cls):\n return cls.dbdriver.list_podmanager()"
] | [
"0.60854346",
"0.59693724",
"0.59690547",
"0.59468293",
"0.56654155",
"0.5441895",
"0.53470236",
"0.5284382",
"0.52781856",
"0.5217851",
"0.520762",
"0.51865655",
"0.5180018",
"0.51691216",
"0.514083",
"0.51072943",
"0.50171417",
"0.49826366",
"0.49755776",
"0.49681163",
"0.49610028",
"0.49577922",
"0.49464494",
"0.4906325",
"0.48958537",
"0.48890737",
"0.48723644",
"0.4846851",
"0.48426032",
"0.4815462"
] | 0.63042766 | 0 |
Deletes the Fabric Manager [Arguments] | def fusion_api_delete_fabric_manager(self, name, uri=None, api=None, headers=None):
return self.fabricmanager.delete(name=name, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rm(args):\n args.delete = True\n return remove(args)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n os.system(\"rm \"+self._name)",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])",
"def delete(self):\n self.manager.delete(self)",
"def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def do_destroy(self, arg):\n jail_destroy('destroy', arg)",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()",
"def destroy(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n if instance_name:\n instance = utils.settle_instance(instance_name)\n path = instance['path']\n else:\n path = os.getcwd()\n mech_path = os.path.join(path, '.mech')\n\n if os.path.exists(mech_path):\n if force or utils.confirm(\"Are you sure you want to delete {instance_name} at {path}\".format(instance_name=instance_name, path=path), default='n'):\n puts_err(colored.green(\"Deleting...\"))\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n vmrun.stop(mode='hard', quiet=True)\n time.sleep(3)\n vmrun.deleteVM()\n shutil.rmtree(mech_path)\n else:\n puts_err(colored.red(\"Deletion aborted\"))\n else:\n puts_err(colored.red(\"The box hasn't been initialized.\"))",
"def fusion_api_delete_deployment_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dep_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def manager_remove(self, manager):\n self.request('/v1.1/managers/configs/%s' % manager, 'DELETE')",
"def destroy(self):\n res = subprocess.run(\"{} rm {}\".format(self.binary,\n self.args['name']))\n if res.returncode != 0:\n sys.exit(2)\n return res",
"def delete():",
"def delete_podmanager(cls, podmanager_uuid):\n cls.dbdriver.delete_podmanager(podmanager_uuid)",
"def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)",
"def do_destroy(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n del objects[args]\n storage.save()",
"def delete_controller(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns cluser delete name\")\n config.delete_object(name=args[0], kind='Controller')",
"def do_command(self, args):\n vendorops = dbops.Vendors()\n vendorops.delete(args)",
"def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)",
"def delete_from_provider(self, builder, provider, credentials, target, parameters):",
"def delete_machine(args):\n session = Session()\n # the following is used to help with code completion\n \"\"\"session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\"\"\"\n machine = session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).first()\n if machine is not None:\n print \"Deleting machine with hostname: \" + machine.hostname + \" and with id: \" + str(machine.id)\n session.query(PoolMachine).filter(PoolMachine.hostname==args.hostname).delete()\n session.commit()\n else:\n print \"No machine was found!\"",
"def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")",
"def delete(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\t_objstore.delete(args['type'], args['name'])\n\treturn {'message':'ok'}",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])"
] | [
"0.6673103",
"0.6598021",
"0.6598021",
"0.65250075",
"0.6506779",
"0.64955616",
"0.64426833",
"0.6403957",
"0.63426346",
"0.6341505",
"0.6298142",
"0.6287084",
"0.62784547",
"0.6251517",
"0.6181623",
"0.6166767",
"0.61451185",
"0.6132013",
"0.6025371",
"0.6014678",
"0.6004683",
"0.60012263",
"0.59893435",
"0.59748024",
"0.5971648",
"0.59674203",
"0.5959444",
"0.59512264",
"0.594365",
"0.59352267"
] | 0.72733957 | 0 |
Edits Fabric Manager [Arguments] | def fusion_api_edit_fabric_manager(self, body, uri, api=None, headers=None):
return self.fabricmanager.put(body=body, uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit():",
"def edit(self, **kwargs):\n ...",
"async def edit(self, *, name, roles: Optional[Any] = ..., reason: Optional[Any] = ...):\n ...",
"def edit(argv):\n output = lib.output.CLIoutput(\"vadapter\")\n valid_list = ['assignment_type','component_mask' ,'vfabric_id', 'init_type', 'io_module_id', 'name',\n 'mac', 'promiscuous', 'protocol', 'silent_listener', 'vlan' ,'wwnn',\n 'wwpn','status']\n\n if (len(argv) < 2 ):\n output.completeOutputError(lib.errorhandler.InvalidArgumentCount(3, \"vadapter-name\", syntax=edit.__doc__,\n descape = \"Please specify the vadapter id\"))\n return output\n\n if ( argv[1] == '?' or argv[1] == 'help'):\n output.completeOutputError(lib.errorhandler.InvalidArgumentCount(syntax=edit.__doc__, descape = \"Help\"))\n return output\n\n if argv[2].lower() == 'online':\n if isEditName(argv[1]) == -1:\n print \"Error Not a valid Id\"\n return output \n else:\n dict = {}\n dict['id'] = int(argv[1])\n try:\n result = vfm.py_vfm_vadapter_online(dict)\n except StandardError, e:\n print \"Error!\" ,e\n return output \n else:\n print result\n return output\n\n _parse_edit_or_add_argv(output, argv, valid_list,syntax = edit.__doc__ , call_from = 'edit' ) \n\n return output",
"def update(self, args):\n pass",
"def main():\n\n parser = cli.Parser()\n parser.add_required_arguments(cli.Argument.CLUSTER_NAME)\n parser.add_custom_argument('--key', required=True, action='store',\n help='Name of ESXi Advanced Setting to update')\n parser.add_custom_argument('--value', required=True, action='store',\n help='Value of the ESXi Advanced Setting to update')\n args = parser.get_args()\n try:\n si = service_instance.connect(args)\n\n content = si.RetrieveContent()\n\n cluster = pchelper.get_obj(content, [vim.ClusterComputeResource], args.cluster_name)\n\n hosts = cluster.host\n for host in hosts:\n option_manager = host.configManager.advancedOption\n option = vim.option.OptionValue(key=args.key,\n value=int(args.value))\n print(\"Updating %s on ESXi host %s \"\n \"with value of %s\" % (args.key, host.name, args.value))\n if option_manager.UpdateOptions(changedValue=[option]):\n print(\"Settings updated!\")\n\n except vmodl.MethodFault as ex:\n print(\"Caught vmodl fault : \" + ex.msg)\n return -1\n except Exception as ex:\n print(\"Caught exception : \" + str(ex))\n return -1\n\n return 0",
"def setInfo(*args):",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def edit(pac, man):\n # TODO: take editor from commandline\n # fink has no edit function\n if man == 'fink':\n # fink dumpinfo -finfofile pac | cut -d: -f2 | xargs $editor\n rawdump = Popen(['fink', 'dumpinfo', '-finfofile', pac],\n stdout=PIPE).communicate()[0]\n os.system('open ' + rawdump.split(':')[1])\n elif man == 'brew':\n # this might need adjustments based on if .info files are asociated\n os.system('brew edit ' + pac)\n elif man == 'port':\n os.system('port edit ' + pac)",
"async def edit(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)",
"def cli(ctx):",
"def cli(ctx):",
"def update(*args):",
"def _edit_user(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows: \")\n for i in users:\n print(users[i][\"name\"])\n #List specific user's settings and get user id\n userID = self._list_user_settings(users)\n #Loop until valid option given\n option = False\n while not option:\n option = input(\"Please enter the setting you would like to change: \")\n if option not in users[userID]:\n option = False\n print(\"That setting is not valid.\")\n #Get input for new setting\n args = input(\"Please enter what you would like to change that setting to: \")\n #Output\n command = \"edit_user {0} {1} {2}\\r\\n\".format(userID, option, args)\n return(command)",
"def _edit_setting(self):\n settings = fileIO.load_json(\"settings.json\")\n self._list_settings(settings=settings)\n option = False\n while not option: #While loop until valid setting given\n option = input(\"Please type the setting you would like to change: \")\n if option not in settings:\n option = False\n newSetting = input(\"Please enter what you would like to change that setting to: \")\n command = \"edit_setting {0} {1}\".format(option, newSetting)\n return(command)",
"def alter(self,\r\n owner=None,\r\n version=None,\r\n description=None,\r\n permission=None):\r\n url = \"%s/alter\" % self._url\r\n params = {\r\n 'f' : 'json'\r\n }\r\n if owner or\\\r\n version or\\\r\n description or\\\r\n permission:\r\n if owner:\r\n params['ownerName'] = owner\r\n if version:\r\n params['versionName'] = version\r\n if description:\r\n params['description'] = description\r\n if permission:\r\n params['accessPermission'] = permission\r\n res = self._con.post(url, params)\r\n self._properties = None\r\n return res['success']\r\n return False",
"def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)",
"def change(self, ids, **kwargs):\n args = {}\n for key, value in kwargs.iteritems():\n argument = make_rpc_name(key)\n (arg, val) = argument_value_convert('torrent-set'\n , argument, value, self.rpc_version)\n args[arg] = val\n\n if len(args) > 0:\n self._request('torrent-set', args, ids, True)\n else:\n ValueError(\"No arguments to set\")",
"def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))",
"def command_wrapupdate(self):\n wrapupdater.main(*self.args())",
"def configure(args):\n print('Configures HPC fleet with given name \"{}\"'.format(args))",
"def cli(self, env):\n raise NotImplementedError",
"def edit(self):\n\n pass",
"def __call__(self, argv, help):\n from ploy.common import sorted_choices\n parser = argparse.ArgumentParser(\n prog=\"%s fab\" % self.ctrl.progname,\n description=help,\n add_help=False,\n )\n instances = self.ctrl.get_instances(command='init_ssh_key')\n parser.add_argument(\"instance\", nargs=1,\n metavar=\"instance\",\n help=\"Name of the instance from the config.\",\n type=str,\n choices=sorted_choices(instances))\n parser.add_argument(\"fabric_opts\",\n metavar=\"...\", nargs=argparse.REMAINDER,\n help=\"Fabric options\")\n args = parser.parse_args(argv)\n\n instance = instances[args.instance[0]]\n with fabric_integration(self.ctrl, instance, fabcmd=True):\n from fabric.main import main\n fabfile = get_fabfile(instance)\n newargv = ['fab', '-f', fabfile]\n if args.fabric_opts:\n newargv = newargv + args.fabric_opts\n with sys_argv(newargv):\n main()",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():"
] | [
"0.5791912",
"0.5778062",
"0.55881244",
"0.5558738",
"0.5506726",
"0.5498924",
"0.5436379",
"0.54148054",
"0.53646386",
"0.53453696",
"0.53438175",
"0.53438175",
"0.5334068",
"0.53141785",
"0.5312301",
"0.5311755",
"0.52907723",
"0.526777",
"0.5249392",
"0.52193946",
"0.52171355",
"0.5200176",
"0.5197351",
"0.519718",
"0.51892465",
"0.51892465",
"0.51892465",
"0.51892465",
"0.51892465",
"0.51892465"
] | 0.6381094 | 0 |
Gets Tenants for the provided Fabric Manager [Arguments] | def fusion_api_get_fabric_manager_tenants(self, uri, name=None, param='', api=None, headers=None):
param = '/tenants/'
if name:
param += '?&filter="\'name\' == \'%s\'"' % (name)
return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result",
"def get_tenants(self):",
"def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]",
"def tenants_for_token(self, context):\n token_ref = self.token_api.get_token(context=context,\n token_id=context['token_id'])\n assert token_ref is not None\n\n user_ref = token_ref['user']\n tenant_refs = []\n for tenant_id in user_ref['tenants']:\n tenant_refs.append(self.identity_api.get_tenant(\n context=context,\n tenant_id=tenant_id))\n return self._format_tenants_for_token(tenant_refs)",
"def get_tenants(self, **kwargs):\n url = self.get_url('tenants', kwargs, ['begin', 'end'])\n return self.api_client.get(url).json()",
"def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body",
"def fusion_api_get_fabric_manager(self, uri=None, param='', api=None, headers=None):\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)",
"def tenants(self):\n # print \"tenant list is %s\" % self.auth.tenants.list()\n if not self._tenancy:\n self._tenancy = {}\n for tenant in self.auth.tenants.list():\n t = Tenant(tenant, self)\n self._tenancy[t[\"name\"]] = t\n return self._tenancy",
"def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants",
"def manager_agents(self):\n return self.get(\"manager_agents\")",
"def get_treemaker_name_and_class(tm):\n global treemakers\n if isinstance(tm, str):\n if not tm in treemakers:\n raise ValueError(\"No TreeMaker named %s known to hax!\" % tm)\n return tm, treemakers[tm]\n elif isinstance(tm, type) and issubclass(tm, TreeMaker):\n return tm.__name__, tm\n else:\n raise ValueError(\"%s is not a TreeMaker child class or name, but a %s\" % (tm, type(tm)))",
"def get_tautomers_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is tautomer of\")]\n else:\n return []",
"def get_quotas_tenant(self, **_params):\r\n return self.get(self.quota_path % 'tenant', params=_params)",
"def tiers(self, args):\n parser = OptionParser(usage=\"vdc tiers <options>\")\n parser.add_option(\"-n\", \"--name\",\n help=\"The name of the virtual datacenter\", dest=\"name\")\n (options, args) = parser.parse_args(args)\n name = options.name\n if not name:\n parser.print_help()\n return\n\n # Once user input has been read, find the virtual datacenter\n try:\n cloud = self._context.getCloudService()\n vdc = cloud.findVirtualDatacenter(\n VirtualDatacenterPredicates.name(name))\n if vdc:\n tiers = vdc.listStorageTiers()\n pprint_tiers(tiers)\n else:\n print \"No virtual datacenter found with name: %s\" % name\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()",
"def get_managers():\n return {'managers': get_users('managers')}",
"async def getTiers(self, ctx):\n server_dict = self.get_server_dict(ctx)\n tierList = server_dict.setdefault(\"Tiers\", [])\n\n if(len(tierList) > 0):\n await self.bot.say(\"Tiers:\")\n for tier in tierList:\n await self.bot.say(tier)\n else:\n await self.bot.say(\":x: No tiers in tier list\")",
"def test_enumerating_tautomers_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n # test the max molecules option\n mol = Molecule.from_smiles(\n \"c1[nH]c2c(=O)[nH]c(nc2n1)N\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n\n tauts_no = 5\n tautomers = mol.enumerate_tautomers(\n max_states=tauts_no, toolkit_registry=toolkit\n )\n assert len(tautomers) <= tauts_no\n assert mol not in tautomers",
"def test_enumerating_tautomers_options(self, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n # test the max molecules option\n mol = Molecule.from_smiles(\n \"c1[nH]c2c(=O)[nH]c(nc2n1)N\",\n toolkit_registry=toolkit,\n allow_undefined_stereo=True,\n )\n\n tauts_no = 5\n tautomers = mol.enumerate_tautomers(\n max_states=tauts_no, toolkit_registry=toolkit\n )\n assert len(tautomers) <= tauts_no\n assert mol not in tautomers",
"def fusion_api_get_san_manager(self, uri=None, param='', api=None, headers=None):\n return self.dm.get(uri=uri, api=api, headers=headers, param=param)",
"def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)",
"def test_module(client):\n client.get_tenant_mappings()",
"def getAllTenantsForUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info",
"def manager_config(self, manager):\n _, body = self.request('/v1.1/managers/configs/%s' % manager, 'GET')\n return body",
"def main(argv):\n users = [{'username': 'mini-mon', 'project': 'mini-mon', 'password': 'password', 'role': 'monasca-user'},\n {'username': 'monasca-agent', 'project': 'mini-mon', 'password': 'password', 'role': 'monasca-agent'},\n {'username': 'mini-mon', 'project': 'mini-mon', 'password': 'password', 'role': 'admin'},\n {'username': 'admin', 'project': 'admin', 'password': 'secretadmin', 'role': 'monasca-user'},\n {'username': 'demo', 'project': 'demo', 'password': 'secretadmin', 'role': 'monasca-user'}]\n\n service_host = argv[0]\n url = 'http://' + service_host + ':35357/v2.0'\n\n token = None\n\n cacert = None\n\n if not token:\n username = argv[1]\n password = argv[2]\n tenant_name = argv[3]\n token = get_token(url, cacert, username, password, tenant_name)\n\n key = client.Client(token=token, endpoint=url, cacert=cacert)\n\n tenants = []\n for user in users:\n if 'project' in user and user['project'] not in tenants:\n tenants.append(user['project'])\n\n if not add_tenants(key, tenants):\n return 1\n\n if not add_users(key, users):\n return 1\n\n if not add_user_roles(key, users):\n return 1\n\n monasca_url = 'http://' + service_host + ':8070/v2.0'\n\n if not add_service_endpoint(key, 'monasca', 'Monasca monitoring service', 'monitoring', monasca_url, 'RegionOne'):\n return 1\n\n return 0",
"def get_fabric_switches(self):\n # Leafs\n class_query = ClassQuery('fabricNode')\n class_query.propFilter = 'eq(fabricNode.role, \"leaf\")'\n leafs = self.moDir.query(class_query)\n # Two lists are created, one for the distinguished names and other for the relative names\n dns = []\n rns = []\n for leaf in leafs:\n dns.append(str(leaf.dn))\n rns.append(str(leaf.rn))\n # Spines\n class_query = ClassQuery('fabricNode')\n class_query.propFilter = 'eq(fabricNode.role, \"spine\")'\n spines = self.moDir.query(class_query)\n for spine in spines:\n dns.append(str(spine.dn))\n rns.append(str(spine.rn))\n # Need to be human sorted (e.g 1,2,3,11 and not 1,11,2,3)\n dns.sort(key=natural_keys)\n rns.sort(key=natural_keys)\n return dns, rns",
"def taskmanager_factory(taskmanager_toolkit):\n\n taskmanager_toolkits = {'simple': taskmanager.AdaptSimple()}\n\n if taskmanager_toolkit not in taskmanager_toolkits.keys():\n raise ValueError('unsupported TaskManager name: ' + taskmanager_toolkit)\n\n return taskmanager_toolkits[taskmanager_toolkit]",
"def getAllTenants(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def get_all(self, context, type_):\n types = None\n if type_ and isinstance(type_, basestring):\n types = type_.strip(\",\").split(\",\")\n\n try:\n db_resource_mgrs_data = self.db_api.get_all_resource_managers(\n context, types=types)\n\n _resource_mgrs_data = []\n for db_resource_mgr_data in db_resource_mgrs_data:\n _resource_mgrs_data.append(_make_response(\n db_resource_mgr_data))\n except Exception as e:\n msg = (\"Error retrieving the 'resource managers' reason : %s\"\n % e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n return _resource_mgrs_data"
] | [
"0.5958651",
"0.584731",
"0.57416695",
"0.5416656",
"0.5394366",
"0.5332738",
"0.52771735",
"0.5249437",
"0.5218875",
"0.51591974",
"0.5118933",
"0.5095858",
"0.5093036",
"0.5080304",
"0.50682324",
"0.4968553",
"0.4935974",
"0.4935974",
"0.49077785",
"0.4867381",
"0.48321927",
"0.4821849",
"0.4820463",
"0.47965038",
"0.47856203",
"0.47789288",
"0.4759701",
"0.47511345",
"0.4737263",
"0.4711261"
] | 0.70706767 | 0 |
Initiates Fabric Manager Refresh using Snapshot API [Arguments] | def fusion_api_fabric_manager_refresh(self, body, uri, api=None, headers=None):
param = '/snapshot/'
return self.fabricmanager.put(body=body, uri=uri, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Refresh(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"refresh\", payload=payload, response_object=None)",
"def testRefresh(self):\n \n pass",
"async def _service_refresh(self, part: str) -> None:\n _LOGGER.debug(\"Manually refresh %s\", part)\n event = REFRESH_STR_TO_EVENT_DTO.get(part, None)\n if event:\n self._vacuum_bot.events.request_refresh(event)\n elif part == REFRESH_MAP:\n self._vacuum_bot.map.refresh()\n else:\n _LOGGER.warning('Service \"refresh\" called with unknown part: %s', part)",
"def _Refresh(self):\n raise NotImplementedError",
"def command_refresh_repo(self):\n repoinit.refresh(*self.args())",
"def snap_refresh(packages, *flags):\n if type(packages) is not list:\n packages = [packages]\n\n flags = list(flags)\n\n message = 'Refreshing snap(s) \"%s\"' % ', '.join(packages)\n if flags:\n message += ' with options \"%s\"' % ', '.join(flags)\n\n log(message, level='INFO')\n return _snap_exec(['refresh'] + flags + packages)",
"def refresh_screen(self):",
"def main(logger):\n logger.info('Snapshot Reaper starting')\n keep_running = True\n while keep_running:\n logger.info(\"Connecting to vCenter {} as {}\".format(const.INF_VCENTER_SERVER, const.INF_VCENTER_USER))\n with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER,\n password=const.INF_VCENTER_PASSWORD) as vcenter:\n try:\n start_loop = time.time()\n reap_snapshots(vcenter, logger)\n except Exception as doh:\n logger.exception(doh)\n keep_running = False\n else:\n ran_for = int(time.time() - start_loop)\n logger.debug('Took {} seconds to check all snapshots'.format(ran_for))\n loop_delta = LOOP_INTERVAL - ran_for\n sleep_for = max(0, loop_delta)\n time.sleep(sleep_for)",
"def snapshot_image_on_provider(self, builder, provider, credentials, target, template, parameters):",
"def refresh(dataset, client):\n pass",
"def refresh():\n return __apf_cmd(\"-e\")",
"def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True",
"def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True",
"def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot",
"def Run(self, args):\n snapshot_ref = args.CONCEPTS.snapshot.Parse()\n\n if args.CONCEPTS.volume.Parse() is None:\n raise exceptions.RequiredArgumentException(\n '--volume', 'Requires a volume to update snapshot of')\n\n client = snapshots_client.SnapshotsClient(self._RELEASE_TRACK)\n labels_diff = labels_util.Diff.FromUpdateArgs(args)\n original_snapshot = client.GetSnapshot(snapshot_ref)\n\n # Update labels\n if labels_diff.MayHaveUpdates():\n labels = labels_diff.Apply(\n client.messages.Snapshot.LabelsValue, original_snapshot.labels\n ).GetOrNone()\n else:\n labels = None\n\n snapshot = client.ParseUpdatedSnapshotConfig(\n original_snapshot, description=args.description, labels=labels\n )\n\n updated_fields = []\n # add possible updated snapshot fields\n # TODO(b/243601146) add config mapping and separate config file for update\n if args.IsSpecified('description'):\n updated_fields.append('description')\n if (\n args.IsSpecified('update_labels')\n or args.IsSpecified('remove_labels')\n or args.IsSpecified('clear_labels')\n ):\n updated_fields.append('labels')\n update_mask = ','.join(updated_fields)\n\n result = client.UpdateSnapshot(\n snapshot_ref, snapshot, update_mask, args.async_\n )\n if args.async_:\n command = 'gcloud {} netapp volumes snapshots list'.format(\n self.ReleaseTrack().prefix\n )\n log.status.Print(\n 'Check the status of the updated snapshot by listing all snapshots:\\n'\n ' $ {} '.format(command)\n )\n return result",
"def refresh_view():\n pass",
"def refresh_status() -> None:\n ...",
"def main():\n\n args = get_args()\n\n try:\n if args.disable_ssl_verification:\n service_instance = connect.SmartConnectNoSSL(host=args.host,\n user=args.user,\n pwd=args.password,\n port=int(args.port))\n else:\n service_instance = connect.SmartConnect(host=args.host,\n user=args.user,\n pwd=args.password,\n port=int(args.port))\n\n atexit.register(connect.Disconnect, service_instance)\n\n content = service_instance.RetrieveContent()\n\n # Retrieve Datastore Object\n datastore = disk.get_obj(content, [vim.Datastore], args.datastore)\n\n # Retrieve FCD Object\n vdisk = disk.retrieve_fcd(content, datastore, args.vdisk)\n\n # Retrieve Snapshot Object\n snapshot = disk.retrieve_fcd_snapshot(\n content, datastore, vdisk, args.snapshot)\n\n # Confirming Snapshot deletion\n if not args.yes:\n response = cli.prompt_y_n_question(\"Are you sure you want to \"\n \"delete snapshot '\" +\n args.snapshot + \"'?\",\n default='no')\n if not response:\n print(\"Exiting script. User chose not to delete snapshot.\")\n exit()\n\n # Delete FCD Snapshot\n storage = content.vStorageObjectManager\n task = storage.DeleteSnapshot_Task(\n vdisk.config.id, datastore, snapshot)\n tasks.wait_for_tasks(service_instance, [task])\n\n except vmodl.MethodFault as error:\n print(\"Caught vmodl fault : \" + error.msg)\n return -1\n\n return 0",
"def link_snapshot(argstr):\n pass",
"def refresh(self, url, args, cancellationSignal):\n pass",
"def refresh(self) -> object:\n requestor = Requestor(local_api_key=self._api_key)\n url = self.instance_url()\n response, api_key = requestor.request(method=RequestMethod.GET, url=url, params=self._retrieve_params)\n self.refresh_from(values=response, api_key=api_key)\n return self",
"def __call__(\n self,\n request: pubsub.GetSnapshotRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Snapshot:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"get\",\n \"uri\": \"/v1/{snapshot=projects/*/snapshots/*}\",\n },\n ]\n request, metadata = self._interceptor.pre_get_snapshot(request, metadata)\n pb_request = pubsub.GetSnapshotRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Snapshot()\n pb_resp = pubsub.Snapshot.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_get_snapshot(resp)\n return resp",
"def refresh(self) -> None:\n pass",
"def refresh(self) -> None:\n pass",
"def refresh(self) -> None:\n pass",
"async def _device_refresh(self, **kwargs):\n\n device_id = self._device_id\n if not device_id:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n if self._use_channel_info:\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=_command(COMMAND_REFRESH),\n raise_for_status=False,\n ) as resp:\n if resp.status == 409:\n self._state = STStatus.STATE_OFF\n return\n resp.raise_for_status()\n await resp.json()\n\n return",
"def on_refresh(self):\n pass",
"def xtest_snapshot_api(self):\n\n req = httplib2.Http(\".cache\")\n body = r\"\"\"{ \"snapshot\": { \"instanceId\": \"123\", \"name\": \"dbapi_test\" } }\"\"\"\n \n # Test creating an snapshot without a body in the request.\n LOG.info(\"* Creating an snapshot without a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test creating an snapshot with a malformed body.\n LOG.info(\"* Creating an snapshot with a malformed body\")\n bad_body = r\"\"\"{ \"snapshot\": {}]\"\"\"\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", bad_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n\n # Test listing all snapshots with a body in the request.\n LOG.info(\"* Listing all snapshots with a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test listing all snapshots for a specific instance with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\")\n resp, content = req.request(API_URL + \"snapshots?instanceId=\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test listing all snapshots for a specific tenant with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\") \n resp, content = req.request(API_URL + \"snapshots?tenantId=\" + TENANT_ID, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test getting a non-existent snapshot.\n LOG.info(\"* Getting dummy snapshot\")\n resp, content = req.request(API_URL + \"snapshots/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test creating a new instance from a dummy snapshot.\n instance_body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"102\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.1.2\"\n },\n \"databases\": [\n {\n \"name\": \"testdb\",\n \"character_set\": \"utf8\",\n \"collate\": \"utf8_general_ci\"\n },\n {\n \"name\": \"abcdefg\"\n }\n ],\n \"volume\":\n {\n \"size\": \"2\"\n }\n }\n }\"\"\"\n \n LOG.info(\"* Creating instance from dummy snapshot\")\n snap_body = json.loads(instance_body)\n snap_body['instance']['snapshotId'] = \"dummy\"\n snap_body = json.dumps(snap_body)\n resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n content = json.loads(content)\n self.assertEqual(500, resp.status)\n \n # This test is handled by the error handling in the API server\n# # Test creating a new instance from bad snapshot data in the body.\n# LOG.debug(\"* Creating instance from bad snapshot data in the body\")\n# snap_body = json.loads(instance_body)\n# snap_body['instance']['snapshotId'] = {}\n# snap_body = json.dumps(snap_body)\n# resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n# LOG.debug(resp)\n# LOG.debug(content)\n# content = json.loads(content)\n# self.assertEqual(500, resp.status) \n \n # Test deleting a non-existent snapshot.\n LOG.info(\"* Deleting dummy snapshot\")\n resp, content = req.request(API_URL + \"snapshots/dummy\", \"DELETE\", \"\", 
AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)",
"def __call__(\n self,\n request: pubsub.UpdateSnapshotRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Snapshot:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"patch\",\n \"uri\": \"/v1/{snapshot.name=projects/*/snapshots/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_update_snapshot(request, metadata)\n pb_request = pubsub.UpdateSnapshotRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Snapshot()\n pb_resp = pubsub.Snapshot.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_update_snapshot(resp)\n return resp",
"def restore_from_snapshot(SnapshotId=None):\n pass"
] | [
"0.58108914",
"0.56057084",
"0.55814093",
"0.55398834",
"0.5529131",
"0.5527788",
"0.55108947",
"0.53939646",
"0.5334728",
"0.530829",
"0.529463",
"0.52867246",
"0.52867246",
"0.52739894",
"0.523049",
"0.5227205",
"0.5202879",
"0.5193885",
"0.51764804",
"0.5140236",
"0.51358724",
"0.5133438",
"0.51273054",
"0.51273054",
"0.51273054",
"0.5119686",
"0.5119033",
"0.51132035",
"0.5104047",
"0.51000446"
] | 0.6941463 | 0 |
Gets Reports for Fabric Manager [Arguments] Uri of the existing Fabric Manager or Tenant | def fusion_api_get_fabric_manager_report(self, uri, api=None, headers=None):
param = '/report/'
return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_get_fabric_manager(self, uri=None, param='', api=None, headers=None):\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)",
"def fusion_api_get_fabric_manager_tenants(self, uri, name=None, param='', api=None, headers=None):\n param = '/tenants/'\n if name:\n param += '?&filter=\"\\'name\\' == \\'%s\\'\"' % (name)\n return self.fabricmanager.get(uri=uri, api=api, headers=headers, param=param)",
"def get_reports_command(\n client: Client, args: Dict[str, Any]\n) -> Union[str, Dict[str, Any]]:\n # Validate arguments\n params = get_reports_params(args)\n\n # Preparing header\n headers = {\n 'X-FeApi-Token': client.get_api_token(),\n 'Accept': CONTENT_TYPE_JSON,\n }\n\n # API call\n resp: Response = client.http_request(\n method='GET',\n url_suffix=URL_SUFFIX['GET_REPORTS'],\n params=params,\n headers=headers,\n )\n\n # Create file from Content\n if int(resp.headers.get('Content-Length', '')) > 0:\n file_entry = fileResult(\n filename=generate_report_file_name(args),\n data=resp.content,\n file_type=EntryType.ENTRY_INFO_FILE,\n )\n return file_entry\n else:\n return MESSAGES['NO_RECORDS_FOUND'].format('report contents')",
"def QueryReports(self, parameters):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/falconx-sandbox/QueryReports\n FULL_URL = self.base_url+'/falconx/queries/reports/v1'\n HEADERS = self.headers\n PARAMS = parameters\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, params=PARAMS, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n\n return returned",
"def GenerateReport(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('generateReport', payload=payload, response_object=None)",
"def get_fmriprep_reports_cli(config_file, output_name, clear_temp, debug):\n from .get_reports import get_reports\n get_reports(config_file, output_name, debug, clear_temp=clear_temp)",
"def access_stacks_report_list(context, endpoint, parameter='', history=''):\n url = urljoin(context.gemini_api_url, '{ep}/{param}'.format(ep=endpoint, param=parameter))\n context.response = requests.get(url)\n context.history = True if history == 'history' else False",
"def getPortletReports(context):\n\n return getReports(context, category=\"Portlet\")",
"def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body",
"def reports_cli():",
"def GenerateReport(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = {\"Arg1\": self.href}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"generateReport\", payload=payload, response_object=None)",
"def GenerateReport(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = {\"Arg1\": self.href}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"generateReport\", payload=payload, response_object=None)",
"def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def view(self, *args, **kwargs):\n return self._resources_manager.view(*args, **kwargs)",
"def uri(self) -> list:\n raise NotImplementedError(\"ErddapArgoDataFetcher.uri not implemented\")",
"def amtool_receivers(self, mess, args):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_receivers()\n return result",
"def call_link_reports(args) ->None:\n\n if not args['no_cmd']:\n print_link_reports(args['report-id'])\n if args['yaml']:\n yaml_file(args['report-id'])\n if args['csv']:\n csv_file(args['report-id'])\n if args['json']:\n json_file(args['report-id']) \n\n config.logger.info(\"Link Report generated according to the format chosen by user\")",
"def fusion_api_get_san_manager(self, uri=None, param='', api=None, headers=None):\n return self.dm.get(uri=uri, api=api, headers=headers, param=param)",
"def catalog_alias_get(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n response = alias.retrieve()\n if not args.quiet:\n pp(response)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e",
"def reports_email(self):\r\n return reports.ReportsEmail(self)",
"def request(self, **kwargs):\n if not hasattr(self, kwargs['report']):\n raise AttributeError(f'Report {kwargs[\"report\"]} not exist')\n report_name = kwargs.pop('report')\n return getattr(self, report_name)(**kwargs)",
"def fetch(self, limit=0, offset=0, report_type=\"\"):\n params = {\n \"limit\": limit,\n \"offset\": offset,\n \"type\": report_type,\n }\n return self._phishdetect.get(API_PATH[\"reports_fetch\"], params=params)",
"def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def GetSummaryReports(self, parameters):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/falconx-sandbox/GetSummaryReports\n FULL_URL = self.base_url+'/falconx/entities/report-summaries/v1'\n HEADERS = self.headers\n PARAMS = parameters\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, params=PARAMS, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n\n return returned",
"def main():\n r = ReportHelper()\n today = dt.today()\n\n start_date = (today - timedelta(days=1)).strftime('%Y-%m-%d')\n end_date = today.strftime('%Y-%m-%d')\n response, ingestion_results = r.get_report(start_date, end_date, 'daily')\n logger.debug('Daily report data from {s} to {e}'.format(s=start_date, e=end_date))\n logger.debug(json.dumps(response, indent=2))\n logger.debug(json.dumps(ingestion_results, indent=2))\n\n if time_to_generate_monthly_report(today):\n last_day_of_prev_month = date(today.year, today.month, 1) - timedelta(days=1)\n last_month_first_date = last_day_of_prev_month.strftime('%Y-%m-01')\n last_month_end_date = last_day_of_prev_month.strftime('%Y-%m-%d')\n response, ingestion_results = r.get_report(last_month_first_date,\n last_month_end_date,\n 'monthly')\n logger.debug('Monthly report data from {s} to {e}'.format(s=start_date, e=end_date))\n logger.debug(json.dumps(response, indent=2))\n\n return response",
"def get_report(ctx, report_ids):\n client = ctx.obj[\"client\"]\n for report_id in report_ids:\n report = client.get_report(report_id)\n click.secho(report.detailed)",
"def run(request_handler, name, generic_result_set=True, **kwargs) -> Union[List, Dict]:\n params = {\n 'genericResultSet': generic_result_set,\n 'pretty': False\n }\n for param in kwargs.keys():\n params['R_{}'.format(param)] = kwargs[param]\n\n return request_handler.make_request('GET', '/runreports/{}'.format(name), params=params)",
"def fusion_api_get_fabric(self, uri=None, param='', api=None, headers=None):\n return self.fabric.get(uri=uri, api=api, headers=headers, param=param)",
"def get_reporters(self):\n url = self._get_api_url() + 'reporters'\n reporters = self._request(url)\n return reporters.json()",
"def get(self, campaign_id, report_id, **queryparams):\n self.campaign_id = campaign_id\n self.report_id = report_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'abuse-reports', report_id), **queryparams)"
] | [
"0.56766737",
"0.54050255",
"0.5230322",
"0.5214337",
"0.5190606",
"0.5153666",
"0.5005133",
"0.4954604",
"0.49541247",
"0.4939854",
"0.49134123",
"0.49134123",
"0.48381823",
"0.481615",
"0.48036653",
"0.47944608",
"0.47694454",
"0.47487447",
"0.4656791",
"0.4603878",
"0.4581687",
"0.4581346",
"0.45648563",
"0.4560479",
"0.45559853",
"0.4543266",
"0.452955",
"0.45165148",
"0.4493364",
"0.44734365"
] | 0.70640254 | 0 |
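The record above wraps a single GET against the /report/ sub-resource of a Fabric Manager. A minimal usage sketch follows, assuming a hypothetical fusion_client object that exposes the keyword as a Python method and an illustrative URI value; neither the handle name nor the URI comes from the source.

# Hypothetical caller; 'fusion_client' and the URI below are assumptions for illustration only.
fm_uri = '/rest/fabric-managers/1234-abcd'   # URI of an existing Fabric Manager or Tenant
response = fusion_client.fusion_api_get_fabric_manager_report(uri=fm_uri)
# The keyword appends '/report/' to the given URI and issues the GET through self.fabricmanager.
print(response)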
Adds a rack manager to the appliance [Arguments] | def fusion_api_add_rack_manager(self, body, api=None, headers=None):
return self.rackmanager.post(body, api, headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_add_rack(self, body, api=None, headers=None):\n return self.rack.create(body, api, headers)",
"def add(self, arguments):\n url = arguments['<location>']\n if url:\n name = arguments['<name>']\n else:\n url = arguments['<name>']\n name = None\n version = arguments['--box-version']\n force = arguments['--force']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n utils.add_box(url, name=name, version=version, force=force, requests_kwargs=requests_kwargs)",
"def create_rack(self, datacenter, name, vlan_id_min, vlan_id_max, nrsq):\n log.info(\"Adding rack %s...\" % name)\n rack = Rack.builder(self.__context, datacenter) \\\n .name(name) \\\n .vlanIdMin(vlan_id_min) \\\n .vlanIdMax(vlan_id_max) \\\n .nrsq(nrsq) \\\n .build()\n rack.save()\n return rack",
"def add_animals(self, *args):\n if self.validate_requirements(args):\n [self.animals.append(arg) for arg in args]\n else:\n print(\"foobar\")",
"def do_add(self, args):\n argument_list = args.split()\n if len(argument_list) < 1:\n self.__bad_arguments(\"add\")\n else:\n print \"Added \" + args + \".\"\n AssassinsManager.add_assassin(self.assassins_manager, args.split()[0])",
"def set_rack(self, rack):\n self.rack = rack\n self.barcode = rack.barcode",
"def add(isamAppliance, name, chainName, requestType, description=None, tokenType=None, xPath=None, signResponses=None,\n signatureKey=None, validateRequests=None, validationKey=None, sendValidationConfirmation=None, issuer=None,\n appliesTo=None, properties=None, check_mode=False, force=False):\n warnings = []\n if force is False:\n ret_obj = search(isamAppliance, name)\n\n if force is True or ret_obj['data'] == {}:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True, warnings=warnings)\n else:\n ret_obj = templates.search(isamAppliance, name=chainName)\n if ret_obj['data'] == {}:\n warnings.append(\"Unable to find a valid STS Chain Template for {0}\".format(chainName))\n else:\n chainId = ret_obj['data']\n json_data = {\n \"name\": name,\n \"chainId\": chainId,\n \"requestType\": requestType\n }\n if description is not None:\n json_data['description'] = description\n if tokenType is not None:\n json_data['tokenType'] = tokenType\n if xPath is not None:\n json_data['xPath'] = xPath\n if signResponses is not None:\n json_data['signResponses'] = signResponses\n if signatureKey is not None:\n json_data['signatureKey'] = signatureKey\n if validateRequests is not None:\n json_data['validateRequests'] = validateRequests\n if validationKey is not None:\n json_data['validationKey'] = validationKey\n if sendValidationConfirmation is not None:\n json_data['sendValidationConfirmation'] = sendValidationConfirmation\n if issuer is not None:\n json_data['issuer'] = issuer\n if appliesTo is not None:\n json_data['appliesTo'] = appliesTo\n if properties is not None:\n for idx, x in enumerate(properties['self']):\n if \"map.rule.reference.names\" in x['name']:\n ret_obj1 = mapping_rules.search(isamAppliance, x['value'][0])\n properties['self'].append(\n {\"name\": x['prefix'] + \".map.rule.reference.ids\", \"value\": [ret_obj1['data']]})\n del properties['self'][idx]\n json_data['properties'] = properties\n return isamAppliance.invoke_post(\n \"Create an STS chain\", uri, json_data,\n requires_modules=requires_modules,\n requires_version=requires_version, warnings=warnings)\n\n return isamAppliance.create_return_object(warnings=warnings)",
"def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')",
"def add_app_arguments(self, parser: argparse.ArgumentParser):\n pass",
"def add_app_arguments(self, parser: argparse.ArgumentParser) -> None:\n pass",
"def add(isvgAppliance, name, trapAddress, trapCommunity, trapNotificationType=None, trapVersion='V1', trapPort=162,\n objType='snmp', username=None, authEnabled=None, authType=None, authPassPhrase=None, privEnabled=None,\n privType=None, privPassPhrase=None, informSnmpEngineID=None, informTimeout=None, comment='', check_mode=False,\n force=False):\n if force is True or _check(isvgAppliance, None, name, trapAddress, trapCommunity, trapNotificationType, trapVersion,\n trapPort, objType, username, authEnabled, authType, authPassPhrase, privEnabled,\n privType, privPassPhrase, informSnmpEngineID, informTimeout, comment) is False:\n if check_mode is True:\n return isvgAppliance.create_return_object(changed=True)\n else:\n return isvgAppliance.invoke_post(\n \"Add a snmp object\",\n \"/rsp_snmp_objs/\",\n {\n 'name': name,\n 'objType': objType,\n 'comment': comment,\n 'trapAddress': trapAddress,\n 'trapPort': trapPort,\n 'trapCommunity': trapCommunity,\n 'trapVersion': trapVersion,\n 'trapNotificationType': trapNotificationType,\n 'userName': username,\n 'authEnabled': authEnabled,\n 'authType': authType,\n 'authPassPhrase': authPassPhrase,\n 'privEnabled': privEnabled,\n 'privType': privType,\n 'privPassPhrase': privPassPhrase,\n 'informSnmpEngineID': informSnmpEngineID,\n 'informTimeout': informTimeout\n })\n\n return isvgAppliance.create_return_object()",
"def add_argument(self, parser):\n parser.add_argument(*self.args, **self.kwargs)",
"def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")",
"def application_add(p_engine, p_username, appname):\n\n ret = 0\n\n enginelist = get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n # create new object of Application class\n\n for engine_tuple in enginelist:\n engine_obj = DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n appnew = DxApplication(engine_obj)\n # set a name\n appnew.create_application(application_name=appname)\n\n # add Application to engine and list\n # rc is None all is OK\n\n if applist.add(appnew):\n ret = ret + 1\n\n return ret",
"def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)",
"def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)",
"def add_argument(self, *args, **kwargs):\n self.parser.add_argument(*args, **kwargs)",
"def add_app(self):\n \n pass",
"def GroundExcelAddEnemyArmorType(builder, EnemyArmorType):\n return AddEnemyArmorType(builder, EnemyArmorType)",
"def addArguments(self, parser):\r\n parser.add_argument('destination', action='store', help='Destination for the new Apex Test class')",
"def post_service_appliance_create(self, resource_dict):\n pass",
"def add_application(\n self,\n app_stage: aws_cdk.core.Stage,\n *,\n manual_approvals: typing.Optional[bool] = None,\n ) -> None:\n options = AddStageOptions(manual_approvals=manual_approvals)\n\n return jsii.invoke(self, \"addApplication\", [app_stage, options])",
"def _apphot_one(args):\n return apphot_one(*args)",
"def AddBackupRunId(parser):\n parser.add_argument(\n 'id',\n type=arg_parsers.BoundedInt(lower_bound=1, unlimited=True),\n help=(\n 'The ID of the backup run. You can find the ID by running '\n '$ gcloud sql backups list -i {instance}.'\n ),\n )",
"def inventoryAdd(obj):\n size=1\n if obj==\"TSA Trophy\":\n size =2\n print(\"The TSA Trophy takes two hands to pick up.\")\n if len(inventory)+size>2:\n print(\"Your hands are too full to pick up\",obj+\".\")\n else:\n print(\"You picked up\",obj)\n inventory.append(obj)\n inventoryCall()",
"def add_arguments(self, parser):\n parser.add_argument('asins', nargs='+', type=str)",
"def vm_diskadd(args):\n name = args.name\n size = args.size\n template = args.template\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if size is None:\n common.pprint(\"Missing size. Leaving...\", color='red')\n os._exit(1)\n if pool is None:\n common.pprint(\"Missing pool. Leaving...\", color='red')\n os._exit(1)\n if name is None:\n common.pprint(\"Missing name. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding disk to %s...\" % name)\n k.add_disk(name=name, size=size, pool=pool, template=template)",
"def add_handout(self, asset_name):\r\n self._handouts.append(asset_name)",
"def add_cli_arg(self, name, value):\n if value:\n self._cli[name] = value",
"def _add_argument(self, args=''):\n\n sys.argv += args.split(' ')"
] | [
"0.5335254",
"0.52000356",
"0.519472",
"0.5162352",
"0.5091715",
"0.50824",
"0.50122625",
"0.5003473",
"0.49970138",
"0.49732828",
"0.49596536",
"0.4910242",
"0.48956198",
"0.4864047",
"0.48495474",
"0.48495474",
"0.48495474",
"0.48479155",
"0.48257363",
"0.48086825",
"0.47879982",
"0.4781859",
"0.47752607",
"0.47293097",
"0.47051132",
"0.47045335",
"0.46961802",
"0.46931478",
"0.4690526",
"0.46659195"
] | 0.55991477 | 0 |
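The add keyword in the record above simply POSTs the supplied body through self.rackmanager. A hedged sketch of driving it; the payload field names and the fusion_client handle are illustrative assumptions, not values taken from the source.

# Hypothetical payload for registering a rack manager; field names are assumptions.
rack_manager_body = {
    'hostname': 'rackmgr.example.com',
    'username': 'admin',
    'password': 'secret',
}
response = fusion_client.fusion_api_add_rack_manager(body=rack_manager_body)
# Equivalent to self.rackmanager.post(body, api, headers) inside the keyword.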
Gets a Rack Manager [Arguments] | def fusion_api_get_rack_manager(self, uri=None, api=None, headers=None):
return self.rackmanager.get(uri=uri, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cli_arguments(self):\n pass",
"def get_options():\n parser = argparse.ArgumentParser(\n description=\"view the aria2 queue on localhost:6800\",\n )\n # parser.add_argument() calls here\n options = parser.parse_args()\n # extra processing of options here\n return options",
"def get_arguments():\n\tparser.add_argument('-i', '--interface', help='interface to affect')\n\tparser.add_argument('-m','--mac', help='mac to allocate')\n\n\targs = parser.parse_args()\n\tinterface = args.interface\n\tmac = args.mac\n\treturn (interface, mac)",
"def GetArgs():\n\tparser = argparse.ArgumentParser(description='Process args for retrieving all the Virtual Machines')\n\tparser.add_argument('-s', '--host', required=True, action='store', help='Remote host to connect to')\n\tparser.add_argument('-o', '--port', type=int, default=443,\taction='store', help='Port to connect on')\n\tparser.add_argument('-u', '--user', required=True, action='store', help='User name to use when connecting to host')\n\tparser.add_argument('-p', '--password', required=True, action='store', help='Password to use when connecting to host')\n\tparser.add_argument('-m', '--vm', required=True, action='store', help='On eor more Virtual Machines to report on')\n\tparser.add_argument('-i', '--int', type=int, default=15, action='store', help='Interval to average the vSphere stats over')\n\targs = parser.parse_args()\n\treturn args",
"def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser",
"def parse_args():\n\n parser = argparse.ArgumentParser(description=\"Benchmark Thing WoT server\")\n parser = utils.extend_server_arg_parser(parser)\n\n return parser.parse_args()",
"def fusion_api_get_hypervisor_manager(self, uri=None, param='', api=None, headers=None):\n return self.hypervisor_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def GetArgs():\n parser = argparse.ArgumentParser(\n description='Process args for retrieving all the Virtual Machines')\n parser.add_argument('-s', '--host', required=True, action='store',\n help='Remote host to connect to')\n parser.add_argument('-o', '--port', type=int, default=443, action='store',\n help='Port to connect on')\n parser.add_argument('-u', '--user', required=True, action='store',\n help='User name to use when connecting to host')\n parser.add_argument('-p', '--password', required=False, action='store',\n help='Password to use when connecting to host')\n args = parser.parse_args()\n return args",
"def get_args():\n\n parser = get_argument_parser()\n args = parser.parse_args()\n\n return args",
"def parse_args():\n parser = argparse.ArgumentParser(\n description='Farm')\n parser.add_argument(\n '-s', '--source',\n help='Harvest source (AMQP host such as amqp://guest:guest@localhost:5672)',\n required=True)\n parser.add_argument(\n '-q', '--queue',\n help='Queue name to harvest from',\n required=True)\n parser.add_argument(\n '-a', '--add',\n help='Harvester instance (file)',\n required=True,\n type=argparse.FileType('rb'))\n return vars(parser.parse_args())",
"def get_args():\n parser = argparse.ArgumentParser(\n description=\"Obstacle avoidance python script.\"\n )\n\n # Required arguments\n parser.add_argument(\"-n\", \"--number\",\n action=\"store\",\n required=False,\n help=\"Add a pheeno number namespace.\",\n default=\"\")\n\n # The rationale behind rospy.myargv()[1:] is provided here:\n # https://groups.google.com/a/rethinkrobotics.com/forum/#!topic/brr-users/ErXVWhRmtNA\n return parser.parse_args(rospy.myargv()[1:])",
"def get_parser():\n parser = ArgumentParser(description=\"Script used to generate Freeplane \"\n + \"mindmap files\")\n\n # This is use when people in Linaro aren't using their email address.\n parser.add_argument('--disable-altname', required=False,\n action=\"store_true\", default=False,\n help=\"Use alternative names (from cfg.yaml) to the tree\")\n\n parser.add_argument('--assignee', required=False,\n action=\"store_true\", default=False,\n help=\"Add assignees (from cfg.yaml) to the tree\")\n\n parser.add_argument('-a', '--author', required=False,\n action=\"store_true\", default=False,\n help=\"If set, git statistic only count the commit \"\n + \"from the author\")\n\n parser.add_argument('-p', '--path', required=False, action=\"store\",\n default=\"/home/jyx/devel/optee_projects/reference/linux\",\n help='Full path to the kernel tree')\n\n parser.add_argument('-s', '--since', required=False, action=\"store\",\n default=None,\n help='Used with the git log --since command')\n\n parser.add_argument('-o', '--output', required=False, action=\"store\",\n default=\"linux-kernel.mm\",\n help='Output filename')\n\n parser.add_argument('-v', required=False, action=\"store_true\",\n default=False,\n help='Output some verbose debugging info')\n\n return parser",
"def get_parser():\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-geo\", \"--sra\",\n dest=\"sra\",\n type=str,\n required =True,\n help=\"GSM or GSE geo number\")\n parser.add_argument(\"-s3\", \"--s3_bucket\",\n dest=\"s3\",\n type=str,\n default='',\n help=\"s3 bucket if desired.\")\n parser.add_argument(\"-out\", \"-o\",\n dest=\"directoryLabel\",\n type=str,\n default='',\n help=\"path to download files.\")\n parser.add_argument(\"-e\", \"-email\",\n dest=\"email\",\n type=str,\n default='',\n help=\"Email for ncbi tools.\")\n parser.add_argument(\"-local\",\n dest=\"local_manifest\",\n default=False,\n action='store_true',\n help=\"Make manifest of files for GSE.\")\n parser.add_argument(\"-s3_manifest\",\n dest=\"s3_manifest\",\n default=False,\n action='store_true',\n help=\"Make manifest of files for GSE.\")\n parser.add_argument(\"-p, --processes\",\n dest=\"processes\",\n default=20,\n type=int,\n help=\"Number of concurrent submissions to make.\")\n parser.add_argument(\"-t, --threads\",\n dest=\"threads\",\n default=20,\n type=int,\n help=\"Number of parallel downloads per sra file.\")\n\n if len(sys.argv)==1:\n parser.print_help(sys.stderr)\n from qt5_gui_sra import Graph_UI\n from PyQt5.QtWidgets import QApplication\n app = QApplication(sys.argv)\n qt5_data = Graph_UI()\n app.exec_()\n return ('qt', qt5_data)\n args = parser.parse_args()\n return ('arg',args)",
"def _get_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--env', '-e',\n type=str,\n default='Zelda1-v0',\n choices=['Zelda1-v0'],\n help='The environment to play'\n )\n parser.add_argument('--mode', '-m',\n type=str,\n default='human',\n choices=['human', 'random'],\n help='The execution mode for the environment.'\n )\n parser.add_argument('--steps', '-s',\n type=int,\n default=500,\n help='The number of random steps to take.',\n )\n return parser.parse_args()",
"def get_args():\n\n parser = argparse.ArgumentParser(description=\"List the currently running clusters\")\n\n parser.add_argument(\"--help-ext\",\n help=\"Print the extended help\",\n action=\"store_true\",\n required=False)\n\n args = parser.parse_args()\n\n return args",
"def parse_arguments(self,parser):\r\n return parser.parse_args()",
"def get_args(self):\n req_argv = self._ptr.contents.argv\n args = []\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args",
"def get_argparser(self):\n parser = argparse.ArgumentParser(description='Command Configuration')\n parser.add_argument('--coin', choices=['bitcoin', 'ethereum', 'litecoin'], default='bitcoin')\n parser.add_argument('--start_date', default='2019-10-21')\n parser.add_argument('--end_date', default='2019-10-31')\n parser.add_argument('--language', choices=['en', 'it', 'es', 'fr', 'de', 'ru', 'zh'], default='en')\n\n argparser = parser.parse_args()\n return argparser.__dict__",
"def get_args():\n parser = ArgumentParser(description='main interface to provision system')\n parser.add_argument('--region-list', help='list of regions for provisioning purposes',\n required=True, nargs='+')\n parser.add_argument('--outfile', help='file to save region secrets to', required=True)\n args = parser.parse_args()\n return args.region_list, args.outfile",
"def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()",
"def get_arguments(self, local_machine):\n\n parser = argparse.ArgumentParser()\n\n if local_machine == \"client\":\n parser.add_argument(\"host\", help=\"target machine's host\")\n parser.add_argument(\"port\", help=\"target machine's port\", type=int)\n\n all_requests = parser.add_subparsers(help='all commands for server', dest='request', required=True)\n put_request = all_requests.add_parser('put', help='puts the specified file onto server')\n get_request = all_requests.add_parser('get', help='retrieves the specified file from server')\n all_requests.add_parser('list', help='lists the server directory')\n\n for request in put_request, get_request:\n request_help = \"file to transfer to server\" if request == put_request else \"file to retrieve from server\"\n request.add_argument('filename', help=request_help)\n\n elif local_machine == \"server\":\n parser.add_argument(\"port\", help=\"target port for listening to connections\", type=int)\n\n args = parser.parse_args()\n\n if args.port < 0 or args.port > 65535:\n raise parser.error(StatusCode.code[2002])\n self.port = args.port\n\n if local_machine == \"client\":\n self.host = args.host\n self.request = args.request\n if self.request != \"list\":\n self.file = args.filename",
"def start(self):\n return self._args[0]",
"def get_args():\n parser = argparse.ArgumentParser(\n description='Some Basic Spark Job doing some stuff on IMDb data stored within HDFS.')\n return parser.parse_args()",
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--server_name', help='Name of rabbit server',\n required=False, default='localhost')\n parser.add_argument('--queue_name', help='Name of rabbit queue',\n required=False, default='temp')\n return vars(parser.parse_args())",
"def get_parser_arguments():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--students', type=str, default=None, help='Path to the students json file')\n parser.add_argument('-r', '--rooms', type=str, default=None, help='Path to the rooms json file')\n parser.add_argument('-f', '--format', choices=['xml', 'json'], type=str.lower, default=None,\n help='Output format of the results')\n return parser",
"def get_arguments():\n parser = argparse.ArgumentParser(description=\"Simple Jarvice CLI\",\n add_help=False)\n auth_group = parser.add_argument_group('auth', description='Configuration')\n auth_group.add_argument('-username', help='Jarvice username')\n auth_group.add_argument('-apikey', help='Jarvice API key')\n auth_group.add_argument('-apiurl', help='Jarvice API URL',\n default='https://api.jarvice.com')\n auth_group.add_argument('-v', help='loglevel',\n choices=['INFO', 'WARN', 'DEBUG', 'CRITICAL'],\n dest='loglevel', default='CRITICAL')\n auth_group.add_argument(\n 'command',\n choices=['connect', 'submit', 'info', 'status',\n 'action', 'terminate', 'shutdown', 'jobs',\n 'output', 'tail', 'apps', 'machines', 'summary',\n 'download', 'upload', 'wait_for', 'shutdown_all',\n 'terminate_all', 'ls'])\n\n known, unknown = parser.parse_known_args()\n return known, unknown, parser",
"def fusion_api_get_deployment_manager(self, uri=None, param='', api=None, headers=None):\n return self.dep_mgr.get(uri=uri, api=api, headers=headers, param=param)",
"def get_env(self, *args):\n m = module(*args)\n return m.env",
"def args(self):\n return self._parse_args",
"def getParserArgs():\n parser = argparse.ArgumentParser(description=\"Parse radio configurator output into header file.\")\n parser.add_argument('-o', '--output_dir', type=str, default=None, help=\"The output dir.\")\n parser.add_argument('-p', '--phy_name', type=str, default=\"PHY_RAIL\", help=\"Build configuration for a specific PHY\")\n args = parser.parse_args()\n return args"
] | [
"0.55990356",
"0.5466417",
"0.54518133",
"0.54118025",
"0.5386328",
"0.53435844",
"0.5313144",
"0.52774763",
"0.5259597",
"0.52551603",
"0.5208383",
"0.5194557",
"0.51910126",
"0.5186137",
"0.5174422",
"0.5167607",
"0.5161358",
"0.51293916",
"0.5108545",
"0.5103814",
"0.5102019",
"0.50993174",
"0.5093454",
"0.50874615",
"0.50797665",
"0.507335",
"0.5067359",
"0.50579447",
"0.50506836",
"0.50363255"
] | 0.575111 | 0 |
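The get keyword in the record above forwards uri, api, and headers to self.rackmanager.get; passing uri=None presumably returns the whole collection rather than a single resource. A short, hedged sketch where fusion_client and the URI are assumptions for illustration.

# Fetch one rack manager by URI, or omit uri to list the collection (assumed behaviour).
one_manager = fusion_client.fusion_api_get_rack_manager(uri='/rest/rack-managers/1234-abcd')
all_managers = fusion_client.fusion_api_get_rack_manager()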
Deletes a Rack Manager from the appliance based on uri [Arguments] | def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):
return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_os_deploymentserver(self, name=None, uri=None, param='', api=None, headers=None):\n return self.osds.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def fusion_api_delete_deployment_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dep_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)",
"def _delete_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def fusion_api_delete_fabric_manager(self, name, uri=None, api=None, headers=None):\n return self.fabricmanager.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_delete_lsg(self, name=None, uri=None, api=None, headers=None):\n return self.lsg.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete(self, oid):\n path = '%s/routers/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack router: %s' % truncate(res))\n return res[0]",
"def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')",
"def delete(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack server: %s' % truncate(res))\n return res[0]",
"def _delete(self, uri, headers=None):\n if self.openam_url[-1:] == '/':\n openam_path = self.openam_url + uri\n else:\n openam_path = self.openam_url + \"/\" + uri\n\n try:\n data = requests.delete(openam_path, headers=headers, timeout=self.timeout, verify=self.verify)\n except requests.exceptions.RequestException as e:\n data = {'error': e}\n return data",
"def remove(url: str):\n authenticated = credentials.authenticate(url)\n REMOVER_REGISTRY.get_handler(authenticated.scheme).remove(authenticated)",
"def delete_router(self, router):\r\n return self.delete(self.router_path % (router))",
"def fusion_api_delete_ls(self, name=None, uri=None, api=None, headers=None):\n return self.ls.delete(name=name, uri=uri, api=api, headers=headers)",
"def delete_application(self, method=\"POST\", short_name=\"sampleapp\"):\r\n if method == \"POST\":\r\n return self.app.post(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)\r\n else:\r\n return self.app.get(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)",
"def fusion_api_delete_directory(self, name=None, uri=None, api=None, headers=None):\n return self.logindomain.delete(name, uri, api, headers)",
"def delete_app(self, name):\n raise NotImplementedError",
"def fusion_api_delete_storage_pool(self, uri=None, api=None, headers=None):\n return self.pool.delete(uri=uri, api=api, headers=headers)",
"def fusion_api_remove_switch(self, name=None, uri=None, api=None, headers=None):\n return self.switch.delete(name, uri, api, headers)",
"def unregister_router(self, hostname):",
"def DELETE(self, uri):\n def body(conn, cur):\n self.enforce_right('owner', uri)\n if web.ctx.ermrest_history_snaptime is not None:\n raise exception.Forbidden('deletion of catalog at previous revision')\n if web.ctx.ermrest_history_snaprange is not None:\n # should not be possible bug check anyway...\n raise NotImplementedError('deletion of catalog with snapshot range')\n self.set_http_etag( web.ctx.ermrest_catalog_model.etag() )\n self.http_check_preconditions(method='DELETE')\n self.emit_headers()\n return True\n\n def post_commit(destroy):\n web.ctx.ermrest_registry.unregister(self.catalog_id)\n web.ctx.status = '204 No Content'\n return ''\n\n return self.perform(body, post_commit)",
"def delete_from_backend(uri, **kwargs):\n\n parsed_uri = urlparse.urlparse(uri)\n scheme = parsed_uri.scheme\n\n backend_class = get_backend_class(scheme)\n\n if hasattr(backend_class, 'delete'):\n return backend_class.delete(parsed_uri, **kwargs)",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return",
"def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()",
"def fusion_api_delete_storage_system(self, uri=None, api=None, headers=None):\n return self.system.delete(uri=uri, api=api, headers=headers)",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")",
"def delete_server(ServerName=None):\n pass"
] | [
"0.68630743",
"0.67151904",
"0.6714827",
"0.6587817",
"0.6407161",
"0.63303494",
"0.615097",
"0.6058198",
"0.6026346",
"0.59584045",
"0.5894118",
"0.58919597",
"0.5862057",
"0.583348",
"0.5830776",
"0.57852274",
"0.575848",
"0.5754835",
"0.57540447",
"0.5753239",
"0.574271",
"0.57352257",
"0.5731633",
"0.57232785",
"0.5716786",
"0.5698687",
"0.56845754",
"0.5680586",
"0.56590056",
"0.5654049"
] | 0.79687536 | 0 |
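The delete keyword in the record above accepts either a uri or a name (plus an optional param query string) and hands them to self.rackmanager.delete. A hedged usage sketch with illustrative values only; fusion_client and both identifiers are assumptions.

# Delete by URI, or alternatively by name; the identifiers here are not from the source.
fusion_client.fusion_api_delete_rack_manager(uri='/rest/rack-managers/1234-abcd')
fusion_client.fusion_api_delete_rack_manager(name='rackmgr.example.com', param='?force=true')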
Initialize particles to be consistent with a uniform prior. Each particle is a tuple of ghost positions. Use self.numParticles for the number of particles. | def initializeParticles(self):
import itertools
import random
#create a list of all possible joint assignments, where each of the self.numAgents agents can take any of the legal intentions
permutations = list(itertools.product(self.legalIntentions, repeat=self.numAgents))
random.shuffle(permutations)
p = len(permutations)
n = self.numParticles
self.particles = []
#create the particles
while n >= p:
self.particles += permutations
n -= p
#add the remaining n particles so the total is exactly self.numParticles
self.particles += permutations[0:n] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. \n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None",
"def __init__(self, init_pos, init_stdev, num_particles, sense_noise):\n self.particles = np.random.multivariate_normal(\n init_pos, [[init_stdev**2, 0], [0, init_stdev**2]], num_particles)\n self.weights = np.array(\n [1. / num_particles for _ in range(num_particles)])\n self.n = num_particles\n self.sense_noise = sense_noise",
"def __init__(self,nparticles,initial_condition):\n self.nparticles = nparticles\n self.particles = np.array([Particle(mass,x,y) for x,y,mass in initial_condition])\n self.mass = np.array([self.particles[i].mass for i in range(len(self.particles))])\n self.position = np.array([self.particles[i].position for i in range(len(self.particles))])\n self.momentum = np.array([self.particles[i].momentum for i in range(len(self.particles))])",
"def _init_particles(self):\n self.NPART = self.grid.get_npart()\n self.particles = np.empty(self.NPART, dtype=object)\n for i in range(self.NPART):\n tmem = TMEM\n ux = UXM + UPRIME*normal()*LANGFACTOR\n vy = VYM + UPRIME*normal()*LANGFACTOR\n self.particles[i] = Particle(tmem=tmem, ux=ux, vy=vy)\n #\n # PUT THE PARTICLES IN THE CELLS.\n # LOOP OVER CELLS AND DEFINE THEIR PARTICLES.\n # FOR NOW, ONLY POSITION DEPENDS ON SPACE HEIGHT & MEMORY DO NOT.\n # FIRST THE TREE PARTICLES, THEN THE BUILDING PARTICLES.\n #\n NX = self.grid.NX\n NY = self.grid.NY\n icounter = 0\n for i in range(NX - 1):\n for j in range(NY - 1):\n cell = self.grid.CELLS[i, j]\n x = self.grid.XCELL[i, j]\n y = self.grid.YCELL[i, j]\n for k in range(cell.NPARTTR):\n self.particles[k + icounter].update(x=x, y=y, type=1)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + icounter].update(x=x, y=y, type=2)\n icounter += cell.NPARTTR + cell.NPARTRAD",
"def resetParticles(self, gameState, ghost=None):\n # Particle with all ghosts in start state\n if not getattr(self, 'particles', []):\n p = tuple(gameState.getInitialAgentPosition(g) for g in\n self.ghostIndices)\n self.particles = [p] * self.numGhosts\n else:\n for p in self.particles:\n positions = list(p)\n positions[self.ghostIndices.index(ghost)] = \\\n gameState.getInitialAgentPosition(ghost)\n p = tuple(positions)",
"def initSamples(self):\n # Define this (note random.uniform is helpful here!)\n for i in range(self.numParticles):\n # Create particles uniformly and generate same weights for all particles.\n particle = random.uniform(self.minValue, self.maxValue)\n self.samples.append(particle)\n self.weights.append(1/self.numParticles)",
"def __init__(self, particles):\n self.particles = particles",
"def initialise_particle_cloud(self, initialpose):\n # ----- Initialize the particle cloud as an empty array\n self.particlecloud = PoseArray()\n\n \"\"\"Create the noise to multiply by the random Gaussian number that will\n get added to each of the Poses, that are set to a random position\n and orientation around the initial pose\"\"\"\n sensorSigma=3 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n \"\"\"Create a range for the ammount of random Gaussian values to generate \"\"\"\n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n randomYawArray = []\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n x=random.randint(1,180)\n randomYaw=(math.pi/x)\n randomYawArray.append(randomYaw)\n\n iterator = 0\n\n \"\"\"\n\t Set the particles to a random position and orientation around the initial pose\n \"\"\"\n particleNumber = 10**2 # 10**3 # 10**4 # 10**5 experiment with different ammounts of particles\n\n while iterator < particleNumber:\n particle = Pose()\n particle.position.x = initialpose.pose.pose.position.x + (gaussianRandomNumX[iterator] * noise)\n particle.position.y = initialpose.pose.pose.position.y + (gaussianRandomNumY[iterator] * noise)\n particle.position.z = initialpose.pose.pose.position.z\n particle.orientation = rotateQuaternion(initialpose.pose.pose.orientation, randomYawArray[iterator])\n\n self.particlecloud.poses.append(particle)\n iterator += 1\n\n return self.particlecloud",
"def assignPositions(self):\n n = int(math.ceil(self.numAtoms**(1.0/3.0))) # Number of atoms in a direction\n particle = 0 # Particles placed so far\n \n for x in range(0, n):\n for y in range(0, n):\n for z in range(0, n):\n if (particle < self.numAtoms):\n self.atoms[particle].x = x * self.sigma\n self.atoms[particle].y = y * self.sigma \n self.atoms[particle].z = z * self.sigma\n particle += 1",
"def initialize(self):\n self.positions = self._generate_initial_positions()\n self.scores = np.array(self.compute_scores(self.positions))\n\n self._pso_data.best_positions = self.positions\n self._pso_data.best_scores = self.scores\n\n magic_constant = 2 # feel free to change FIXME\n max_velocity = (self.upper_bound - self.lower_bound) / magic_constant\n shape = (len(self.positions), len(self.lower_bound))\n self._pso_data.velocities = np.random.uniform(low=-max_velocity, high=max_velocity, size=shape)",
"def __init__(self, dim: tuple, count: int):\n self.surface = pygame.Surface(dim)\n # initialize\n self.particles = []\n # initialize\n for counter in range(count):\n pos = pygame.Vector2(random.randint(0, self.surface.get_width()), random.randint(0, self.surface.get_height()))\n direction = pygame.Vector2(10 * (random.random() - 0.5), 10 * (random.random() - 0.5))\n color = pygame.Color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 255)\n size = 5 + random.randint(0, 10)\n particle = Particle(self.surface, pos, direction, size, color)\n self.particles.append(particle)",
"def __init__(self, init_pos_1, init_pos_2, M_1, M_2, spring_cos, equi_len):\n self.p1 = Particle(init_pos_1, M_1)\n self.p2 = Particle(init_pos_2, M_2)\n self.k = spring_cos\n self.L0 = equi_len",
"def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))",
"def __init__(self, func, init_pos, n_particles):\n self.func = func\n self.n_particles = n_particles\n self.init_pos = np.array(init_pos)\n self.particle_dim = len(init_pos)\n # Initialize particle positions using a uniform distribution\n self.particles_pos = np.random.uniform(size=(n_particles, self.particle_dim) ) \\\n * self.init_pos\n # Initialize particle velocities using a uniform distribution\n self.velocities = np.random.uniform(size=(n_particles, self.particle_dim))\n\n # Initialize the best positions\n self.g_best = init_pos\n self.p_best = self.particles_pos\n self.phi = 2",
"def __init__(self, func, init_pos, n_particles):\n self.func = func\n self.n_particles = n_particles\n self.init_pos = np.array(init_pos)\n self.particle_dim = len(init_pos)\n # Initialize particle positions using a uniform distribution\n self.particles_pos = np.random.uniform(size=(n_particles, self.particle_dim)) \\\n * self.init_pos\n # Initialize particle velocities using a uniform distribution\n self.velocities = np.random.uniform(size=(n_particles, self.particle_dim))\n\n # Initialize the best positions\n self.g_best = init_pos\n self.p_best = self.particles_pos",
"def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__particles = [\n PSOParticle(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['particles'])\n ]\n\n # The library stores particles in the visualizer .... groan\n positions = [particle.position for particle in self.__particles]\n self._visualizer = NoVisualizer(**kwargs)\n self._visualizer.add_data(positions=positions)",
"def init_population(self):\n print('Initializing...')\n for i in range(self.part_num):\n x = Particle()\n # initialize random position\n x.Pos = np.zeros(self.dim)\n for j in range(len(x.Pos)):\n x.Pos[j] = np.random.uniform(self.var_size[j][0], self.var_size[j][1])\n # calculate cost from random parameters\n #print(x.Pos)\n x.Cost = self.objective(x.Pos)\n x.Vel = np.zeros(self.dim)\n x.Best_pos = x.Pos\n x.Best_cost = x.Cost\n self.particle.append(x)\n\n if self.particle[i].Best_cost < self.GlobalBest_Cost:\n self.GlobalBest_Cost = self.particle[i].Best_cost\n self.GlobalBest_Pos = self.particle[i].Best_pos\n self.Best_Cost.append(self.GlobalBest_Cost)\n print('Initialize complete, with best cost =',\n self.GlobalBest_Cost, \n \"\\nTemporary best solution:\", \n self.GlobalBest_Pos)",
"def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))",
"def _initialise(self):\n if self._running:\n raise RuntimeError('Already initialised.')\n\n # Propose x0 as first points\n # Note proposal is multiple points this time!\n self._current = None\n self._current_log_pdfs = None\n self._proposed = self._x0\n self._proposed.setflags(write=False)\n\n # Number of chains left to update in this cycle\n self._remaining = np.arange(self._n_chains)\n\n # Update sampler state\n self._running = True",
"def initialize_particle_cloud(self, xy_theta=None):\n if xy_theta == None:\n xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)\n rad = 1 # meters\n\n self.particle_cloud = []\n self.particle_cloud.append(Particle(xy_theta[0], xy_theta[1], xy_theta[2]))\n for i in range(self.n_particles - 1):\n # initial facing of the particle\n theta = random.random() * 360\n\n # compute params to generate x,y in a circle\n other_theta = random.random() * 360\n radius = random.random() * rad\n # x => straight ahead\n x = radius * math.sin(other_theta) + xy_theta[0]\n y = radius * math.cos(other_theta) + xy_theta[1]\n particle = Particle(x, y, theta)\n self.particle_cloud.append(particle)\n\n self.normalize_particles()\n self.update_robot_pose()",
"def p_prior(self):\n sampler = self.__sampler\n nwalkers = self.nwalkers\n pRanges = self.pRanges\n if sampler == \"EnsembleSampler\":\n p = [posRange(pRanges) for i in range(nwalkers)]\n elif sampler == \"PTSampler\":\n ntemps = self.ntemps\n p = np.zeros((ntemps, nwalkers, self.ndim))\n for loop_t in range(ntemps):\n for loop_w in range(nwalkers):\n p[loop_t, loop_w, :] = posRange(pRanges)\n return p",
"def distribute_Gaussian(self):\n\n sigma_x = np.sqrt(self.emitx*self._betax)\n sigma_xp = np.sqrt(self.emitx*self._gammax)\n\n sigma_y = np.sqrt(self.emity*self._betay)\n sigma_yp = np.sqrt(self.emity*self._gammay)\n\n self.particles[:,0] = np.random.randn(self.npart)*sigma_x #set x-coordinates\n self.particles[:,1] = np.random.randn(self.npart)*sigma_xp #set xp-coordinates\n self.particles[:,2] = np.random.randn(self.npart)*sigma_y #set y-coordinates\n self.particles[:,3] = np.random.randn(self.npart)*sigma_yp #set yp-coordinates",
"def u_init(self):\n\n u0 = self.params.u0\n N = self.params.nparts\n\n u = self.dtype_u(((3, N), self.init[1], self.init[2]))\n\n if u0[2][0] != 1 or u0[3][0] != 1:\n raise ProblemError('so far only q = m = 1 is implemented')\n\n # set first particle to u0\n u.pos[0, 0] = u0[0][0]\n u.pos[1, 0] = u0[0][1]\n u.pos[2, 0] = u0[0][2]\n u.vel[0, 0] = u0[1][0]\n u.vel[1, 0] = u0[1][1]\n u.vel[2, 0] = u0[1][2]\n\n u.q[0] = u0[2][0]\n u.m[0] = u0[3][0]\n\n # initialize random seed\n np.random.seed(N)\n\n comx = u.pos[0, 0]\n comy = u.pos[1, 0]\n comz = u.pos[2, 0]\n\n for n in range(1, N):\n # draw 3 random variables in [-1,1] to shift positions\n r = np.random.random_sample(3) - 1\n u.pos[0, n] = r[0] + u0[0][0]\n u.pos[1, n] = r[1] + u0[0][1]\n u.pos[2, n] = r[2] + u0[0][2]\n\n # draw 3 random variables in [-5,5] to shift velocities\n r = np.random.random_sample(3) - 5\n u.vel[0, n] = r[0] + u0[1][0]\n u.vel[1, n] = r[1] + u0[1][1]\n u.vel[2, n] = r[2] + u0[1][2]\n\n u.q[n] = u0[2][0]\n u.m[n] = u0[3][0]\n\n # gather positions to check center\n comx += u.pos[0, n]\n comy += u.pos[1, n]\n comz += u.pos[2, n]\n\n # print('Center of positions:',comx/N,comy/N,comz/N)\n\n return u",
"def x_add_particles():\n particle_count_list = np.zeros(7)",
"def initialize_particles(self,\r\n lower_bound,\r\n upper_bound,\r\n dimensions,\r\n objective_function):\r\n particles = []\r\n for _ in range(self.swarmsize):\r\n particles.append(Particle(lower_bound,\r\n upper_bound,\r\n dimensions,\r\n objective_function))\r\n if particles[-1].best_function_value[-1] < self.best_function_value[-1]:\r\n self.best_function_value.append(particles[-1].best_function_value[-1])\r\n self.best_position.append(particles[-1].best_position[-1])\r\n\r\n\r\n self.best_position = [self.best_position[-1]]\r\n self.best_function_value = [self.best_function_value[-1]]\r\n\r\n return particles",
"def reset_parameters(self):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if p.dim() == 1:\n nn.init.constant_(p, 0.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', 0.0))\n elif p.dim() == 2:\n fan_in = p.size(1)\n nn.init.uniform_(p, a=-math.sqrt(4 / fan_in), b=math.sqrt(4 / fan_in))\n logger.info('Initialize %s with %s / %.3f' % (n, 'uniform', math.sqrt(4 / fan_in)))\n elif p.dim() == 3:\n fan_in = p.size(1) * p[0][0].numel()\n nn.init.uniform_(p, a=-math.sqrt(4 / fan_in), b=math.sqrt(4 / fan_in))\n logger.info('Initialize %s with %s / %.3f' % (n, 'uniform', math.sqrt(4 / fan_in)))\n elif p.dim() == 4:\n fan_in = p.size(1) * p[0][0].numel()\n nn.init.uniform_(p, a=-math.sqrt(4 / fan_in), b=math.sqrt(4 / fan_in))\n logger.info('Initialize %s with %s / %.3f' % (n, 'uniform', math.sqrt(4 / fan_in)))\n else:\n raise ValueError(n)",
"def initialize_persons(self) -> None:\n self.population.initialize_id(0, self.size)\n self.population.initialize_ages(self.min_age, self.max_age, self.size)\n self.population.initialize_positions(self.x_bounds, self.y_bounds,\n self.size)\n self.population.initialize_g_value(self.r, 1/self.k, self.size)\n self.population.initialize_mortality_rate(self.size,\n self.mortality_rate)\n self.population.initialize_susceptibility()\n self.population.initialize_infected_by()\n\n self.persons[:, 7] = 1\n self.persons[:, 10] = 0.1\n self.persons[:, 11] = 0.1\n\n # Update the destination each person is headed to and corresponding\n # speed randomly\n self.persons = self.movement.update_persons(self.persons, self.size,\n self.speed, 1)\n\n self.infected_person = np.random.randint(0, self.size)\n self.persons[self.infected_person, index.g_value] = 3\n self.population.set_infected_at(self.infected_person, 0)\n self.persons[self.infected_person, index.infected_by] = \\\n self.infected_person\n self.persons[self.infected_person, index.social_distance] = 0\n self.persons[self.infected_person, 9] = 1",
"def init_pvelocity(self, individuals):\n for individual in individuals:\n # the initial speed is set to zero\n individual.features['velocity'] = [0] * len(individual.vector)\n\n return",
"def init_particle_filter(self, motion_prior, n_p):\n # Define necessary components for the particle filter\n if motion_prior['mode'] == 'PositionDiffusion':\n # Diffusion\n dc_infer = motion_prior['dc']\n d_h = 2 # Dimension of hidden state (i.e. x,y = 2 dims)\n sdev = np.sqrt(dc_infer * self.dt / 2) * np.ones((d_h,))\n ipd = pf.GaussIPD(d_h, self.n_n, sdev * 0.001)\n tpd = pf.GaussTPD(d_h, self.n_n, sdev)\n ip = pf.GaussIP(d_h, sdev * 0.001)\n tp = pf.GaussTP(d_h, sdev)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n\n elif motion_prior['mode'] == 'VelocityDiffusion':\n # FIXME: save these params\n d_h = 4 # Hidden state dim, x,y,vx,vy\n\n v0 = motion_prior['v0'] # Initial Estimate for velocity\n dcv = motion_prior['dcv'] # Velocity Diffusion Constant\n st = np.sqrt(dcv * self.dt)\n adj = np.sqrt(1 - st ** 2 / v0 ** 2)\n\n eps = 0.00001 # Small number since cannot have exact zero\n sigma0 = np.array([eps, eps, v0, v0]) # Initial sigmas\n sigma_t = np.array([eps, eps, st, st]) # Transition sigmas\n\n # Transition matrix\n a = np.array([[1, 0, self.dt, 0],\n [0, 1, 0, self.dt],\n [0, 0, adj, 0],\n [0, 0, 0, adj]])\n\n ipd = pf.GaussIPD(d_h, self.n_n, sigma0)\n tpd = pf.GaussTPD(d_h, self.n_n, sigma_t, A=a)\n ip = pf.GaussIP(d_h, sigma0)\n tp = pf.GaussTP(d_h, sigma_t, A=a)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n # Note trick where PoissonLP takes 0,1 components of the\n # hidden state which is the same for both cases\n\n else:\n raise ValueError(\n 'Unrecognized Motion Prior ' + str(motion_prior))\n\n r = np.zeros((self.n_n, self.n_t)).astype('float32')\n return pf.ParticleFilter(\n ipd, tpd, ip, tp, lp, r.transpose(), n_p)",
"def _launch_particles(self, istep):\n for i in range(self.grid.NX-1):\n for j in range(self.grid.NY-1):\n INDX = i\n INDY = j\n cell = self.grid.CELLS[INDX, INDY]\n TLOCAL = self.TIME[istep] - cell.CLOCK\n TCRIT = cell.TIGNTR * (1 + RELT*normal())\n if cell.BURNSTAT == 1 and TLOCAL > TCRIT and cell.BURNSTAT2 == 1:\n LOCALF = LANGFACTOR\n indp = (INDX*(self.grid.NY - 1) + INDY)*2*Cell.NPARTMAX - 1\n for k in range(cell.NPARTTR):\n self.particles[k + indp].update(state=1.0, factor=LOCALF)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + indp].update(state=1.0, factor=LOCALF)\n cell.BURNSTAT2 = 0"
] | [
"0.73676294",
"0.7053749",
"0.68827",
"0.6763224",
"0.671548",
"0.6645336",
"0.6637801",
"0.66186696",
"0.6572894",
"0.6544678",
"0.64980954",
"0.6426581",
"0.6356929",
"0.6327402",
"0.63196164",
"0.62938076",
"0.6253168",
"0.62287265",
"0.6143017",
"0.6065585",
"0.60641867",
"0.6046868",
"0.602023",
"0.6014569",
"0.59893346",
"0.5973218",
"0.5944427",
"0.5921359",
"0.59111816",
"0.58696824"
] | 0.7536992 | 0 |
Get the value of the ADDONSPATH variable. | def get_cached_addon_path():
settingspath = get_cached_setting_path()
if not settingspath:
logger.error("#SETTINGSPATH# resolution required but was not found")
return
return os.path.join(settingspath, "Addons") + "\\" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_abs_path(self, value):\n return os.path.abspath(os.path.expanduser(os.path.expandvars(value)))",
"def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP",
"def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")",
"def getapxs_location():\n return getconfigure_option(\"APXS\")",
"def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path",
"def GetImportRoot():\n import_root_env = os.environ.get('LOGICAPATH')\n if not import_root_env:\n return None\n roots = import_root_env.split(':')\n if len(roots) > 1:\n return roots\n else:\n return import_root_env",
"def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory",
"def GetPath () :\n return sys.hal_log_values [\"__log_path\"]",
"def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")",
"def get_path(self, key):\n value = self.getn(key)\n if value is None:\n logger.warning(\"Specified config '%s' is None or not exist\" % key)\n return None\n if not isinstance(value, str):\n msg = \"Specified config '%s' is non-string: %s\" % (key, value)\n logger.error(msg)\n raise ValueError(msg)\n #\n path = os.path.expanduser(value)\n if not os.path.isabs(path):\n # Got relative path, try to convert to the absolute path\n if hasattr(self, \"userconfig\"):\n # User configuration loaded\n path = os.path.join(os.path.dirname(self.userconfig), path)\n else:\n logger.warning(\"Cannot convert to absolute path: %s\" % path)\n return os.path.normpath(path)",
"def server_relative_path(self):\n return self.properties.get(\"ServerRelativePath\", SPResPath(None))",
"def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]",
"def get_path(self):\n\t\treturn call_sdk_function('PrlShare_GetPath', self.handle)",
"def start_path(self) -> str:\n return self.get_from_redis(\"start_path\")",
"def get_installation_path():\n file_abs_path = os.path.abspath(__file__)\n real_file_abs_path = os.path.realpath(file_abs_path)\n return real_file_abs_path[:real_file_abs_path.find('/node')]",
"def application_service_path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_service_path\")",
"def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path",
"def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)",
"def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;",
"def get_appdir():\n\n return APP_PATH",
"def path_addons_local(self) -> Path:\n return self.path_supervisor / ADDONS_LOCAL",
"def get_sia_manifest_uri(self):\n\n sia = self.get_POW().getSIA()\n return None if sia is None else first_rsync_uri(sia[1])",
"def path(self):\n if self._path:\n return self._path\n path = os.environ[\"PATH\"].split(os.pathsep)\n path = [os.path.expanduser(x) for x in path]\n path = [os.path.abspath(x) for x in path]\n path = [x for x in path if os.path.exists(x)]\n self._path = path\n return self._path",
"def AppPath(self):\n\t\treturn self.acad.Path",
"def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))",
"def work_root(session):\n return session[\"AVALON_WORKDIR\"]",
"def get_pkgdirimp_dotpathprefix_site(self):\n from string import join\n modpath = self.sitemodulename\n dirpath = join(modpath.split('.')[:-1], '.')\n return dirpath",
"def _get_dragons_input_test_path():\n try:\n path = os.path.expanduser(os.environ['DRAGONS_TEST_INPUTS'])\n\n except KeyError:\n\n print('\\n This script needs the environment variable DRAGONS_TEST_INPUTS'\n '\\n Please, add is using the following command: '\n '\\n $ export DRAGONS_TEST_INPUTS=\"/my/test/path/\"'\n '\\n and run again. Leaving now.'\n '\\n ')\n\n sys.exit(1)\n\n return path",
"def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)",
"def get_ospl_home_bin(self):\r\n return self.ospl_home_bin"
] | [
"0.6033623",
"0.5921134",
"0.58051646",
"0.5630593",
"0.5581429",
"0.5525819",
"0.55247533",
"0.5490318",
"0.5476768",
"0.54625976",
"0.54127926",
"0.53831667",
"0.5338885",
"0.5338132",
"0.53280693",
"0.53262985",
"0.52948344",
"0.5281537",
"0.52779865",
"0.52707076",
"0.52691346",
"0.52659154",
"0.5259245",
"0.5232397",
"0.5229939",
"0.521925",
"0.5219165",
"0.5214684",
"0.52041924",
"0.5191026"
] | 0.5957791 | 1 |
Get the VOLTHA PORT object for this port | def get_port(self):
if self._port is None:
self._port = Port(port_no=self._port_no,
label=self._label,
type=Port.PON_OLT,
admin_state=AdminState.ENABLED,
oper_status=OperStatus.ACTIVE)
# TODO: For now, no way to report the proper ADMIN or OPER status
# admin_state=self._admin_state,
# oper_status=self._oper_status)
return self._port | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_port(self):\n return self.__port",
"def getPort(self):\n return self._port",
"def get_port(self):\n return self.__port",
"def getPort(self):\n return self._port",
"def get_port(self):\n return self.port",
"def get_port(self):\n \n return self._port",
"def get_port(self) -> int:\n return self._port",
"def _get_nport(self):\n return self.__nport",
"def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_get_port(self)",
"def com_port():\n port = ListPortInfo(DEFAULT_PORT)\n port.serial_number = \"1234\"\n port.manufacturer = \"Virtual serial port\"\n port.device = DEFAULT_PORT\n port.description = \"Some serial port\"\n\n return port",
"def Port(self) -> int:",
"def port(self, **kw):\n return self.portType(**kw)",
"def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_get_port(self)",
"def get_port_by_id(self, id):\n return self.network.get_port(id)",
"def port(self):\n # This property is not 100% needed, but is included instead of making the raw variable public to prevent people from accidentally overwriting the port and screwing up this representative value\n return self._port",
"def read_port_vlan_info(self, port: int) -> Vlans:\n raise NotImplementedError",
"def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")",
"def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")",
"def get_vncport(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetVNCPort', self.handle)",
"def port1(self):\n return self._port1",
"def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")",
"def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")",
"def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")",
"def out_aux_port(self, port):\n return self.auxout[port - 1]",
"def read_port_vlan_info(self, port: int) -> Vlans:\n return self._current_dev_manager.read_port_vlan_info(port=port)",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port"
] | [
"0.6576839",
"0.6366624",
"0.63357687",
"0.628415",
"0.62502146",
"0.62232053",
"0.59385276",
"0.5834423",
"0.58323324",
"0.5817069",
"0.5712418",
"0.5707369",
"0.56816727",
"0.564813",
"0.56160957",
"0.5613599",
"0.55900675",
"0.55900675",
"0.5557575",
"0.55402493",
"0.5536671",
"0.5536671",
"0.5536671",
"0.55224174",
"0.55069995",
"0.5490928",
"0.5490928",
"0.5490928",
"0.5490928",
"0.5490928"
] | 0.69255394 | 0 |
Get the VOLTHA logical port for this port. For PON ports, a logical port is not currently created, so always return None | def get_logical_port(self):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_port(self):\n if self._port is None:\n self._port = Port(port_no=self._port_no,\n label=self._label,\n type=Port.PON_OLT,\n admin_state=AdminState.ENABLED,\n oper_status=OperStatus.ACTIVE)\n # TODO: For now, no way to report the proper ADMIN or OPER status\n # admin_state=self._admin_state,\n # oper_status=self._oper_status)\n return self._port",
"def _get_port(self):\n return self.__port",
"def get_physical_to_logical(self, port_num):\n\n return self.physical_to_logical.get(port_num)",
"def get_port(self):\n return self.__port",
"def internal_port(self):\r\n return self._internal_port",
"def get_port(self):\n return self.port",
"def get_port(self):\n \n return self._port",
"def _get_nport(self):\n return self.__nport",
"def getPort(self):\n return self._port",
"def get_serial(cls, port):\n if port in cls._open_ports:\n return cls._open_ports[port]\n else:\n return None",
"def getPort(self):\n return self._port",
"def external_port(self):\r\n return self._external_port",
"def get_logical_to_physical(self, logical_port):\n\n return self.logical_to_physical[logical_port]",
"def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")",
"def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")",
"def get_vncport(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetVNCPort', self.handle)",
"def get_port(self) -> int:\n return self._port",
"def get_portlet(self):\n if self.PORTLET_IDENT not in self._data_dict:\n return None\n return dao.get_portlet_by_identifier(self._data_dict[self.PORTLET_IDENT])",
"def get_vnc_port(self):\n\t\troot = self.get_xml()\n\t\t# get the VNC port\n\t\tgraphics = root.find('./devices/graphics')\n\t\tport = graphics.get('port')\n\t\treturn port",
"def head_port_monitoring(self):\n return self.head_args.port_monitoring if self.head_args else None",
"def localport(self) :\n\t\ttry :\n\t\t\treturn self._localport\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_device_port() -> Optional[str]:\n devices = [comport.device for comport in serial.tools.list_ports.comports()]\n for comport in devices:\n try:\n # find comport with our device\n ser = open_port(comport)\n if ser:\n command: bytes = bytes(\"ping\\r\\n\", encoding='utf-8')\n ser.write(command)\n answer: str = ser.readall().decode('utf-8')\n ser.close()\n if 'Ok' in answer:\n return comport\n except serial.SerialException:\n continue\n return None",
"def getWebPort(self):\n port = None\n if hasattr(self, 'web'):\n port = self.web.get('port', None)\n\n if port is not None:\n # Make sure it is an int.\n return int(port)\n else:\n return None",
"def peer_port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"peer_port\")",
"def get_cmd_port(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetCmdPort', self.handle)",
"def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")",
"def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")",
"def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")",
"def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")",
"def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")"
] | [
"0.69949347",
"0.6425281",
"0.6324397",
"0.6155099",
"0.61252946",
"0.608648",
"0.6081095",
"0.60716444",
"0.6043632",
"0.60040736",
"0.5972916",
"0.5964086",
"0.5931539",
"0.59029865",
"0.59029865",
"0.58882225",
"0.58602065",
"0.5801619",
"0.57786167",
"0.5769991",
"0.57699317",
"0.57632744",
"0.5760778",
"0.5754278",
"0.575178",
"0.5739432",
"0.5739432",
"0.5739432",
"0.5739432",
"0.5739432"
] | 0.73856133 | 0 |
Process PON status poll request | def process_status_poll(self, status):
self.log.debug('process-status-poll', status=status)
if self._admin_state != AdminState.ENABLED:
return
# Get new/missing from the discovered ONU leaf. Stale ONUs from previous
# configs are now cleaned up during h/w re-sync/reflow.
new, rediscovered_onus = self._process_status_onu_discovered_list(status.discovered_onu)
# Process newly discovered ONU list and rediscovered ONUs
for serial_number in new | rediscovered_onus:
reactor.callLater(0, self.add_onu, serial_number, status)
# Process LOS list
self._process_los_alarms(frozenset(status.ont_los))
# Process ONU info. Note that newly added ONUs will not be processed
# until the next pass
self._update_onu_status(status.onus) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def poll(self, poll_input):",
"def _process_pool_status_response(self, buf, length):\n\t\tself.pcpResInfo.pcp_add_json_result('command_status', 'success')\n\t\tvalue, index = self._getNextString(buf, 0)\n\t\tif value == 'ArraySize':\n\t\t\tindex += 1\n\t\t\tci_size = buf[index:]\n\t\t\tci_size = self.bytes_to_int(ci_size)\n\n\t\t\tself._setResultStatus(ResultStateType.INCOMPLETE)\n\t\t\tself.pcpResInfo.pcp_add_json_result('config', list())\n\t\telif value == 'ProcessConfig':\n\t\t\tindex += 1\n\t\t\tif self.PCPResultStatus(self.pcpResInfo) != ResultStateType.INCOMPLETE:\n\t\t\t\tself.pcp_internal_error('command failed. invalid response')\n\t\t\t\tself.pcpResInfo.pcp_add_json_result('command_status', 'failed')\n\t\t\t\tself._setResultStatus(ResultStateType.BAD_RESPONSE)\n\n\t\t\tstatus = POOL_REPORT_CONFIG()\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.name = value\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.value = value\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.desc = value\n\n\t\t\tself.pcpResInfo.pcp_append_json_result('config', status.get_json())\n\t\t\tself._setResultData(self.pcpResInfo, status)\n\t\telif value == 'CommandComplete':\n\t\t\tself._setResultStatus(ResultStateType.COMMAND_OK)",
"def _process_ping_response(self, message):\n self.set_available(True, True)\n if self.in_RSSI != message.in_RSSI.value:\n self.in_RSSI = message.in_RSSI.value\n self.do_callback(SENSOR_RSSI_IN[\"id\"])\n if self.out_RSSI != message.out_RSSI.value:\n self.out_RSSI = message.out_RSSI.value\n self.do_callback(SENSOR_RSSI_OUT[\"id\"])\n if self.ping_ms != message.ping_ms.value:\n self.ping_ms = message.ping_ms.value\n self.do_callback(SENSOR_PING[\"id\"])",
"def _handle_poll(self, relpath, params):\r\n request = json.loads(params.get('q')[0])\r\n ret = {}\r\n # request is a polling request for multiple files. For each file:\r\n # - id is some identifier assigned by the client, used to differentiate the results.\r\n # - path is the file to poll.\r\n # - pos is the last byte position in that file seen by the client.\r\n for poll in request:\r\n _id = poll.get('id', None)\r\n path = poll.get('path', None)\r\n pos = poll.get('pos', 0)\r\n if path:\r\n abspath = os.path.normpath(os.path.join(self._root, path))\r\n if os.path.isfile(abspath):\r\n with open(abspath, 'r') as infile:\r\n if pos:\r\n infile.seek(pos)\r\n content = infile.read()\r\n ret[_id] = content\r\n self._send_content(json.dumps(ret), 'application/json')",
"async def _async_status_request(self) -> None:\n try:\n # status_response = await self._hass.async_add_executor_job(\n # self._mc_status.status, self._MAX_RETRIES_STATUS\n # )\n if self.access_token:\n if (time.time() - self.last_request) > 1800:\n phantom = await self._hass.async_add_executor_job(\n self._phantom_load\n )\n if phantom.status_code == HTTP_OK:\n self.phantom_load = round(phantom.json().get(\"power\") / 1000, 3)\n else:\n _LOGGER.warning(phantom.content)\n\n # Got answer to request, update properties.\n live = await self._hass.async_add_executor_job(self._live_data)\n\n if live.status_code == HTTP_OK:\n self.power_usage = round(abs(live.json().get(\"power\")) / 1000, 3)\n else:\n _LOGGER.warning(live.content)\n\n self.last_request = time.time()\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.power_usage = None\n self.phantom_load = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s' failed - OSError: %s\",\n self.unique_id,\n error,\n )\n self._last_status_request_failed = True",
"def poll(self):\n self.poll_function(self.connection)",
"def test_poll(self):\n ret = poll.poll(self.args)\n self.assertEqual(ret, poll.NOERROR)",
"def _poll(data):\n target, vlan, oids = data\n errors = 0\n timeouts = 0\n results = {}\n for oid in oids:\n logging.debug('Collecting %s on %s @ %s', oid, target.host, vlan)\n if not oid.startswith('.1'):\n logging.warning(\n 'OID %s does not start with .1, please verify configuration', oid)\n continue\n try:\n results.update(\n {(k, vlan): v for k, v in target.walk(oid, vlan).iteritems()})\n except snmp.TimeoutError, e:\n timeouts += 1\n if vlan:\n logging.debug(\n 'Timeout, is switch configured for VLAN SNMP context? %s', e)\n else:\n logging.debug('Timeout, slow switch? %s', e)\n except snmp.Error, e:\n errors += 1\n logging.warning('SNMP error for OID %s@%s: %s', oid, vlan, str(e))\n return results, errors, timeouts",
"def process_poll_result(self, event, fd):\n if event & select.POLLNVAL:\n assert False, 'POLLNVAL on fd %d' % fd\n elif event & select.POLLIN:\n self.trigger_callback(fd)\n elif event & (select.POLLHUP | select.POLLERR):\n self.trigger_hangup(fd, event)\n else:\n assert False, \"Unknown event type %d on fd %d\" % (event, fd)",
"def HandlePingRequest(self, request, response):\n self._publish_helper.HandlePingRequest(request, response)",
"def _hpoll(self, pc):\n while True:\n mtype, mdata1, mdata2=self.gMsg()\n if mtype==\"msg\":\n self.processMsg(mdata1, mdata2)\n continue\n if mtype is None:\n break",
"async def parse_status_update(self, data):\n\n # If we don't know the config, just ask for it and wait for that\n if not self.config_loaded:\n await self.send_panel_req(0, 1)\n return\n\n # Check if the spa had anything new to say.\n # This will cause our internal states to update once per minute due\n # to the hour/minute counter. This is ok.\n have_new_data = False\n if self.prior_status is not None:\n for i in range(0, 31):\n if data[i] != self.prior_status[i]:\n have_new_data = True\n break\n else:\n have_new_data = True\n self.prior_status = bytearray(31)\n\n if not have_new_data:\n return\n\n if data[14] & 0x01:\n self.tempscale = self.TSCALE_C\n else:\n self.tempscale = self.TSCALE_F\n\n self.time_hour = data[8]\n self.time_minute = data[9]\n if data[14] & 0x02:\n self.timescale = self.TIMESCALE_12H\n else:\n self.timescale = self.TIMESCALE_24H\n\n temp = float(data[7])\n settemp = float(data[25])\n if self.tempscale == self.TSCALE_C:\n self.curtemp = temp / 2.0\n self.settemp = settemp / 2.0\n else:\n self.curtemp = temp\n self.settemp = settemp\n\n # flag 2 is heatmode\n self.heatmode = data[10] & 0x03\n\n # flag 3 is filter mode\n self.filter_mode = (data[14] & 0x0c) >> 2\n\n # flag 4 heating, temp range\n self.heatstate = (data[15] & 0x30) >> 4\n self.temprange = (data[15] & 0x04) >> 2\n\n for i in range(0, 6):\n if not self.pump_array[i]:\n continue\n # 1-4 are in one byte, 5/6 are in another\n if i < 4:\n self.pump_status[i] = (data[16] >> i*2) & 0x03\n else:\n self.pump_status[i] = (data[17] >> ((i - 4)*2)) & 0x03\n\n if self.circ_pump:\n if data[18] == 0x02:\n self.circ_pump_status = 1\n else:\n self.circ_pump_status = 0\n\n for i in range(0, 2):\n if not self.light_array[i]:\n continue\n self.light_status[i] = (data[19] >> i) & 0x03\n\n if self.mister:\n self.mister_status = data[20] & 0x01\n\n if self.blower:\n self.blower_status = (data[18] & 0x0c) >> 2\n\n for i in range(0, 2):\n if not self.aux_array[i]:\n continue\n if i == 0:\n self.aux_status[i] = data[20] & 0x08\n else:\n self.aux_status[i] = data[20] & 0x10\n\n self.lastupd = time.time()\n # populate prior_status\n for i in range(0, 31):\n self.prior_status[i] = data[i]\n await self.int_new_data_cb()",
"def remote_status():",
"def onPing(self, payload):",
"def poll_pedrpc(self, target):\n self._stop_netmon(target)\n\n self._check_procmon_failures(target)",
"def status(event):\n e = ''\n\n try:\n logger.setLevel(event.get('loglevel'))\n logging.getLogger('urllib3').setLevel(event.get('loglevel'))\n except:\n pass\n try:\n pool = urllib3.PoolManager()\n except Exception as e:\n raise CreatePoolManagerFailure(e)\n\n if event.get('url', None) is None:\n raise AttributeError(\"url not specified\")\n\n # The code doesn't know how to handle POST\n # The code doesn't know how to handle these yet\n\n st = time.perf_counter()\n try:\n response = pool.request(\n event.get('method', 'GET'),\n event.get('url', None),\n retries=int(event.get('retries', 3)),\n timeout=float(event.get('timeout', 3)))\n except Exception as e:\n raise HttpRequestError(e)\n\n responseTime = (time.perf_counter() - st) * 1000\n\n logger.debug(\"checking endpoint: %s:%s status=%s bytes=%s time=%.3fms\",\n event.get('method', 'GET'),\n event.get('url', None), response.status,\n response._fp_bytes_read, responseTime)\n\n if response.status >= 200 and response.status <= 299:\n statusMessage = \"2xx\"\n elif response.status >= 300 and response.status <= 399:\n statusMessage = \"3xx\"\n elif response.status >= 400 and response.status <= 499:\n statusMessage = \"4xx\"\n elif response.status >= 500 and response.status <= 599:\n statusMessage = \"5xx\"\n endpointStatus = response.status\n\n ts = datetime.datetime.timestamp(datetime.datetime.now())\n \n logging.getLogger('urllib3').setLevel(logging.WARNING)\n \n return {\n 'statusCode': 200,\n 'body': \"OK\",\n 'url': event.get('url', None),\n 'error': e,\n 'timestamp': ts,\n 'endpoint': {\n 'status': endpointStatus,\n 'message': statusMessage,\n 'time': responseTime\n }\n }",
"def pool_summary_request(cls, task_id, req, app):\n try:\n if task_id:\n req.update_report_status('Processing')\n db.session.commit()\n app.logger.info('polling task with task_id: {0} and request_id: {1}'.format(task_id, req.id))\n thread = threading.Thread(daemon=True, target=cls.check_summary_status,\n args=(task_id, req, app))\n thread.start()\n else:\n req.update_report_status('Failed')\n db.session.commit()\n except Exception as e:\n app.logger.exception(e)\n req.update_report_status('Failed')\n db.session.commit()",
"def status_callback(self, data):\n\n print \"arm status callback\", data.data\n if data.data == \"busy\" or data.data == \"error\":\n self.status = 0\n elif data.data == \"free\":\n self.status = 1",
"def ping():\n return ping_response()",
"def ping():\n return ping_response()",
"def ping():\n return ping_response()",
"def ping():\n return ping_response()",
"def poll_loadbalancer_status(request, loadbalancer_id, callback,\n from_state='PENDING_UPDATE', to_state='ACTIVE',\n callback_kwargs=None):\n interval = conf.HORIZON_CONFIG['ajax_poll_interval'] / 1000.0\n status = from_state\n while status == from_state:\n time.sleep(interval)\n conn = get_sdk_connection(request)\n lb = conn.load_balancer.get_load_balancer(loadbalancer_id)\n status = lb.provisioning_status\n\n if status == to_state:\n kwargs = {'loadbalancer_id': loadbalancer_id}\n if callback_kwargs:\n kwargs.update(callback_kwargs)\n callback(request, **kwargs)",
"def _request_status_update(self) -> None:\n for channel_id, channel in self.channels.items():\n send_packet = Packet(\n packet_type=PACKET_TYPE_REQUEST_STATUSES,\n subject_id=SYSTEM_CHANNEL_ID,\n data={},\n )\n channel.enqueue_send(send_packet)",
"def refresh_status() -> None:\n ...",
"def setup_poll(self):\n while True:\n try:\n self.do_polling()\n time.sleep(0.01)\n except KeyboardInterrupt:\n print(self.get_stream())\n exit()",
"def poll_for_active_status(self, server_id, req_status=\"ACTIVE\"):\n status = \"BUILDING\"\n iteration = 30\n while status.upper() != req_status.upper() \\\n or status.upper() != \"ERROR\":\n server_info = self.show_server(server_id)\n if not isinstance(server_info, dict):\n return\n status = server_info['status']\n LOG_OBJ.debug(\"Server status : %s\" % status)\n if status.upper() in [req_status.upper(), 'ERROR']:\n break\n LOG_OBJ.debug(\"Waiting till server goes to %s state...\"\n % req_status)\n time.sleep(20)\n iteration -= 1\n if not iteration:\n err_msg = \"The server:%s is NOT in %s state\" \\\n \"within 10 minutes\" % (server_id, status)\n LOG_OBJ.error(err_msg)\n return \"POLL_TIME_EXCEEDED\"\n\n LOG_OBJ.debug(\"Server becomes %s\" % status)\n\n return status",
"def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)",
"def poll_processing_status(model_url):\n max_errors = 10\n errors = 0\n retry = 0\n max_retries = 50\n retry_timeout = 5 # seconds\n\n print\n 'Start polling processing status for model'\n\n while (retry < max_retries) and (errors < max_errors):\n print\n 'Try polling processing status (attempt #{}) ...'.format(retry)\n\n try:\n r = requests.get(model_url, **_get_request_payload())\n except requests.exceptions.RequestException as e:\n print\n 'Try failed with error {}'.format(e)\n errors += 1\n retry += 1\n continue\n\n result = r.json()\n\n if r.status_code != requests.codes.ok:\n print\n 'Upload failed with error: {}'.format(result['error'])\n errors += 1\n retry += 1\n continue\n\n processing_status = result['status']['processing']\n\n if processing_status == 'PENDING':\n print\n 'Your model is in the processing queue. Will retry in {} seconds'.format(\n retry_timeout)\n print\n 'Want to skip the line? Get a pro account! https://sketchfab.com/plans'\n retry += 1\n sleep(retry_timeout)\n continue\n elif processing_status == 'PROCESSING':\n print\n 'Your model is still being processed. Will retry in {} seconds'.format(\n retry_timeout)\n retry += 1\n sleep(retry_timeout)\n continue\n elif processing_status == 'FAILED':\n print\n 'Processing failed: {}'.format(result['error'])\n return False\n elif processing_status == 'SUCCEEDED':\n print\n 'Processing successful. Check your model here: {}'.format(\n model_url)\n return True\n\n retry += 1\n\n print\n 'Stopped polling after too many retries or too many errors'\n return False",
"def process_resp(self, msg, operation, status, index):\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, operation)\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, status))\n if status == \"0\":\n self.cause.extend(msg)\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, \"health\")\n analyse_status = MonitorStatus[\"red\"]\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, analyse_status))"
] | [
"0.66186523",
"0.6358219",
"0.5927366",
"0.58907",
"0.58850914",
"0.5843748",
"0.5833384",
"0.5741044",
"0.5719501",
"0.5672158",
"0.56507057",
"0.56396514",
"0.5633935",
"0.5629005",
"0.5579656",
"0.5574",
"0.5571632",
"0.5566738",
"0.5507098",
"0.5507098",
"0.5507098",
"0.5507098",
"0.54977596",
"0.548537",
"0.54745996",
"0.5473374",
"0.54663545",
"0.546206",
"0.54585326",
"0.54327554"
] | 0.68012106 | 0 |
Process ONU status for this PON | def _update_onu_status(self, onus):
for onu_id, onu_status in onus.iteritems():
if onu_id in self._onu_by_id:
self._onu_by_id[onu_id].rssi = onu_status.rssi
self._onu_by_id[onu_id].equalization_delay = onu_status.equalization_delay
self._onu_by_id[onu_id].fiber_length = onu_status.fiber_length | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_status_poll(self, status):\n self.log.debug('process-status-poll', status=status)\n\n if self._admin_state != AdminState.ENABLED:\n return\n\n # Get new/missing from the discovered ONU leaf. Stale ONUs from previous\n # configs are now cleaned up during h/w re-sync/reflow.\n\n new, rediscovered_onus = self._process_status_onu_discovered_list(status.discovered_onu)\n\n # Process newly discovered ONU list and rediscovered ONUs\n\n for serial_number in new | rediscovered_onus:\n reactor.callLater(0, self.add_onu, serial_number, status)\n\n # Process LOS list\n self._process_los_alarms(frozenset(status.ont_los))\n\n # Process ONU info. Note that newly added ONUs will not be processed\n # until the next pass\n\n self._update_onu_status(status.onus)",
"def output_status(self, on=False):\n if on:\n enabled = str(1)\n else:\n enabled = str(0)\n function_string = 'OP' + self.output + ' ' + enabled\n return self.scpi_comm(function_string)",
"def processInterfaceStatusUpdate(iTag, status): #@NoSelf",
"def update_status(self):\n if self.pwm:\n if self.state == GPIO.HIGH:\n thread = threading.Thread(target=self._pwm_on, args=())\n thread.start()\n elif self.state == GPIO.LOW:\n thread = threading.Thread(target=self._pwm_off, args=())\n thread.start()\n else:\n GPIO.output(self.id_, self.state)\n\n return self.get_status()",
"def processStatus(name, verbose=False):\n imrclient.update_server_info()\n imrclient.process_status(name, verbose)",
"def set_pltifu_status(self, plate, ifudesign, status='queued'):\n # Get the name of the status file \n root = manga.MaNGAConfig(plate, ifudesign, drpver=self.drpver,\n redux_path=self.redux_path).cfg_root\n self.set_status(str(self.calling_path / str(plate) / str(ifudesign) / root), status)",
"def on_all(self):\n self._set_status(\"on\", '11111111')",
"def status_callback(self, data):\n\n print \"arm status callback\", data.data\n if data.data == \"busy\" or data.data == \"error\":\n self.status = 0\n elif data.data == \"free\":\n self.status = 1",
"def _get_onu_info(self, serial_number):\n try:\n from flow.demo_data import get_tconts, get_gem_ports, get_onu_id\n \n if self.activation_method == \"autoactivate\":\n onu_id = get_onu_id(serial_number)\n if onu_id is None:\n onu_id = self.get_next_onu_id()\n enabled = True\n channel_speed = 0\n tconts = get_tconts(serial_number, onu_id)\n gem_ports = get_gem_ports(serial_number, onu_id)\n vont_ani = None\n\n elif self.activation_method == \"autodiscovery\":\n if self.authentication_method == 'serial-number':\n gpon_info = self.olt.get_xpon_info(self.pon_id)\n\n try:\n # TODO: Change iteration to itervalues below\n vont_info = next(info for _, info in gpon_info['v-ont-anis'].items()\n if info.get('expected-serial-number') == serial_number)\n vont_ani = vont_info['data']\n\n onu_id = vont_info['onu-id']\n enabled = vont_info['enabled']\n channel_speed = vont_info['upstream-channel-speed']\n\n tconts = {key: val for key, val in gpon_info['tconts'].iteritems()\n if val.vont_ani == vont_info['name']}\n tcont_names = set(tconts.keys())\n\n gem_ports = {key: val for key, val in gpon_info['gem-ports'].iteritems()\n if val.tconf_ref in tcont_names}\n\n except StopIteration:\n self.log.debug('no-vont-ony')\n return None # Can happen if vont-ani/serial-number has not yet been configured\n else:\n self.log.debug('not-serial-number-authentication')\n return None\n else:\n self.log.debug('not-auto-discovery')\n return None\n\n onu_info = {\n 'device-id': self.olt.device_id,\n 'serial-number': serial_number,\n 'xpon-name': None,\n 'pon': self,\n 'onu-id': onu_id,\n 'enabled': enabled,\n 'upstream-channel-speed': channel_speed,\n 'password': Onu.DEFAULT_PASSWORD,\n 't-conts': tconts,\n 'gem-ports': gem_ports,\n 'onu-vid': self.olt.get_channel_id(self._pon_id, onu_id),\n 'channel-id': self.olt.get_channel_id(self._pon_id, onu_id),\n 'vont-ani': vont_ani\n }\n # Hold off ONU activation until at least one GEM Port is defined.\n self.log.debug('onu-info', gem_ports=gem_ports)\n\n return onu_info\n # return onu_info if len(gem_ports) > 0 else None\n\n except Exception as e:\n self.log.exception('get-onu-info', e=e)\n return None",
"def status():\n pass",
"def remote_status():",
"def comando_status(self):\r\n\tif args.tipo == 'web':\r\n return self.status_web()\r\n\r\n\tif args.tipo == 'nfce':\r\n return self.consulta_status_nfce()\r\n\r\n\tif args.tipo == 'dual':\r\n return self.status_impressora_dual()",
"def getStatus():",
"def on_status_update(self, data):\n # TODO: Update User/Client object with this info\n print ('Status Update: %s' % data)",
"def is_on(self, in_call):\n # print(\"is_on here\", self.dname, self.values[self.dname + '.' + self.cnd['chans'][0]])\n self.error_code = self.cnd['err_code']\n if self.values[self.dname + '.' + self.cnd['chans'][0]]:\n self.fail_count['is_on'] = 0\n else:\n self.fail_count['is_on'] = 1\n self.log_manager('is_on')",
"def on_status(self, status):\n log.debug(\"Received status: %d\", status.id)",
"def update_status(self):\n num_nbrs = len(self.neighbors)\n if not 2 <= num_nbrs <= 3:\n self.status = 0\n elif num_nbrs == 3:\n self.status = 1",
"def _process_status_onu_discovered_list(self, discovered_onus):\n self.log.debug('discovered-ONUs', list=discovered_onus)\n\n # Only request discovery if activation is auto-discovery or auto-activate\n continue_discovery = ['autodiscovery', 'autoactivate']\n\n if self._activation_method not in continue_discovery:\n return set(), set()\n\n my_onus = frozenset(self._onus.keys())\n\n new_onus = discovered_onus - my_onus\n rediscovered_onus = my_onus & discovered_onus\n\n return new_onus, rediscovered_onus",
"def _set_status(self, action, status):\n cmd = \"curl http://{}:{}@{}/{}s.cgi?led={}\".format(self.config['username'],\n self.config['password'],\n self.config['host'],\n action,\n status)\n self.log.info(\"PDU cmd: {}\".format(cmd))\n utils.start_standing_subprocess(cmd)\n time.sleep(10)",
"def status(update: Update, context: CallbackContext) -> None:\n str_list = ['Sauna main power is ']\n if __sauna.control.getPortValue(\"Mains Sensor\") == 1:\n str_list.append('on.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n str_list.append('Sauna power switch is ')\n if __sauna.control.getPortValue(\"Power Sensor\") == 1:\n str_list.append('on.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n str_list.append('Sauna oven is currently ')\n if __sauna.control.getPortValue(\"Oven Sensor\") == 1:\n str_list.append('HEATING.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n str_list.append('Sauna light is ')\n if __sauna.control.getPortValue(\"Light Sensor\") == 1:\n str_list.append('on.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n t1 = __sauna.control.getPortValue(\"Temperature Sensor\")\n t2 = float(\"{:.1f}\".format(t1))\n str_list.append('Sauna temp is currently ' + str(t2) + ' C.\\n')\n\n temp_str = str(__sauna.control.getUpperLimit(\"Temperature Sensor\"))\n str_list.append('Sauna temp is going to ' + temp_str + ' C.\\n')\n update.message.reply_text(''.join(str_list))\n\n name = __sauna.pi_address\n update.message.reply_text(name)",
"def status(self):",
"def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True",
"def report_status_up(self):\n self._update_sandesh_status(ConnectionStatus.UP)",
"def _UpdateProcessingStatus(self, pid, process_status, used_memory):",
"def onu_id(self):\n return self._packet['onu-id']",
"def status(self):\n self.scion_sh('status')",
"def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status",
"def or_conn_status_event(self, event):\r\n pass",
"def updateStatus(self, status):\n pass",
"def update_status_info (cls, nffg, status,\n log=logging.getLogger(\"UPDATE-STATUS\")):\n log.debug(\"Add %s status for NFs and Flowrules...\" % status)\n for nf in nffg.nfs:\n nf.status = status\n for infra in nffg.infras:\n for flowrule in infra.flowrules():\n flowrule.status = status\n return nffg"
] | [
"0.655683",
"0.6364406",
"0.5901802",
"0.5859409",
"0.573432",
"0.5732758",
"0.57218087",
"0.567886",
"0.5675471",
"0.56213325",
"0.55984366",
"0.55904573",
"0.5551701",
"0.5540505",
"0.55091304",
"0.54808426",
"0.547645",
"0.54632586",
"0.54630697",
"0.5462684",
"0.54545075",
"0.54458433",
"0.54179674",
"0.5412536",
"0.54008424",
"0.539518",
"0.53929824",
"0.5381729",
"0.5375677",
"0.53595597"
] | 0.70205426 | 0 |
Parse through available xPON information for ONU configuration settings | def _get_onu_info(self, serial_number):
try:
from flow.demo_data import get_tconts, get_gem_ports, get_onu_id
if self.activation_method == "autoactivate":
onu_id = get_onu_id(serial_number)
if onu_id is None:
onu_id = self.get_next_onu_id()
enabled = True
channel_speed = 0
tconts = get_tconts(serial_number, onu_id)
gem_ports = get_gem_ports(serial_number, onu_id)
vont_ani = None
elif self.activation_method == "autodiscovery":
if self.authentication_method == 'serial-number':
gpon_info = self.olt.get_xpon_info(self.pon_id)
try:
# TODO: Change iteration to itervalues below
vont_info = next(info for _, info in gpon_info['v-ont-anis'].items()
if info.get('expected-serial-number') == serial_number)
vont_ani = vont_info['data']
onu_id = vont_info['onu-id']
enabled = vont_info['enabled']
channel_speed = vont_info['upstream-channel-speed']
tconts = {key: val for key, val in gpon_info['tconts'].iteritems()
if val.vont_ani == vont_info['name']}
tcont_names = set(tconts.keys())
gem_ports = {key: val for key, val in gpon_info['gem-ports'].iteritems()
if val.tconf_ref in tcont_names}
except StopIteration:
self.log.debug('no-vont-ony')
return None # Can happen if vont-ani/serial-number has not yet been configured
else:
self.log.debug('not-serial-number-authentication')
return None
else:
self.log.debug('not-auto-discovery')
return None
onu_info = {
'device-id': self.olt.device_id,
'serial-number': serial_number,
'xpon-name': None,
'pon': self,
'onu-id': onu_id,
'enabled': enabled,
'upstream-channel-speed': channel_speed,
'password': Onu.DEFAULT_PASSWORD,
't-conts': tconts,
'gem-ports': gem_ports,
'onu-vid': self.olt.get_channel_id(self._pon_id, onu_id),
'channel-id': self.olt.get_channel_id(self._pon_id, onu_id),
'vont-ani': vont_ani
}
# Hold off ONU activation until at least one GEM Port is defined.
self.log.debug('onu-info', gem_ports=gem_ports)
return onu_info
# return onu_info if len(gem_ports) > 0 else None
except Exception as e:
self.log.exception('get-onu-info', e=e)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_knx(self, config):\n\t\tif \"knx\" in config:\n\t\t\tself._knx = config[\"knx\"]\n\t\tfor item in self._knx[\"sensors\"]:\n\t\t\tif not \"address\" in item:\n\t\t\t\traise ValueError(\"Missing address for KNX sensor\")\n\t\tfor item in self._knx[\"switches\"]:\n\t\t\tif not \"address\" in item:\n\t\t\t\traise ValueError(\"Missing address for KNX switch\")",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def setup_parser(self) -> Dict[str, Any]:\n\n\n # % GALAT - SPP Single Point Positioning\n # % -------------------------------------\n # % Processing Option\n # % ------------------\n # % GNSS system(s) : GALILEO\n # % Orbit type : Broadcast - INAV\n # % Solution type : SPP\n # % Frequency : E1\n # % Elevation mask : 5.0 deg\n # % Time interval : 30.0 s\n # % Ionosphere opt : NeQuick-G\n # % Troposhere opt : GMF with GPT\n # % Obs start : 2020/01/04 00:00:00.0 GPST (week 2086 518400.0s)\n # % Obs end : 2020/01/04 23:59:30.0 GPST (week 2086 604770.0s)\n # % Epoch expected : 2880\n # % Epoch have : 2880\n # %\n # % Input file(s) : KOUG00GUF_R_20200040000_01D_30S_MO.rnx\n # % Input file(s) : CNES0030.20L\n # % Input file(s) : CNES0040.20L\n # % Input file(s) : igs14.atx\n # %\n # % RINEX header info\n # % ------------------\n # % Marker : KOUG 97301M402\n # % Receiver T/V/# : SEPT POLARX5TR 5.3.0 17323022503\n # % Antenna T/ /# : LEIAR25.R3 LEIT 10180007\n # % Position XYZ : 3855263.3407 -5049731.9986 563040.4252\n # % Antenna H/E/N : 0.0000 0.0000 0.0000\n self._parse_header()\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+--\n # 2020/01/04 00:00:00 5.098466365 -52.639742999 106.8901 -0.603 -0.821 -0.349 1.018 0.349 \n # 2020/01/04 00:00:30 5.098466094 -52.639742684 107.4962 -0.633 -0.856 0.257 1.065 0.257 \n # 2020/01/04 00:01:00 5.098466030 -52.639740961 107.6125 -0.640 -1.047 0.373 1.228 0.373 \n return dict(\n names=(\n \"yyyymmdd\", \n \"hhmmss\", \n \"latitude\", \n \"longitude\", \n \"height\", \n \"dlatitude\", \n \"dlongitude\", \n \"dheight\",\n \"hpe\",\n \"vpe\",\n \"site_vel_3d\",\n \"pdop\",\n \"num_satellite_available\",\n \"num_satellite_used\",\n ),\n comments=\"%\",\n delimiter=(10, 9, 15, 15, 10, 9, 9, 9, 9, 9, 9, 6, 4, 4),\n dtype=(\"U10\", \"U9\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n autostrip=True,\n )",
"def parse_panel_config_resp(self, data):\n\n # pumps 0-5\n self.pump_array[0] = int((data[5] & 0x03))\n self.pump_array[1] = int((data[5] & 0x0c) >> 2)\n self.pump_array[2] = int((data[5] & 0x30) >> 4)\n self.pump_array[3] = int((data[5] & 0xc0) >> 6)\n self.pump_array[4] = int((data[6] & 0x03))\n self.pump_array[5] = int((data[6] & 0xc0) >> 6)\n\n # lights 0-1\n self.light_array[0] = int((data[7] & 0x03) != 0)\n self.light_array[1] = int((data[7] & 0xc0) != 0)\n\n self.circ_pump = int((data[8] & 0x80) != 0)\n self.blower = int((data[8] & 0x03) != 0)\n self.mister = int((data[9] & 0x30) != 0)\n\n self.aux_array[0] = int((data[9] & 0x01) != 0)\n self.aux_array[1] = int((data[9] & 0x02) != 0)\n\n self.config_loaded = True",
"def getZaptelConf(self):\n output = []\n for portInd, portLine in enumerate(self.portLines):\n if self[portInd]['type'] != 'na':\n values = self[portInd]\n values['type'] = values['type'] == 'fxs' and \"fxo\" or 'fxs' #Hmm crazy zaptel idea that your fxo is your fxs in zapata but the correct way around in zaptel\n output.append(\"%(type)s%(signalling)s=\" % self[portInd] + str(portLine[0]))\n return output",
"def _build_parsed_values(self):\n\n SERIAL_NUMBER = \"SerialNumber\"\n PROFILE_MODE = \"ProfileMode\"\n BATTERY = \"Battery\"\n DATA_CHANNELS = \"DataChannels\"\n\n # check to make sure there is a correct match before continuing\n match = SBE19ConfigurationParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed configuration data: [%s]\" %\n self.raw_data)\n\n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\", root.tagName)\n serial_number = root.getAttribute(SERIAL_NUMBER)\n result = [{DataParticleKey.VALUE_ID: SBE19ConfigurationParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: serial_number}]\n\n result.append(self._get_xml_parameter(root, SBE19ConfigurationParticleKey.ECHO_CHARACTERS, self.yesno2bool))\n result.append(self._get_xml_parameter(root, SBE19ConfigurationParticleKey.OUTPUT_EXECUTED_TAG, self.yesno2bool))\n result.append(self._get_xml_parameter(root, SBE19ConfigurationParticleKey.OUTPUT_FORMAT, str))\n\n element = self._extract_xml_elements(root, PROFILE_MODE)[0]\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.SCANS_TO_AVERAGE, int))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.MIN_COND_FREQ, int))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.PUMP_DELAY, int))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.AUTO_RUN, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.IGNORE_SWITCH, self.yesno2bool))\n\n element = self._extract_xml_elements(root, BATTERY)[0]\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.BATTERY_TYPE, str))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.BATTERY_CUTOFF))\n\n element = self._extract_xml_elements(root, DATA_CHANNELS)[0]\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.EXT_VOLT_0, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.EXT_VOLT_1, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.EXT_VOLT_2, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.EXT_VOLT_3, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.EXT_VOLT_4, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.EXT_VOLT_5, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.SBE38, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.WETLABS, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.OPTODE, self.yesno2bool))\n result.append(self._get_xml_parameter(element, SBE19ConfigurationParticleKey.SBE63, self.yesno2bool))\n result.append(\n self._get_xml_parameter(element, SBE19ConfigurationParticleKey.GAS_TENSION_DEVICE, self.yesno2bool))\n\n return result",
"def readSettings(self):\n for i in range(1,N_STATION+1):\n vol = f\"vol{i}\"\n self.param.vol[i-1] = self.settings.value(vol,type=int)\n info = f\"info{i}\"\n self.param.info[i-1] = self.settings.value(info,type=str)\n ip = f\"ip{i}\"\n self.param.ip[i-1] = self.settings.value(ip,type=str)\n muted = f\"muted{i}\"\n self.param.muted[i-1] = self.settings.value(muted,type=bool)",
"def load_ups_conf(self):\n conf = ConfigParser()\n conf.read(UPS_CONF_FILENAME)\n self.ups_names = [s for s in conf.sections()] #pylint: disable=unnecessary-comprehension\n ups_confs = dict()\n for ups in self.ups_names:\n section = conf[ups]\n ups_confs[ups] = {key: val for key, val in section.items()} #pylint: disable=unnecessary-comprehension\n self.ups_confs = ups_confs",
"def parse_config_resp(self, data):\n\n macaddr = f'{data[8]:x}:{data[9]:x}:{data[10]:x}'\\\n f':{data[11]:x}:{data[12]:x}:{data[13]:x}'\n\n pump_array = [0, 0, 0, 0, 0, 0]\n pump_array[0] = int((data[5] & 0x03))\n pump_array[1] = int((data[5] & 0x0c) >> 2)\n pump_array[2] = int((data[5] & 0x30) >> 4)\n pump_array[3] = int((data[5] & 0xc0) >> 6)\n pump_array[4] = int((data[6] & 0x03))\n pump_array[5] = int((data[6] & 0xc0) >> 6)\n\n light_array = [0, 0]\n # not a typo\n light_array[1] = int((data[7] & 0x03) != 0)\n light_array[0] = int((data[7] & 0xc0) != 0)\n\n return (macaddr, pump_array, light_array)",
"def _parse_metadata(config):\n if not config.active or config.device_metadata is None:\n return None\n\n width, height = config.width, config.height\n points = []\n for point in config.device_metadata.split(\"|\"):\n try:\n x, y = point.split(\";\")\n points.append([float(x) * width, float(y) * height])\n except:\n return None\n\n if len(points) != 4:\n return None\n\n return points",
"def test_lsusb_extra_hub_port_status_info(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_extra_hub_port_status_info, quiet=True), self.generic_lsusb_extra_hub_port_status_info_json)",
"def test_RHNConfig(self):\n\n # make sure all of the fields are here\n cfg = ospsurvey.probes.software.get_rhn_config('tests/data/up2date')\n self.assertEqual(len(cfg.keys()), 21)\n\n # make sure none have [comment] in them\n comment_keys = [c for c in cfg.keys() if re.match('.*\\[comment\\].*', c)]\n self.assertEqual(len(comment_keys), 0)\n\n # make sure the blank fields are correctly processed\n blank_fields = [f for f in cfg.keys() if cfg[f] == '']\n self.assertEqual(len(blank_fields), 5)",
"def read_setup(inifile):\n # inifile = os.path.join(spathy_path, inifile)\n print(inifile)\n cfg = configparser.ConfigParser()\n cfg.read(inifile)\n\n pp = {}\n for s in cfg.sections():\n section = s.encode('ascii', 'ignore')\n pp[section] = {}\n for k, v in cfg.items(section):\n key = k.encode('ascii', 'ignore')\n val = v.encode('ascii', 'ignore')\n if section == 'General': # 'general' section\n pp[section][key] = val\n else:\n pp[section][key] = float(val)\n pp['General']['dt'] = float(pp['General']['dt'])\n\n pgen = pp['General']\n pcpy = pp['CanopyGrid']\n pbu = pp['BucketGrid']\n ptop = pp['Topmodel']\n\n return pgen, pcpy, pbu, ptop",
"def _extract_settings(settings):\n try:\n api_username = settings.get('manager', 'api_username')\n api_key = settings.get('manager', 'api_key')\n api_url = settings.get('manager', 'api_url')\n except (ConfigParser.NoSectionError,\n ConfigParser.NoOptionError):\n raise ValueError('Missing configuration: manager')\n\n url = api_url\n if not url.startswith('http://'):\n url = 'http://' + url\n\n return (url, api_username, api_key)",
"def seninfo(index):\n supported_psu = list(range(1, _wrapper_get_num_psus() + 1))\n psu_ids = []\n if (index < 0):\n psu_ids = supported_psu\n else:\n psu_ids = [index]\n\n for psu in psu_ids:\n psu_name = _wrapper_get_psu_name(psu)\n if psu not in supported_psu:\n click.echo(\"Error! The {} is not available on the platform.\\n\" \\\n \"Number of supported PSU - {}.\".format(psu_name, len(supported_psu)))\n continue\n oper_status = _wrapper_get_psu_status(psu)\n \n if not oper_status:\n click.echo(\"{} is Not OK\\n\".format(psu_name))\n continue\n\n v_out = _wrapper_get_output_voltage(psu) * 1000\n i_out = _wrapper_get_output_current(psu) * 1000\n p_out = _wrapper_get_output_power(psu) * 1000\n\n fan1_rpm = _wrapper_get_fan_rpm(psu, 1)\n click.echo(\"{} is OK\\nOutput Voltage: {} mv\\n\" \\\n \"Output Current: {} ma\\nOutput Power: {} mw\\n\" \\\n \"Fan1 Speed: {} rpm\\n\".format(psu_name, v_out, i_out, p_out, fan1_rpm))",
"def read_settings(settings):\n with open(settings) as conf_file:\n for line in conf_file:\n if \"OP\" in line:\n opcodes = unpack(conf_file)\n if \"MODES\" in line:\n adrmodes = unpack(conf_file)\n\n return (opcodes, adrmodes)",
"def antenny_config_print_values(self):\n return self.antenny_config.print_values()",
"def parse_conf(self):\n\n parser = configparser.RawConfigParser()\n parser.read(self.filename)\n\n try:\n self.id_node = parser['CONF_MACHINE']['ID_NODE']\n\n # eliminate possible white spaces between metrics\n temp = parser['CONF_MACHINE']['METRICS'].split(',')\n for itr in temp:\n self.metrics.append(itr.strip())\n\n except Exception:\n raise Exception(\"missing id or metrics\")\n\n try:\n self.interval = parser['CONF_MAHCINE']['INTERVAL']\n except Exception:\n self.interval = 1\n\n try:\n self.ampq_url = parser['ampq']['url']\n self.ampq_port = parser['ampq']['port']\n self.ampq_vhost = parser['ampq']['vhost']\n self.ampq_user = parser['ampq']['user']\n self.ampq_password = parser['ampq']['password']\n except Exception:\n raise Exception(\"missing ampq configs\")",
"def config_parse_file():\n global ANGELCO_EMAIL, ANGELCO_PASSWORD\n\n print(\"Parsing the config file...\")\n config = configparser.ConfigParser()\n with open('dwh.cfg') as configfile:\n config.read_file(configfile)\n\n ANGELCO_EMAIL = config.get('ANGELCO', 'EMAIL')\n ANGELCO_PASSWORD = config.get('ANGELCO', 'PASSWORD')",
"def x_list():\n\t_loadconfig()",
"def getPineAPSettings(self):\n return self.request('getPineAPSettings')",
"def _check_config(self):",
"def getZapataConf(self):\n #cProf = briProfiles[self['briconfig']] #Grab the config profile\n #output = self.mergeConfigList(cProf, briConfigList)\n output = []\n for portInd, portLine in enumerate(self.portLines[:-1]):\n if self[portInd]['type'] == 'na':\n continue\n signalling = str.join('_', (self[portInd]['type'], self[portInd]['signalling']))\n output.append(\"group = \"+ str.join(', ', self.pluginEntity.getPortGroup(portLine[1])))\n #Get CallerID\n output.append(\"callerid = \" + self[portInd]['callerid'])\n #Get PickupGroup\n output.append(\"callgroup = \" + self[portInd]['callgroup'])\n output.append(\"pickupgroup = \" + self[portInd]['pickupgroup'])\n #Context Bindings\n output.append(\"context = \"+ self[portInd]['context'])\n output.append(\"signalling = \"+ signalling) \n output.append(\"channel = \"+ str(portLine[0]))\n return output",
"def parseProperties(self) -> bool:\n\n # vNetIds is not a mandatory property. This property can be used if the resources are distributed across multiple vNets.\n self.vNetIds = self.providerProperties.get(\"vNetIds\", None)\n\n # enabledProviders contains the provider types for which AIOps is enabled. Mandatory property.\n self.enabledProviders = self.providerProperties.get(\n \"enabledProviders\", None)\n if not self.enabledProviders:\n self.tracer.error(\n \"[%s] enabledProviders cannot be empty in the AIOps config.\" % self.fullName)\n return False\n return True",
"def _parse_general_info_V4X(par, parfile):\n line = None\n while line != '':\n pos = parfile.tell()\n line = parfile.readline()\n #Parse the useful parts of the general info entry on the left and\n #right of the colon: key and value\n m = re.search(r'\\. ([^<>\\(\\)\\[\\]]*[a-zA-Z]).*: *(.*)', line)\n if not m:\n parfile.seek(pos)\n break\n key, val = m.group(1, 2)\n key = _sanitize_to_identifer(key).lower()\n #Try to guess the type of the field by conversion\n _val_split = val.split()\n if len(_val_split) > 1:\n try:\n val = np.array(tuple(float(x) for x in _val_split))\n except:\n pass\n else:\n try:\n val = int(val)\n except ValueError:\n pass\n #logger.debug(\"Key = '{0}' Val = '{1}'\".format(key, val))\n setattr(par.gen_info, key, val)\n return par.gen_info",
"def get_snmp_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/snmp-setting\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def test_iosxr_netconf_get_config(nornir):\n nr = nornir.filter(name=DEVICE_NAME)\n\n result = nr.run(\n netconf_get_config,\n source=\"running\",\n path=\"\"\"\n <interfaces xmlns=\"http://openconfig.net/yang/interfaces\">\n </interfaces>\n \"\"\",\n filter_type=\"subtree\",\n xmldict=True,\n )\n assert \"MgmtEth0/0/CPU0/0\" == result[DEVICE_NAME].result[\"xml_dict\"][\"data\"][\"interfaces\"][\"interface\"][0][\"name\"]\n assert result[DEVICE_NAME].result[\"rpc\"]\n assert result[DEVICE_NAME].result[\"rpc\"].data_xml\n # with open(\"tests/test_data/get-iosxr-config.xml\", \"w+\") as file:\n # file.write(result[DEVICE_NAME].result[\"rpc\"].data_xml)",
"def getapxs_location():\n return getconfigure_option(\"APXS\")",
"def extract_from_config(self):\n self.config_file = self.get_the_right_config(self.port)\n config_data = self.loads(self.read_file(self.config_file))\n for k,v in {'max_download': 'max_download_connections', 'peer_id': 'peer_id'}.items():\n self.items[k] = int(config_data['connection_config'][v])\n self.items['MaxDownloadConnection'] = self.items['max_download']\n return self",
"def parse_main(self):\n try:\n self.common_config[\"debug\"] = self.config.get('main', 'debug')\n except ConfigParser.NoOptionError:\n self.common_config[\"debug\"] = \"FALSE\"\n \n try:\n conf_local_ip = self.config.get('main', 'local_ip')\n if is_valid_ipv4_address(conf_local_ip):\n self.common_config[\"local_ip\"] = conf_local_ip\n \n elif conf_local_ip == \"default\": #if loca_if == \"default\" try to reach google.com\n try:\n self.common_config[\"local_ip\"] = get_ip_address()\n except Exception, e:\n self.logger.configError(\"cannot discover local ip address: %s\" % e)\n sys.exit(1)\n\n else: #network interface name\n try:\n self.common_config[\"local_ip\"] = get_ip_address_ifname(conf_local_ip)\n except Exception, e:\n self.logger.configError(\"cannot determine ip address of %s interface: %s\" % (conf_local_ip, e))\n sys.exit(1)\n\n except ConfigParser.NoOptionError: \n self.logger.configError(\"Missing mandatory parameters in config file, bailing out!\")\n sys.exit(1)\n\n try:\n log_file = self.common_config[\"log_file\"] = self.config.get('main', 'log_file') \n if log_file.startswith(\"syslog\"):\n try:\n syslog_host = log_file.split(\":\")[1]\n except IndexError:\n syslog_host = 'localhost'\n try:\n syslog_port = int(log_file.split(\":\")[2])\n except IndexError:\n syslog_port = 514\n try:\n syslog_facility = log_file.split(\":\")[3]\n except IndexError:\n syslog_facility = logging.handlers.SysLogHandler.LOG_USER\n self.logger.debugMessage(\"Logging to syslog (host: %s, port: %s, facility: %s)\" % ((syslog_host, syslog_port, syslog_facility)))\n self.common_config[\"conf_log_handler\"] = logging.handlers.SysLogHandler((syslog_host, syslog_port), syslog_facility)\n else:\n self.logger.debugMessage(\"Logging to file: %s\" % log_file)\n try:\n self.common_config[\"conf_log_handler\"] = logging.FileHandler(log_file)\n except IOError, e:\n self.logger.configError(\"cannot access to the log file: %s\" % e)\n sys.exit(1)\n \n except ConfigParser.NoOptionError: \n # no log defined in config file\n self.common_config[\"conf_log_handler\"] = None\n \n try:\n self.common_config[\"daemon\"] = self.config.get('main', 'daemon')\n except ConfigParser.NoOptionError:\n self.common_config[\"daemon\"] = None\n try:\n self.common_config[\"pid_file\"] = self.config.get('main', 'pid_file')\n except ConfigParser.NoOptionError:\n self.common_config[\"pid_file\"] = None\n\n \n return self.common_config"
] | [
"0.5860691",
"0.53012323",
"0.52924263",
"0.5150295",
"0.50907314",
"0.49680468",
"0.49017316",
"0.4875954",
"0.48654738",
"0.48572677",
"0.48541096",
"0.48308924",
"0.48128533",
"0.4808714",
"0.47831556",
"0.47818446",
"0.4748246",
"0.47355053",
"0.47156107",
"0.47149676",
"0.4703369",
"0.47015816",
"0.46948195",
"0.46746027",
"0.4669662",
"0.46526945",
"0.4652081",
"0.46165943",
"0.46126708",
"0.46048418"
] | 0.56708354 | 1 |
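Editorial note: the negative passages in the record above are variations on one theme, reading service settings out of an INI-style file with ConfigParser. A minimal sketch of that pattern follows; the file name settings.ini and the [manager] section with api_username, api_key and api_url mirror the first negative (_extract_settings) and are illustrative assumptions only.

import configparser

def extract_settings(path="settings.ini"):
    # Read the [manager] section; a missing section or option is reported as one
    # error, matching the behaviour sketched in the _extract_settings negative above.
    parser = configparser.ConfigParser()
    parser.read(path)
    try:
        username = parser.get("manager", "api_username")
        api_key = parser.get("manager", "api_key")
        api_url = parser.get("manager", "api_url")
    except (configparser.NoSectionError, configparser.NoOptionError):
        raise ValueError("Missing configuration: manager")
    # Normalise the URL scheme exactly as the quoted snippet does.
    if not api_url.startswith("http://"):
        api_url = "http://" + api_url
    return api_url, username, api_key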
Called when a new ONU is discovered and VOLTHA device adapter needs to be informed | def activate_onu(self, onu):
if self.olt.autoactivate:
self.log.info('activate-onu', onu=onu)
olt = self.olt
adapter = self.adapter_agent
channel_id = onu.onu_vid
proxy = onu.proxy_address
# NOTE: The following method will be deprecated. Use xPON
adapter.child_device_detected(parent_device_id=olt.device_id,
parent_port_no=self._port_no,
child_device_type=onu.vendor_id,
proxy_address=proxy,
admin_state=AdminState.ENABLED,
vlan=channel_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def device_discovered():\n event.set()",
"def on_connect(client, userdata, flags, rc):\n\t# subscribe to the LEDs topic when connected\n\tclient.subscribe(\"SNHU/IT697/leds\")",
"def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)",
"def on_connect(client, userdata, flags, rcdata):\n client.subscribe(\"diy/system/fire\", 1)\n client.subscribe(\"diy/system/panic\", 1)\n client.subscribe(\"diy/system/who\", 1)",
"def _onconnect(self):\n\n pass",
"def discover(self):\n self.ola_thread.run_discovery(self.universe.get(), self._upon_discover)\n if self.auto_disc.get():\n self.ola_thread.add_event(5000, self.discover)\n else: \n print \"auto_disc is off\"",
"def device_connect(self):\n pass",
"def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))",
"def on_connect(unused_client, unused_userdata, unused_flags, rc):\n print('on_connect', mqtt.connack_string(rc))\n\n # This is the topic that the device will receive configuration updates on.\n mqtt_config_topic = '/devices/{}/config'.format(device_id)\n # Subscribe to the config topic.\n client.subscribe(mqtt_config_topic, qos=1)\n\n status_light.on()",
"def _onu_discovery_init_complete(self, _):\n delay = self._no_onu_discover_tick if len(self._onus) == 0 else self._discovery_tick\n delay += random.uniform(-delay / 10, delay / 10)\n self._discovery_deferred = reactor.callLater(delay, self._discover_onus)",
"def on_connect(self, client, userdata, flags, rc):\n# client.subscribe(\"power_meter/status/#\")\n client.subscribe(self.mqtt_topic_status)\n client.subscribe(self.mqtt_topic_electricity + '/#')\n client.subscribe(self.mqtt_topic_gas + '/#')\n client.subscribe(self.mqtt_topic_water + '/#')\n self.mqtt_client.publish(self.mqtt_topic_last_will, \"online, \" + str(self.dconn), qos=0, retain=True)\n self.connected = True\n self.log.warning(\"Connected with result code: \" + str(rc))\n self.log.info(\"Connected to: \" + MQTT_SERVER)",
"def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")",
"def on_connect(client, userdata, flags, rc):\n client.subscribe(mqtt_Light_topic)",
"def AutoConnect(self, fresh):\n if fresh:\n self.Scan()\n if self.CheckPluggedIn():\n self._wired_autoconnect()\n else:\n self._wireless_autoconnect()",
"def on_hid_pnp(self, hid_event = None):\r\n # keep old reference for UI updates\r\n old_device = self.device\r\n\r\n if hid_event:\r\n print(\"Hey, a hid device just %s!\" % hid_event)\r\n \r\n if hid_event == \"connected\":\r\n # test if our device is available\r\n if self.device:\r\n # see, at this point we could detect multiple devices!\r\n # but... we only want just one\r\n pass\r\n else:\r\n self.test_for_connection()\r\n elif hid_event == \"disconnected\":\r\n # the hid object is automatically closed on disconnection we just\r\n # test if still is plugged (important as the object might be\r\n # closing)\r\n if self.device and not self.device.is_plugged():\r\n self.device = None\r\n print(\"you removed my hid device!\")\r\n else:\r\n # poll for devices\r\n self.test_for_connection()\r\n\r\n if old_device != self.device:\r\n # update ui\r\n pass",
"def on_connect(self, client, userdata, flags, rc):\n\n logger.info(f'Connected to {self.topic} with result code {rc}')\n # self.client.publish('Augmented/A.V.A.', str(rc)) # For return the connection situation to the subscriber device.\n if rc == 0:\n self.is_connected = True\n self.client.subscribe(self.topic)",
"def on(self) -> None:\n ...",
"def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)",
"def _on_connection(self, *_):\n\n if self._user_apc_token is not None:\n # run asynchronous auto configure.\n self._controller._thread_loop.run_async(self._autoconfigure_run)",
"def onConnect(self, fetcher, connectionRespInfo): #$NON-NLS-1$\r",
"def on_mqtt_announce(self, topic, payload, qos=None, retain=None):\n wrk = topic.split('/')\n topic_type = wrk[0]\n tasmota_topic = wrk[1]\n info_topic = wrk[2]\n self.logger.info(f\"on_mqtt_announce: type={topic_type}, device={tasmota_topic}, info_topic={info_topic}, payload={payload}\")\n\n if not self.tasmota_devices.get(tasmota_topic, None):\n self.tasmota_devices[tasmota_topic] = {}\n self.tasmota_devices[tasmota_topic]['connected_to_item'] = False\n\n # ask for status info of this newly discovered tasmota device\n self.publish_topic('cmnd/' + tasmota_topic + '/STATUS', 0)\n\n if info_topic == 'LWT':\n self.tasmota_devices[tasmota_topic]['online'] = payload\n self.tasmota_devices[tasmota_topic]['online_timeout'] = datetime.now()+timedelta(seconds=self.telemetry_period+5)\n #self.logger.info(f\" - new 'online_timeout'={self.tasmota_devices[tasmota_topic]['online_timeout']}\")\n self.set_item_value(tasmota_topic, 'item_online', payload, info_topic)\n\n if info_topic == 'STATE':\n self.tasmota_devices[tasmota_topic]['uptime'] = payload.get('Uptime', '-')\n self.set_item_value(tasmota_topic, 'item_relay', payload.get('POWER','OFF') == 'ON', info_topic)\n\n self.tasmota_devices[tasmota_topic]['online_timeout'] = datetime.now()+timedelta(seconds=self.telemetry_period+5)\n self.set_item_value(tasmota_topic, 'item_online', True, info_topic)\n #self.logger.info(f\" - new 'online_timeout'={self.tasmota_devices[tasmota_topic]['online_timeout']}\")\n\n if info_topic == 'SENSOR':\n energy = payload.get('ENERGY', None)\n if energy is not None:\n self.tasmota_devices[tasmota_topic]['sensortype'] = 'ENERGY'\n self.tasmota_devices[tasmota_topic]['energy_sensors']['voltage'] = energy['Voltage']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['current'] = energy['Current']\n # Leistung, Scheinleistung, Blindleistung\n self.tasmota_devices[tasmota_topic]['energy_sensors']['power'] = energy['Power']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['apparent_power'] = energy['ApparentPower']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['reactive_power'] = energy['ReactivePower']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['factor'] = energy['Factor']\n # Verbrauch\n self.tasmota_devices[tasmota_topic]['energy_sensors']['total_starttime'] = energy['TotalStartTime']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['total'] = energy['Total']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['yesterday'] = energy['Yesterday']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['today'] = energy['Today']\n self.tasmota_devices[tasmota_topic]['energy_sensors']['period'] = energy.get('Period', None)\n\n self.set_item_value(tasmota_topic, 'item_voltage', self.tasmota_devices[tasmota_topic]['energy_sensors']['voltage'], info_topic)\n self.set_item_value(tasmota_topic, 'item_current', self.tasmota_devices[tasmota_topic]['energy_sensors']['current'], info_topic)\n self.set_item_value(tasmota_topic, 'item_power', self.tasmota_devices[tasmota_topic]['energy_sensors']['power'], info_topic)\n self.set_item_value(tasmota_topic, 'item_power_total', self.tasmota_devices[tasmota_topic]['energy_sensors']['total'], info_topic)\n self.set_item_value(tasmota_topic, 'item_power_yesterday', self.tasmota_devices[tasmota_topic]['energy_sensors']['yesterday'], info_topic)\n self.set_item_value(tasmota_topic, 'item_power_today', self.tasmota_devices[tasmota_topic]['energy_sensors']['today'], info_topic)\n\n self.tasmota_devices[tasmota_topic]['online_timeout'] = 
datetime.now()+timedelta(seconds=self.telemetry_period+5)\n self.set_item_value(tasmota_topic, 'item_online', True, info_topic)\n\n if info_topic == 'STATUS':\n fn = payload['Status'].get('FriendlyName', '')\n if fn != '':\n if fn[0] == '[' and fn[-1] == ']':\n fn = fn[1:-1]\n self.tasmota_devices[tasmota_topic]['friendly_name'] = fn\n self.set_item_value(tasmota_topic, 'item_relay', payload['Status'].get('Power', 0), info_topic)\n\n if info_topic == 'STATUS2':\n self.tasmota_devices[tasmota_topic]['fw_ver'] = payload['StatusFWR'].get('Version', '')\n if info_topic == 'STATUS5':\n self.tasmota_devices[tasmota_topic]['ip'] = payload['StatusNET'].get('IPAddress', '')\n self.tasmota_devices[tasmota_topic]['mac'] = payload['StatusNET'].get('Mac', '')\n\n if info_topic == 'STATUS9':\n #self.logger.info(f\"Topic={topic}, tasmota_topic={tasmota_topic}, info_topic={info_topic}\")\n #self.logger.info(f\" - Payload={payload}\")\n StatusPTH = payload.get('StatusPTH', {})\n #self.logger.info(f\" - StatusPTH={StatusPTH}\")\n\n # Get info direct after boot of client\n if info_topic == 'INFO1':\n self.tasmota_devices[tasmota_topic]['fw_ver'] = payload.get('Version', '')\n if info_topic == 'INFO2':\n self.tasmota_devices[tasmota_topic]['ip'] = payload.get('IPAddress', '')\n if info_topic == 'INFO3':\n restart_reason = payload.get('RestartReason', '')\n self.logger.warning(f\"Device {tasmota_topic} (IP={self.tasmota_devices[tasmota_topic]['ip']}) just startet. Reason={restart_reason}\")\n\n return",
"def on_connect(self, mqtt_client, userdata, flags, rc):\n global connack\n logging.debug(\"DEBUG - Connected to broker\")\n connack = True",
"def on_connect(unused_client, unused_userdata, unused_flags, rc):\n\tprint('on_connect', mqtt.connack_string(rc))\n\n\t# After a successful connect, reset backoff time and stop backing off.\n\tglobal should_backoff\n\tglobal minimum_backoff_time\n\tshould_backoff = False\n\tminimum_backoff_time = 1",
"def on(self) -> None:",
"def _get_onu_info(self, serial_number):\n try:\n from flow.demo_data import get_tconts, get_gem_ports, get_onu_id\n \n if self.activation_method == \"autoactivate\":\n onu_id = get_onu_id(serial_number)\n if onu_id is None:\n onu_id = self.get_next_onu_id()\n enabled = True\n channel_speed = 0\n tconts = get_tconts(serial_number, onu_id)\n gem_ports = get_gem_ports(serial_number, onu_id)\n vont_ani = None\n\n elif self.activation_method == \"autodiscovery\":\n if self.authentication_method == 'serial-number':\n gpon_info = self.olt.get_xpon_info(self.pon_id)\n\n try:\n # TODO: Change iteration to itervalues below\n vont_info = next(info for _, info in gpon_info['v-ont-anis'].items()\n if info.get('expected-serial-number') == serial_number)\n vont_ani = vont_info['data']\n\n onu_id = vont_info['onu-id']\n enabled = vont_info['enabled']\n channel_speed = vont_info['upstream-channel-speed']\n\n tconts = {key: val for key, val in gpon_info['tconts'].iteritems()\n if val.vont_ani == vont_info['name']}\n tcont_names = set(tconts.keys())\n\n gem_ports = {key: val for key, val in gpon_info['gem-ports'].iteritems()\n if val.tconf_ref in tcont_names}\n\n except StopIteration:\n self.log.debug('no-vont-ony')\n return None # Can happen if vont-ani/serial-number has not yet been configured\n else:\n self.log.debug('not-serial-number-authentication')\n return None\n else:\n self.log.debug('not-auto-discovery')\n return None\n\n onu_info = {\n 'device-id': self.olt.device_id,\n 'serial-number': serial_number,\n 'xpon-name': None,\n 'pon': self,\n 'onu-id': onu_id,\n 'enabled': enabled,\n 'upstream-channel-speed': channel_speed,\n 'password': Onu.DEFAULT_PASSWORD,\n 't-conts': tconts,\n 'gem-ports': gem_ports,\n 'onu-vid': self.olt.get_channel_id(self._pon_id, onu_id),\n 'channel-id': self.olt.get_channel_id(self._pon_id, onu_id),\n 'vont-ani': vont_ani\n }\n # Hold off ONU activation until at least one GEM Port is defined.\n self.log.debug('onu-info', gem_ports=gem_ports)\n\n return onu_info\n # return onu_info if len(gem_ports) > 0 else None\n\n except Exception as e:\n self.log.exception('get-onu-info', e=e)\n return None",
"def up_callback(self):\n self.rokucontrol.up_callback()",
"def on_connect():\n print('Market Data Socket connected successfully!')\n\n # Subscribe to instruments\n print('Sending subscription request for Instruments - \\n' + str(Instruments))\n response = xt.send_subscription(Instruments, 1501)\n print('Sent Subscription request!')\n print(\"Subscription response: \", response)",
"def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)",
"def on_connect(self, client, userdata, flags, rc):\n\n\t\tself.subscribe(\"system\")\n\t\tprint (\"[{}] Client connected\".format(\n\t\t\tint(time.time())\n\t\t))",
"def __init__(self):\n super(UpnpEmbeddedDevice, self).__init__()\n return"
] | [
"0.65213245",
"0.6183199",
"0.6078185",
"0.6038242",
"0.6008273",
"0.5960707",
"0.59480184",
"0.5902333",
"0.58434427",
"0.5833241",
"0.5729351",
"0.5601769",
"0.55898815",
"0.55544865",
"0.55472565",
"0.55431855",
"0.55277807",
"0.55186784",
"0.5476209",
"0.5475365",
"0.5475079",
"0.54739404",
"0.54557824",
"0.54533553",
"0.54478383",
"0.5436862",
"0.5426197",
"0.5425805",
"0.54235476",
"0.54140216"
] | 0.6676301 | 0 |
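Editorial note: the positive passage above only fires child_device_detected when the OLT is configured to auto-activate a newly discovered ONU. The short sketch below exercises that gate with stand-in objects built from unittest.mock; the stubs, the free-standing function, and the example vendor/VLAN values are assumptions for illustration and are not VOLTHA code.

from unittest.mock import MagicMock

ENABLED = "ENABLED"  # stand-in for VOLTHA's AdminState.ENABLED

def activate_onu(port, onu):
    # Mirror of the call pattern in the passage: notify the core only when
    # auto-activation is enabled on the parent OLT device.
    if port.olt.autoactivate:
        port.adapter_agent.child_device_detected(
            parent_device_id=port.olt.device_id,
            parent_port_no=port.port_no,
            child_device_type=onu.vendor_id,
            proxy_address=onu.proxy_address,
            admin_state=ENABLED,
            vlan=onu.onu_vid)

# Exercise the gate with mock objects standing in for the OLT, port and ONU.
port = MagicMock()
port.olt.autoactivate = True
onu = MagicMock(vendor_id="ADTN", onu_vid=1025)
activate_onu(port, onu)
assert port.adapter_agent.child_device_detected.called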
Delete/enable/disable a specified channel partition on this PON. When creating a new Channel Partition, create it disabled, then define any associated Channel Pairs. Then enable the Channel Partition. | def channel_partition(self, name, partition=0, xpon_system=0, operation=None):
if operation.lower() not in ['delete', 'enable', 'disable']:
raise ValueError('Unsupported operation: {}'.format(operation))
try:
xml = '<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">'
if operation.lower() == 'delete':
xml += '<interface operation="delete">'
else:
xml += '<interface>'
xml += '<type xmlns:adtn-xp="http://www.adtran.com/ns/yang/adtran-xpon">' +\
'adtn-xp:xpon-channel-partition</type>'
xml += '<adtn-xp:channel-partition xmlns:adtn-xp="http://www.adtran.com/ns/yang/adtran-xpon">'
xml += ' <adtn-xp:partition-id>{}</adtn-xp:partition-id>'.format(partition)
xml += ' <adtn-xp:xpon-system>{}</adtn-xp:xpon-system>'.format(xpon_system)
xml += '</adtn-xp:channel-partition>'
xml += '<enabled>{}</enabled>'.format('true' if operation.lower() == 'enable' else 'false')
xml += '<name>{}</name>'.format(name)
xml += '</interface></interfaces>'
results = yield self.olt.netconf_client.edit_config(xml)
returnValue(results)
except Exception as e:
self.log.exception('channel_partition')
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def channel_pair(self, name, partition, operation=None, **kwargs):\n if operation.lower() not in ['delete', 'enable', 'disable']:\n raise ValueError('Unsupported operation: {}'.format(operation))\n\n try:\n xml = 'interfaces xmlns=\"urn:ietf:params:xml:ns:yang:ietf-interfaces\"'\n\n if operation.lower() is 'delete':\n xml += '<interface operation=\"delete\">'\n else:\n xml += '<interface>'\n xml += '<type xmlns:adtn-xp=\"http://www.adtran.com/ns/yang/adtran-xpon\">' +\\\n 'adtn-xp:xpon-channel-pair</type>'\n xml += '<adtn-xp:channel-pair xmlns:adtn-xp=\"http://www.adtran.com/ns/yang/adtran-xpon\">'\n xml += ' <adtn-xp:channel-partition>{}</adtn-xp:channel-partition>'.format(partition)\n xml += ' <adtn-xp:channel-termination>channel-termination {}</adtn-xp:channel-termination>'.\\\n format(self.pon_id)\n xml += ' <adtn-xp:upstream-admin-label>{}</adtn-xp:upstream-admin-label>'.\\\n format(kwargs.get('upstream-admin-label', 1))\n xml += ' <adtn-xp:downstream-admin-label>{}</adtn-xp:downstream-admin-label>'.\\\n format(kwargs.get('downstream-admin-label', 1))\n xml += ' <adtn-xp:upstream-channel-id>{}</adtn-xp:upstream-channel-id>'.\\\n format(kwargs.get('upstream-channel-id', 15))\n xml += ' <adtn-xp:downstream-channel-id>{}</adtn-xp:downstream-channel-id>'.\\\n format(kwargs.get('downstream-channel-id', 15))\n xml += ' <adtn-xp:downstream-channel-fec-enable>{}</adtn-xp:downstream-channel-fec-enable>'. \\\n format('true' if kwargs.get('downstream-channel-fec-enable', True) else 'false')\n xml += ' <adtn-xp:upstream-channel-fec-enable>{}</adtn-xp:upstream-channel-fec-enable>'. \\\n format('true' if kwargs.get('upstream-channel-fec-enable', True) else 'false')\n xml += '</adtn-xp:channel-pair>'\n # TODO: Add support for upstream/downstream FEC-enable coming from here and not hard-coded\n\n xml += '<name>{}</name>'.format(name)\n xml += '</interface></interfaces>'\n\n results = yield self.olt.netconf_client.edit_config(xml)\n returnValue(results)\n\n except Exception as e:\n self.log.exception('channel_pair')\n raise",
"def set_partition(self, partition=0):\n if not isinstance(partition, int):\n raise TypeError('partition must be an integer')\n if partition <= 0:\n raise ValueError('partition must be positive')\n if self.connected:\n self.producer.send(\"PART:\"+str(partition))",
"def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()",
"def delete_partition(self, partition):\n raise NotImplementedError('delete_file')",
"def newpart(self, device, primary, ncyls, swap=False):\n # This is a simple partitioning tool, which only supports\n # adding partitions sequentially, with all primary partitions\n # being before the extended partition, so once a logical\n # partition has been added, it is not possible to add further\n # primary ones.\n di = DiskInfo(device)\n pmax = 0 # Record highest partition number\n lim = -1 # Used for seeking last used cylinder\n exp = 0 # Number of extended partition\n ex0, ex1 = 0, -1 # Extended partition start and end\n log0, log1 = 0, -1 # Start and end of area used by logical partitions\n for p in di.parts:\n pn = int(p[0][len(device):])\n scyl, ecyl = p[1:3]\n if pn <= 4:\n if exp:\n run_error(_(\"Not supported: primary partition (%s%d)\\n\"\n \"has higher partition number than extended \"\n \"partition\") % (device, pn))\n return \"\"\n if scyl <= lim:\n run_error(_(\"Partitions must be ordered on the device.\\n\"\n \"%s%d is out of order.\") % (device, pn))\n return \"\"\n if p[3] in (\"5\", \"f\"):\n # extended\n exp = pn\n ex0, ex1 = scyl, ecyl\n continue\n pmax = pn\n lim = ecyl\n\n startcyl = lim + 1\n endcyl = lim + ncyls\n if endcyl >= di.drvcyls:\n run_error(_(\"Too little space at end of drive for new partition\"))\n return \"\"\n if exp and (pmax <= 4):\n # Remove the extended partition, which is empty anyway\n if not self.rmpart(device, exp):\n return \"\"\n pmax = exp - 1\n if primary:\n if pmax >= 4:\n run_error(_(\"Cannot add primary partition to %s\") % device)\n return \"\"\n t = \"primary\"\n else:\n t = \"logical\"\n if pmax > 4:\n # resize extended partition\n if not self.xcheck(\"resizepart\", device, str(exp),\n str(ex0), str(endcyl),\n onfail=_(\"Couldn't resize extended partition %s%d\")\n % (device, exp)):\n return False\n else:\n # create extended partition\n if not self.xcheck(\"newpart\", device,\n str(startcyl), str(endcyl), \"extended\",\n onfail=_(\"Couldn't create extended partition on %s\")\n % device):\n return False\n if pmax < 4:\n pmax = 4\n\n if self.xcheck(\"newpart\", device, str(startcyl), str(endcyl),\n t, \"linux-swap\" if swap else \"ext2\"):\n return \"%s%d\" % (device, pmax + 1)\n else:\n run_error(_(\"Couldn't add new partition to %s\") % device)\n return \"\"",
"def _lock_partition(self, partition, shared=False):\n pass",
"def delete_partition(self, bulk_ad_group_product_partition):\n\n if bulk_ad_group_product_partition is not None and bulk_ad_group_product_partition.ad_group_criterion is not None:\n bulk_ad_group_product_partition.ad_group_criterion.AdGroupId=self._ad_group_id\n bulk_ad_group_product_partition.ad_group_criterion.Status='Deleted'\n if hasattr(bulk_ad_group_product_partition.ad_group_criterion, 'EditorialStatus'):\n bulk_ad_group_product_partition.ad_group_criterion.EditorialStatus=None\n self._partition_actions.append(bulk_ad_group_product_partition)",
"def deleteChannel(self, channelIndex):\n ch = self.channels[channelIndex]\n if ch.role != channel_pb2.Channel.Role.SECONDARY:\n raise Exception(\"Only SECONDARY channels can be deleted\")\n\n # we are careful here because if we move the \"admin\" channel the channelIndex we need to use\n # for sending admin channels will also change\n adminIndex = self.iface.localNode._getAdminChannelIndex()\n\n self.channels.pop(channelIndex)\n self._fixupChannels() # expand back to 8 channels\n\n index = channelIndex\n while index < self.iface.myInfo.max_channels:\n self.writeChannel(index, adminIndex=adminIndex)\n index += 1\n\n # if we are updating the local node, we might end up *moving* the admin channel index as we are writing\n if (self.iface.localNode == self) and index >= adminIndex:\n # We've now passed the old location for admin index (and writen it), so we can start finding it by name again\n adminIndex = 0",
"def part(self, channel):\n raise NotImplementedError",
"def delete_net_partition(self, netpartition):\r\n return self.delete(self.net_partition_path % netpartition)",
"def on_partition_change(self, new_partitions):\n if new_partitions is None:\n self.conn.create(self.partition_path, value=self.partitions)\n return\n\n if new_partitions != self.partitions:\n self.partitions = new_partitions\n self.rebalance()\n\n self.partitions_collected.set()",
"def _destroyedComputePartition(self, compute_node_id, compute_partition_id):\n instance = self._getSoftwareInstanceForComputePartition(\n compute_node_id,\n compute_partition_id)\n\n if instance.getSlapState() == 'destroy_requested':\n if instance.getValidationState() == 'validated':\n instance.invalidate()\n for login in instance.objectValues(portal_type=\"Certificate Login\"):\n if login.getValidationState() == 'validated':\n login.invalidate()",
"async def togglechannel(self, ctx, channel):\r\n\r\n user = ctx.message.author\r\n channel = await commands.clean_content().convert(ctx, channel)\r\n await ctx.message.delete()\r\n\r\n if channel == \"nsfw\":\r\n\r\n if self.bot.nsfw_role in user.roles:\r\n await user.remove_roles(self.bot.nsfw_role)\r\n await user.send(\"Access to NSFW channels revoked.\")\r\n else:\r\n await user.add_roles(self.bot.nsfw_role)\r\n await user.send(\"Access to NSFW channels granted.\")\r\n else:\r\n await user.send(\"{} is not a togglable channel.\".format(channel))",
"def update_partition(c, r_d, numnodes):\n host, port, f = ClusterCFG.parse_uri(c)\n\n # Create our socket.\n sock = Network.open_client(host, port)\n if ErrorHandle.is_error(sock):\n return ErrorHandle.wrap_error_tag('Socket could not be established.')\n\n # Pickle our command list ('K', f, r_d, numnodes), and send our message.\n Network.write(sock, ['K', f, r_d, numnodes])\n\n # Wait for a response to be sent back, and record this response.\n net_handler = lambda e: Network.close_wrapper(e, ErrorHandle.default_handler, sock)\n response = Network.read(sock, net_handler)\n\n # If an error exists, return the error.\n if ErrorHandle.is_error(response):\n return response\n\n # Otherwise, return the success message.\n return 'Success'",
"def _wipe(self):\n log_method_call(self, self.name, status=self.status)\n\n start = self.partedPartition.geometry.start\n part_len = self.partedPartition.geometry.end - start\n bs = self.partedPartition.geometry.device.sectorSize\n device = self.partedPartition.geometry.device.path\n\n # Erase 1MiB or to end of partition\n count = int(Size(\"1 MiB\") / bs)\n count = min(count, part_len)\n\n cmd = [\"dd\", \"if=/dev/zero\", \"of=%s\" % device, \"bs=%s\" % bs,\n \"seek=%s\" % start, \"count=%s\" % count]\n try:\n util.run_program(cmd)\n except OSError as e:\n log.error(str(e))\n finally:\n # If a udev device is created with the watch option, then\n # a change uevent is synthesized and we need to wait for\n # things to settle.\n udev.settle()",
"def send_part(self, channel) -> None:\n\n self.send_line('PART {}'.format(channel))",
"def on_delete_clicked(self,button):\n\t\tself.list_partitions.delete_selected_partition()",
"def addPartition(self,partitionData):\n self.PCAs[partitionData.id] = partitionData\n self.pcaStatemachineLock[partitionData.id] = threading.Lock()\n self.StateMachineForPca[partitionData.id] = Statemachine(self.StateMachineFile,\"Unconfigured\")\n self.isPCAinTransition[partitionData.id] = False\n self.pcaSequenceNumber[partitionData.id] = 0",
"def _lock_partition(self, partition, shared=False):\n # first we open a shared lock on all partitions, so that we don't interfere with concurrent\n # locks on all partitions or operations that could attempt to open a lock on all partitions\n # while we've locked only some partitions\n self._lock_all_partitions(shared=True)\n\n # Postgres advisory locks use integers, so we have to convert the partition string into\n # an integer. To do this we use crc32, which returns an unsigned integer. When using two\n # keys for advisory locks, the two keys are signed integers, so we have to adjust the crc32\n # value so that it doesn't exceed the maximum signed integer. Turning the partition str into\n # a crc32 value could produce the same integer for different partitions, but for the\n # purposes of locking to manage concurrency, this shouldn't be an issue.\n partition_int = binascii.crc32(partition.encode(\"utf-8\")) - SIGNED_MAX_INTEGER\n self._execute_lock(LOCK_PARTITION, key2=partition_int, shared=shared)",
"def delete_partition(self, partition_spec, if_exists=False, async_=False, hints=None):\n return self.partitions.delete(\n partition_spec, if_exists=if_exists, hints=hints, async_=async_\n )",
"def get_volume_connector(self, instance):\n props = {}\n # 'get_volume_connector' will be invoked during creation\n # of the partition and during deletion of the partition.\n # But 'wwpns' we can access only when partition is available.\n # During spawn flow 'get_volume_connector' function will be called\n # before 'spawn' function so to get 'wwpns' we first creating\n # the partition using 'prep_for_spawn' function so that\n # we can access 'wwpns'.(i.e - else part)\n # But during deletion 'get_volume_connector' will be called\n # after 'destroy' function which will delete the partition so\n # after that we can not get the 'wwpns'\n # In order to get 'wwpns' after 'destroy' function we are\n # saving 'wwpns' before deleting partition in 'destroy' function\n # in 'deleted_instance_wwpns_mapping' variable and using these 'wwpns'\n # in 'get_volume_connector'(i.e - if part)\n # after using these 'wwpns' we are removing these 'wwpns' from\n # 'deleted_instance_wwpns_mapping' variable because\n # we are not going to use these 'wwpns' any more after this.\n if instance.uuid in self.deleted_instance_wwpns_mapping:\n props['wwpns'] = self.deleted_instance_wwpns_mapping.pop(\n instance.uuid)\n else:\n inst = vm.PartitionInstance(instance, self._cpc)\n props['wwpns'] = inst.get_partition_wwpns()\n\n props['host'] = instance.uuid\n\n return props",
"def do_configure_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_configure_partition: part: %s\", part)",
"def enableOrDisableFeature(self, enable):\n\n validator = LogicalNvdimmValidator()\n\n scalable_pmem_config = ScalablePersistentMemoryConfig(self._restHelpers,\\\n validator, self._chif_lib)\n scalable_pmem_config.refresh()\n\n # pre-validation\n self._helpers.validateFeatureIsSupported(scalable_pmem_config)\n self._helpers.validateFunctionalityIsEnabled(scalable_pmem_config)\n\n if enable is False:\n # If user disables Scalable PMEM, revert any pending changes to\n # prevent data or configuration loss\n if self._rdmc.interactive:\n message = u\"Warning: disabling Scalable Persistent Memory will \"\\\n \"revert any pending configuration changes.\\n\"\n self._helpers.confirmChanges(message=message)\n self._restHelpers.revertSettings()\n\n patchAttributes = {\n \"FeatureEnabled\" : enable\n }\n _ = self._restHelpers.patchScalablePmemSettingAttributes(patchAttributes)\n\n sys.stdout.write(u\"\\nThe Scalable Persistent Memory feature has been \"\\\n \"set to: {}\\n\".format(\"Enabled\" if enable else \"Disabled\"))\n\n self._helpers.noticeRestartRequired(scalable_pmem_config)\n\n sys.stdout.write(\"\\n\\n\")",
"def create_partition(self, partition_spec, if_not_exists=False, async_=False, hints=None):\n return self.partitions.create(\n partition_spec, if_not_exists=if_not_exists, hints=hints, async_=async_\n )",
"async def togglechannel(self, ctx, channel):\n\n user = ctx.message.author\n await ctx.message.delete()\n\n if channel == \"nsfw\":\n\n if self.bot.nsfw_role in user.roles:\n await user.remove_roles(self.bot.nsfw_role)\n await user.send(\"Access to NSFW channels revoked.\")\n else:\n await user.add_roles(self.bot.nsfw_role)\n await user.send(\"Access to NSFW channels granted.\")\n else:\n await user.send(\"{} is not a togglable channel.\".format(channel))",
"async def enable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation enabled.\"))",
"def requestComputerPartition(self, computer_id=None,\n computer_partition_id=None, software_release=None, software_type=None,\n partition_reference=None, partition_parameter_xml=None,\n filter_xml=None, state=None, shared_xml=_MARKER):\n return self._requestComputePartition(computer_id, computer_partition_id,\n software_release, software_type, partition_reference,\n shared_xml, partition_parameter_xml, filter_xml, state)",
"def _stoppedComputePartition(self, compute_node_id, compute_partition_id):\n instance = self._getSoftwareInstanceForComputePartition(\n compute_node_id,\n compute_partition_id)\n instance.setAccessStatus(\n 'Instance correctly stopped', \"stopped\", reindex=1)",
"def update_partition(self, bulk_ad_group_product_partition):\n\n if bulk_ad_group_product_partition is not None and bulk_ad_group_product_partition.ad_group_criterion is not None:\n bulk_ad_group_product_partition.ad_group_criterion.AdGroupId=self._ad_group_id\n bulk_ad_group_product_partition.ad_group_criterion.Status=None\n if hasattr(bulk_ad_group_product_partition.ad_group_criterion, 'EditorialStatus'):\n bulk_ad_group_product_partition.ad_group_criterion.EditorialStatus=None\n self._partition_actions.append(bulk_ad_group_product_partition)",
"def partition(data, num_partitions=None, by=None, **kwargs):\n return Component(\n \"Partition\",\n arguments={\n 'data': Component.of(data),\n 'num_partitions': Component.of(num_partitions),\n 'by': Component.of(by)\n },\n options={\n \n },\n constraints=kwargs)"
] | [
"0.5930615",
"0.5629349",
"0.5421836",
"0.52993655",
"0.52665484",
"0.5152782",
"0.5028327",
"0.49557593",
"0.48950937",
"0.4868015",
"0.4856679",
"0.47942936",
"0.47765732",
"0.47706714",
"0.47600013",
"0.4758206",
"0.47271204",
"0.47198334",
"0.4715526",
"0.47069684",
"0.4696373",
"0.46943992",
"0.46934894",
"0.4646655",
"0.46151233",
"0.4611658",
"0.46113786",
"0.45853618",
"0.45828003",
"0.45684263"
] | 0.7122026 | 0 |
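Editorial note: both this record and the next one build an ietf-interfaces <interface> payload and hand it to the OLT's NETCONF client, and the query above spells out the lifecycle (create the partition disabled, add its channel pairs, then enable it). The sketch below shows one way such a payload could be pushed with the ncclient library; the host, credentials, interface name and the choice of the running datastore are assumptions, and the real driver goes through self.olt.netconf_client rather than opening its own session.

from ncclient import manager

PARTITION_XML = """
<config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
  <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
    <interface>
      <name>channel-partition-1</name>
      <type xmlns:adtn-xp="http://www.adtran.com/ns/yang/adtran-xpon">adtn-xp:xpon-channel-partition</type>
      <enabled>false</enabled>
    </interface>
  </interfaces>
</config>
"""

def push_partition(host, username, password):
    # hostkey_verify=False is for lab use only; verify host keys in production.
    with manager.connect(host=host, port=830, username=username,
                         password=password, hostkey_verify=False) as m:
        return m.edit_config(target="running", config=PARTITION_XML)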
Create/delete a channel pair on a specific channel_partition for a PON | def channel_pair(self, name, partition, operation=None, **kwargs):
if operation.lower() not in ['delete', 'enable', 'disable']:
raise ValueError('Unsupported operation: {}'.format(operation))
try:
xml = '<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">'
if operation.lower() == 'delete':
xml += '<interface operation="delete">'
else:
xml += '<interface>'
xml += '<type xmlns:adtn-xp="http://www.adtran.com/ns/yang/adtran-xpon">' +\
'adtn-xp:xpon-channel-pair</type>'
xml += '<adtn-xp:channel-pair xmlns:adtn-xp="http://www.adtran.com/ns/yang/adtran-xpon">'
xml += ' <adtn-xp:channel-partition>{}</adtn-xp:channel-partition>'.format(partition)
xml += ' <adtn-xp:channel-termination>channel-termination {}</adtn-xp:channel-termination>'.\
format(self.pon_id)
xml += ' <adtn-xp:upstream-admin-label>{}</adtn-xp:upstream-admin-label>'.\
format(kwargs.get('upstream-admin-label', 1))
xml += ' <adtn-xp:downstream-admin-label>{}</adtn-xp:downstream-admin-label>'.\
format(kwargs.get('downstream-admin-label', 1))
xml += ' <adtn-xp:upstream-channel-id>{}</adtn-xp:upstream-channel-id>'.\
format(kwargs.get('upstream-channel-id', 15))
xml += ' <adtn-xp:downstream-channel-id>{}</adtn-xp:downstream-channel-id>'.\
format(kwargs.get('downstream-channel-id', 15))
xml += ' <adtn-xp:downstream-channel-fec-enable>{}</adtn-xp:downstream-channel-fec-enable>'. \
format('true' if kwargs.get('downstream-channel-fec-enable', True) else 'false')
xml += ' <adtn-xp:upstream-channel-fec-enable>{}</adtn-xp:upstream-channel-fec-enable>'. \
format('true' if kwargs.get('upstream-channel-fec-enable', True) else 'false')
xml += '</adtn-xp:channel-pair>'
# TODO: Add support for upstream/downstream FEC-enable coming from here and not hard-coded
xml += '<name>{}</name>'.format(name)
xml += '</interface></interfaces>'
results = yield self.olt.netconf_client.edit_config(xml)
returnValue(results)
except Exception as e:
self.log.exception('channel_pair')
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def channel_partition(self, name, partition=0, xpon_system=0, operation=None):\n if operation.lower() not in ['delete', 'enable', 'disable']:\n raise ValueError('Unsupported operation: {}'.format(operation))\n\n try:\n xml = 'interfaces xmlns=\"urn:ietf:params:xml:ns:yang:ietf-interfaces\"'\n\n if operation.lower() is 'delete':\n xml += '<interface operation=\"delete\">'\n else:\n xml += '<interface>'\n xml += '<type xmlns:adtn-xp=\"http://www.adtran.com/ns/yang/adtran-xpon\">' +\\\n 'adtn-xp:xpon-channel-partition</type>'\n xml += '<adtn-xp:channel-partition xmlns:adtn-xp=\"http://www.adtran.com/ns/yang/adtran-xpon\">'\n xml += ' <adtn-xp:partition-id>{}</adtn-xp:partition-id>'.format(partition)\n xml += ' <adtn-xp:xpon-system>{}</adtn-xp:xpon-system>'.format(xpon_system)\n xml += '</adtn-xp:channel-partition>'\n xml += '<enabled>{}</enabled>'.format('true' if operation.lower() == 'enable' else 'false')\n\n xml += '<name>{}</name>'.format(name)\n xml += '</interface></interfaces>'\n\n results = yield self.olt.netconf_client.edit_config(xml)\n returnValue(results)\n\n except Exception as e:\n self.log.exception('channel_partition')\n raise",
"def make_permutation(partition):\r\n P = Permutation()\r\n c = 0\r\n for j in range(len(partition)):\r\n a = []\r\n for h in range(partition[j]):\r\n a.append(c)\r\n c = c + 1 \r\n if (c == 1):\r\n P1 = Permutation()\r\n c = 0\r\n else:\r\n P1 = Permutation([a])\r\n P = P*P1\r\n return P",
"def set_partition(self, partition=0):\n if not isinstance(partition, int):\n raise TypeError('partition must be an integer')\n if partition <= 0:\n raise ValueError('partition must be positive')\n if self.connected:\n self.producer.send(\"PART:\"+str(partition))",
"def part(self, channel):\n raise NotImplementedError",
"def _createOwnPartition(self, databaseCursor, uniqueItems):\n self.logger.debug(\"%s - in createOwnPartition for %s\",threading.currentThread().getName(),self.name)\n for x in uniqueItems:\n #self.logger.debug(\"DEBUG - item value is %s\",x)\n partitionCreationParameters = self.partitionCreationParameters(x)\n partitionName = self.partitionNameTemplate % partitionCreationParameters[\"partitionName\"]\n if partitionWasCreated(partitionName):\n #self.logger.debug(\"DEBUG - skipping creation of %s\",partitionName)\n continue\n partitionCreationSql = self.partitionCreationSqlTemplate % partitionCreationParameters\n #self.logger.debug(\"%s - Sql for %s is %s\",threading.currentThread().getName(),self.name,partitionCreationSql)\n aPartition = Table(name=partitionName, logger=self.logger, creationSql=partitionCreationSql)\n self.logger.debug(\"%s - savepoint createPartitions_%s\",threading.currentThread().getName(), partitionName)\n databaseCursor.execute(\"savepoint createPartitions_%s\" % partitionName)\n try:\n self.logger.debug(\"%s - creating %s\", threading.currentThread().getName(), partitionName)\n aPartition._createSelf(databaseCursor)\n markPartitionCreated(partitionName)\n self.logger.debug(\"%s - successful - releasing savepoint\", threading.currentThread().getName())\n databaseCursor.execute(\"release savepoint createPartitions_%s\" % partitionName)\n except pg.ProgrammingError, x:\n self.logger.debug(\"%s -- Rolling back and releasing savepoint: Creating %s failed in createPartitions: %s\", threading.currentThread().getName(), partitionName, str(x).strip())\n databaseCursor.execute(\"rollback to createPartitions_%s; release savepoint createPartitions_%s;\" % (partitionName, partitionName))\n databaseCursor.connection.commit()",
"def test_update_preference_communication_channel_id(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass",
"def create_community_dict(partition, graph):\n it = 0\n communities = dict()\n for part in partition:\n for vertex in part:\n communities[graph.vs[vertex]['name']] = it\n it += 1\n return communities",
"async def create(self, ctx, public: Optional[bool] = False, *, name: str):\n data = await self.config.guild(ctx.guild).all()\n if data[\"private\"]:\n try:\n if ctx.author.voice.channel.id == data[\"pstart\"]:\n key = await self._generate_key(data[\"pchannels\"].keys())\n try:\n await ctx.author.send(\n _(\n \"The key to your private room is: ``{key}``\\nGive this key to a friend and ask them to use ``{command}`` to join your private room.\"\n ).format(key=key, command=f\"{ctx.clean_prefix}vc join {key}\")\n )\n except discord.Forbidden:\n await ctx.send(\n _(\"Couldn't send the key to your private channel via DM. Aborting...\")\n )\n return\n if public:\n ov = {\n ctx.author: discord.PermissionOverwrite(\n view_channel=True, connect=True, speak=True, manage_channels=True\n )\n }\n else:\n ov = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n view_channel=True, connect=False\n ),\n ctx.author: discord.PermissionOverwrite(\n view_channel=True, connect=True, speak=True, manage_channels=True\n ),\n }\n c = await ctx.guild.create_voice_channel(\n name,\n overwrites=ov,\n category=ctx.guild.get_channel(data[\"pcat\"]),\n reason=_(\"Private room\"),\n )\n await ctx.author.move_to(c, reason=_(\"Private channel.\"))\n data[\"pchannels\"][key] = c.id\n await self.config.guild(ctx.guild).pchannels.set(data[\"pchannels\"])\n else:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n except AttributeError:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n else:\n await ctx.send(_(\"Private rooms are not enabled on this server.\"))",
"def add_dummy_channel(P8gen, particle, remainder):\n pdg = P8gen.getPythiaInstance().particleData\n charge = pdg.charge(particle)\n if charge > 0:\n P8gen.SetParameters('{}:addChannel 1 {:.16} 0 22 -11'.format(particle, remainder))\n elif charge < 0:\n P8gen.SetParameters('{}:addChannel 1 {:.16} 0 22 11'.format(particle, remainder))\n else:\n P8gen.SetParameters('{}:addChannel 1 {:.16} 0 22 22'.format(particle, remainder))",
"def newpart(self, device, primary, ncyls, swap=False):\n # This is a simple partitioning tool, which only supports\n # adding partitions sequentially, with all primary partitions\n # being before the extended partition, so once a logical\n # partition has been added, it is not possible to add further\n # primary ones.\n di = DiskInfo(device)\n pmax = 0 # Record highest partition number\n lim = -1 # Used for seeking last used cylinder\n exp = 0 # Number of extended partition\n ex0, ex1 = 0, -1 # Extended partition start and end\n log0, log1 = 0, -1 # Start and end of area used by logical partitions\n for p in di.parts:\n pn = int(p[0][len(device):])\n scyl, ecyl = p[1:3]\n if pn <= 4:\n if exp:\n run_error(_(\"Not supported: primary partition (%s%d)\\n\"\n \"has higher partition number than extended \"\n \"partition\") % (device, pn))\n return \"\"\n if scyl <= lim:\n run_error(_(\"Partitions must be ordered on the device.\\n\"\n \"%s%d is out of order.\") % (device, pn))\n return \"\"\n if p[3] in (\"5\", \"f\"):\n # extended\n exp = pn\n ex0, ex1 = scyl, ecyl\n continue\n pmax = pn\n lim = ecyl\n\n startcyl = lim + 1\n endcyl = lim + ncyls\n if endcyl >= di.drvcyls:\n run_error(_(\"Too little space at end of drive for new partition\"))\n return \"\"\n if exp and (pmax <= 4):\n # Remove the extended partition, which is empty anyway\n if not self.rmpart(device, exp):\n return \"\"\n pmax = exp - 1\n if primary:\n if pmax >= 4:\n run_error(_(\"Cannot add primary partition to %s\") % device)\n return \"\"\n t = \"primary\"\n else:\n t = \"logical\"\n if pmax > 4:\n # resize extended partition\n if not self.xcheck(\"resizepart\", device, str(exp),\n str(ex0), str(endcyl),\n onfail=_(\"Couldn't resize extended partition %s%d\")\n % (device, exp)):\n return False\n else:\n # create extended partition\n if not self.xcheck(\"newpart\", device,\n str(startcyl), str(endcyl), \"extended\",\n onfail=_(\"Couldn't create extended partition on %s\")\n % device):\n return False\n if pmax < 4:\n pmax = 4\n\n if self.xcheck(\"newpart\", device, str(startcyl), str(endcyl),\n t, \"linux-swap\" if swap else \"ext2\"):\n return \"%s%d\" % (device, pmax + 1)\n else:\n run_error(_(\"Couldn't add new partition to %s\") % device)\n return \"\"",
"def get_volume_connector(self, instance):\n props = {}\n # 'get_volume_connector' will be invoked during creation\n # of the partition and during deletion of the partition.\n # But 'wwpns' we can access only when partition is available.\n # During spawn flow 'get_volume_connector' function will be called\n # before 'spawn' function so to get 'wwpns' we first creating\n # the partition using 'prep_for_spawn' function so that\n # we can access 'wwpns'.(i.e - else part)\n # But during deletion 'get_volume_connector' will be called\n # after 'destroy' function which will delete the partition so\n # after that we can not get the 'wwpns'\n # In order to get 'wwpns' after 'destroy' function we are\n # saving 'wwpns' before deleting partition in 'destroy' function\n # in 'deleted_instance_wwpns_mapping' variable and using these 'wwpns'\n # in 'get_volume_connector'(i.e - if part)\n # after using these 'wwpns' we are removing these 'wwpns' from\n # 'deleted_instance_wwpns_mapping' variable because\n # we are not going to use these 'wwpns' any more after this.\n if instance.uuid in self.deleted_instance_wwpns_mapping:\n props['wwpns'] = self.deleted_instance_wwpns_mapping.pop(\n instance.uuid)\n else:\n inst = vm.PartitionInstance(instance, self._cpc)\n props['wwpns'] = inst.get_partition_wwpns()\n\n props['host'] = instance.uuid\n\n return props",
"def channels_create(token, name, is_public):\n auth_u_id = get_id_from_token(token)\n if len(name) > 20:\n raise ValueError(\"\")\n channel_payload = {\n \"name\": name,\n \"all_members\": [auth_u_id],\n \"owners\": [auth_u_id],\n \"is_public\": is_public,\n \"is_standup_active\": False,\n \"time_finish\": None,\n \"standup_queue\": [],\n }\n return channels.add(channel_payload)",
"def create_channels(self, pad_segment, pool=None):\n assert len(self.tracks) > 0\n\n xs = []\n ys = []\n\n def set_track_ptc(track, ptc):\n node_d = self.nodes[track]._asdict()\n loc_d = self.nodes[track].loc._asdict()\n assert loc_d['ptc'] is None\n loc_d['ptc'] = ptc\n node_d['loc'] = NodeLoc(**loc_d)\n\n self.nodes[track] = Node(**node_d)\n\n for track in self.tracks:\n track_node = self.nodes[track]\n\n xs.append(track_node.loc.x_low)\n xs.append(track_node.loc.x_high)\n ys.append(track_node.loc.y_low)\n ys.append(track_node.loc.y_high)\n\n x_tracks = {}\n y_tracks = {}\n\n for track in self.tracks:\n track_node = self.nodes[track]\n\n if track_node.type == NodeType.CHANX:\n assert track_node.loc.y_low == track_node.loc.y_high\n\n if track_node.loc.y_low not in x_tracks:\n x_tracks[track_node.loc.y_low] = []\n\n x_tracks[track_node.loc.y_low].append((\n track_node.loc.x_low,\n track_node.loc.x_high,\n track))\n elif track_node.type == NodeType.CHANY:\n assert track_node.loc.x_low == track_node.loc.x_high\n\n if track_node.loc.x_low not in y_tracks:\n y_tracks[track_node.loc.x_low] = []\n\n y_tracks[track_node.loc.x_low].append((\n track_node.loc.y_low,\n track_node.loc.y_high,\n track))\n else:\n assert False, track_node\n\n x_list = []\n y_list = []\n\n x_channel_models = {}\n y_channel_models = {}\n\n\n if pool is not None:\n for y in x_tracks:\n x_channel_models[y] = pool.apply_async(process_track, (x_tracks[y],))\n\n for x in y_tracks:\n y_channel_models[x] = pool.apply_async(process_track, (y_tracks[x],))\n\n for y in progressbar.progressbar(range(max(x_tracks)+1)):\n if y in x_tracks:\n if pool is None:\n x_channel_models[y] = process_track(x_tracks[y])\n else:\n x_channel_models[y] = x_channel_models[y].get()\n\n x_list.append(len(x_channel_models[y].trees))\n for idx, tree in enumerate(x_channel_models[y].trees):\n for i in tree:\n set_track_ptc(track=i[2], ptc=idx)\n else:\n x_list.append(0)\n\n for x in progressbar.progressbar(range(max(y_tracks)+1)):\n if x in y_tracks:\n if pool is None:\n y_channel_models[x] = process_track(y_tracks[x])\n else:\n y_channel_models[x] = y_channel_models[x].get()\n\n y_list.append(len(y_channel_models[x].trees))\n for idx, tree in enumerate(y_channel_models[x].trees):\n for i in tree:\n set_track_ptc(track=i[2], ptc=idx)\n else:\n y_list.append(0)\n\n x_min=min(xs)\n y_min=min(ys)\n x_max=max(xs)\n y_max=max(ys)\n\n num_padding = 0\n for chan, channel_model in x_channel_models.items():\n for ptc, start, end in channel_model.fill_empty(x_min, x_max):\n num_padding += 1\n track_idx = self.add_track(\n track=Track(\n direction='X',\n x_low=start,\n y_low=chan,\n x_high=end,\n y_high=chan,\n ),\n segment_id=pad_segment,\n capacity=0,\n timing=None)\n\n set_track_ptc(track_idx, ptc)\n\n for chan, channel_model in y_channel_models.items():\n for ptc, start, end in channel_model.fill_empty(y_min, y_max):\n num_padding += 1\n track_idx = self.add_track(\n track=Track(\n direction='Y',\n x_low=chan,\n y_low=start,\n x_high=chan,\n y_high=end,\n ),\n segment_id=pad_segment,\n capacity=0,\n timing=None)\n\n set_track_ptc(track_idx, ptc)\n\n print('Number padding nodes {}'.format(num_padding))\n\n return Channels(\n chan_width_max=max(max(x_list), max(y_list)),\n x_min=x_min,\n y_min=y_min,\n x_max=x_max,\n y_max=y_max,\n x_list=[ChannelList(idx, info) for idx, info in enumerate(x_list)],\n y_list=[ChannelList(idx, info) for idx, info in enumerate(y_list)],\n )",
"def new_channel(session, channel):\n session.create_chan_event.clear()\n key = b64encode(messaging.common.pkc_encrypt(get_random_bytes(\n config.SECURE_CHANNEL_KEY_SIZE_BYTES), session.encryption_key)).decode()\n msg = {\n kk.typ: kk.add_user,\n kk.inviter: session.user,\n kk.invitee: session.user,\n kk.chid: channel,\n kk.chkey: key\n }\n msg[kk.signature] = b64encode(\n messaging.common.create_msg_sig(session, msg)).decode()\n messaging.common.send_msg(session.sock, msg, key=session.symkey)",
"def test_update_multiple_preferences_communication_channel_id(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass",
"def _create_bipolar_channels(cfg, raw, subject, session) -> None:\n if ch_types == ['eeg'] and cfg.eeg_bipolar_channels:\n msg = 'Creating bipolar channels …'\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n raw.load_data()\n for ch_name, (anode, cathode) in cfg.eeg_bipolar_channels.items():\n msg = f' {anode} – {cathode} -> {ch_name}'\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n mne.set_bipolar_reference(raw, anode=anode, cathode=cathode,\n ch_name=ch_name, drop_refs=False,\n copy=False)\n # If we created a new bipolar channel that the user wishes to\n # # use as an EOG channel, it is probably a good idea to set its\n # channel type to 'eog'. Bipolar channels, by default, don't have a\n # location, so one might get unexpected results otherwise, as the\n # channel would influence e.g. in GFP calculations, but not appear on\n # topographic maps.\n if (eog_channels and\n any([eog_ch_name in cfg.eeg_bipolar_channels\n for eog_ch_name in eog_channels])):\n msg = 'Setting channel type of new bipolar EOG channel(s) …'\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n for eog_ch_name in eog_channels:\n if eog_ch_name in cfg.eeg_bipolar_channels:\n msg = f' {eog_ch_name} -> eog'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject,\n session=session))\n raw.set_channel_types({eog_ch_name: 'eog'})",
"async def change_promotion_channel(self, **kwargs):\n\n facebook = kwargs.get('facebook', None)\n twitter = kwargs.get('twitter', None)\n youtube = kwargs.get('youtube', None)\n twitch = kwargs.get('twitch', None)\n privacy = kwargs.get('privacy', None)\n data = {\n \"facebook\": facebook,\n \"twitter\": twitter,\n \"youtube\": youtube,\n \"twitch\": twitch,\n \"promotionChannelsVisibilityPrivacy\": privacy\n }\n\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone/promotion-channels',\n method='post',\n data=data,\n )\n return e",
"def test_partitioner(self):\n args = \"xyzzy\", set([1, 2, 3])\n partitioner = self.tx_client.SetPartitioner(*args)\n self.assertEqual(partitioner.state, PartitionState.ALLOCATING)\n self.assertEqual(partitioner._partitioner.args, args)\n self.assertEqual(partitioner._partitioner.kwargs, {})\n\n partitioner._partitioner.state = PartitionState.ACQUIRED\n self.assertEqual(partitioner.state, PartitionState.ACQUIRED)",
"def create_port_postcommit(self, mech_context):\n\n LOG.debug(\"create_port_postcommit: called\")\n port = mech_context.current\n port_id = port['id']\n network_id = port['network_id']\n tenant_id = port['tenant_id']\n host_id = mech_context._binding.host\n context = mech_context._plugin_context\n try:\n network = seamicro_db.get_network(context, network_id)\n except Exception:\n LOG.exception(\n _LE(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n raise Exception(\n _(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n\n vlan_id = network['vlan']\n switch_ip, server_id, nics = _get_switch_info(self._switch, host_id)\n if switch_ip is not None and server_id is not None and nics is not None:\n try:\n interfaces = self.client[switch_ip].interfaces.list()\n for interface in interfaces:\n interface.add_tagged_vlan(vlan_id)\n\n server = self.client[switch_ip].servers.get(server_id)\n if nics:\n server.set_tagged_vlan(vlan_id, nics=nics)\n else:\n server.set_tagged_vlan(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(\n _LE(\"SeaMicro driver: failed to create port\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n seamicro_db.delete_port(context, port_id)\n raise Exception(\n _(\"SeaMicro Mechanism: create_port_postcommit failed\"))\n\n LOG.info(\n _LI(\"created port (postcommit): port_id=%(port_id)s\"\n \" network_id=%(network_id)s tenant_id=%(tenant_id)s\"\n \" switch_ip=%(switch_ip)s server_id=%(server_id)s\"),\n {'port_id': port_id,\n 'network_id': network_id, 'tenant_id': tenant_id,\n 'switch_ip': switch_ip, 'server_id': server_id})",
"def partition_network(self, *args):\n Blockade.blockade_create_partition(*args)",
"def build_channel(channel, client=None, topic=None, mode=None, num_procs=1, group=None, shard_id=u'shardId-000000000000', shard_it_type='LATEST'):\n channel_lc = channel.lower()\n if channel_lc == \"file\" or channel_lc == \"geowatchchannelfile\":\n from geowatchutil.channel.geowatch_channel_file import GeoWatchChannelFile\n return GeoWatchChannelFile(client, mode)\n elif channel_lc == \"kafka\" or channel_lc == \"geowatchchannelkafka\":\n from geowatchutil.channel.geowatch_channel_kafka import GeoWatchChannelKafka\n return GeoWatchChannelKafka(client, topic, mode, num_procs=num_procs, group=group)\n elif channel_lc == \"kinesis\" or channel_lc == \"geowatchchannelkinesis\":\n from geowatchutil.channel.geowatch_channel_kinesis import GeoWatchChannelKinesis\n return GeoWatchChannelKinesis(client, topic, mode, num_procs=num_procs, shard_id=shard_id, shard_it_type=shard_it_type)\n elif channel_lc == \"sns\" or channel_lc == \"geowatchchannelsns\":\n from geowatchutil.channel.geowatch_channel_sns import GeoWatchChannelSNS\n return GeoWatchChannelSNS(client, topic, mode)\n elif channel_lc == \"sqs\" or channel_lc == \"geowatchchannelsqs\":\n from geowatchutil.channel.geowatch_channel_sqs import GeoWatchChannelSQS\n return GeoWatchChannelSQS(client, topic, mode)\n elif channel_lc == \"slack\" or channel_lc == \"geowatchchannelslack\":\n from geowatchutil.channel.geowatch_channel_slack import GeoWatchChannelSlack\n return GeoWatchChannelSlack(client, topic, mode)\n elif channel_lc == \"wfs\" or channel_lc == \"geowatchchannelwfs\":\n from geowatchutil.channel.geowatch_channel_wfs import GeoWatchChannelWFS\n return GeoWatchChannelWFS(client, topic, mode)",
"def createPairFuzPerm(_session, _segment, _beg, _end, _const):\n return createPair(_session, _segment, _beg, _end, sc.SC_FUZ | sc.SC_PERMANENT | _const)",
"def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass",
"async def part(self, channel : str):\n await self._connection.part(channel)",
"def create_channel(pvname, connect=False, auto_cb=True, callback=None):\n #\n # Note that _CB_CONNECT (defined above) is a global variable, holding\n # a reference to _onConnectionEvent: This is really the connection\n # callback that is run -- the callack here is stored in the _cache\n # and called by _onConnectionEvent.\n pvn = STR2BYTES(pvname)\n ctx = current_context()\n global _cache\n if ctx not in _cache:\n _cache[ctx] = {}\n if pvname not in _cache[ctx]: # new PV for this context\n entry = {'conn':False, 'chid': None,\n 'ts': 0, 'failures':0, 'value': None,\n 'callbacks': [ callback ]}\n _cache[ctx][pvname] = entry\n else:\n entry = _cache[ctx][pvname]\n if not entry['conn'] and callback is not None: # pending connection\n _cache[ctx][pvname]['callbacks'].append(callback)\n elif (hasattr(callback, '__call__') and\n not callback in entry['callbacks']):\n entry['callbacks'].append(callback)\n callback(chid=entry['chid'], pvname=pvname, conn=entry['conn'])\n\n conncb = 0\n if auto_cb:\n conncb = _CB_CONNECT\n if entry.get('chid', None) is not None:\n # already have or waiting on a chid\n chid = _cache[ctx][pvname]['chid']\n else:\n chid = dbr.chid_t()\n ret = libca.ca_create_channel(pvn, conncb, 0, 0,\n ctypes.byref(chid))\n PySEVCHK('create_channel', ret)\n entry['chid'] = chid\n\n if connect:\n connect_channel(chid)\n if conncb != 0:\n poll()\n return chid",
"def delete_port_postcommit(self, mech_context):\n\n LOG.debug(\"delete_port_postcommit: called\")\n port = mech_context.current\n port_id = port['id']\n network_id = port['network_id']\n tenant_id = port['tenant_id']\n host_id = mech_context._binding.host\n context = mech_context._plugin_context\n\n try:\n network = seamicro_db.get_network(context, network_id)\n except Exception:\n LOG.exception(\n _LE(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n raise Exception(\n _(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n\n vlan_id = network['vlan']\n\n switch_ip, server_id, nics = _get_switch_info(self._switch, host_id)\n if switch_ip is not None and server_id is not None and nics is not None:\n try:\n interfaces = self.client[switch_ip].interfaces.list()\n for interface in interfaces:\n interface.remove_tagged_vlan(vlan_id)\n\n server = self.client[switch_ip].servers.get(server_id)\n if nics:\n server.unset_tagged_vlan(vlan_id, nics=nics)\n else:\n server.unset_tagged_vlan(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(\n _LE(\"SeaMicro driver: failed to delete port\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n raise Exception(\n _(\"SeaMicro Mechanism: delete_port_postcommit failed\"))\n\n LOG.info(\n _LI(\"delete port (postcommit): port_id=%(port_id)s\"\n \" network_id=%(network_id)s tenant_id=%(tenant_id)s\"\n \" switch_ip=%(switch_ip)s server_id=%(server_id)s\"),\n {'port_id': port_id,\n 'network_id': network_id, 'tenant_id': tenant_id,\n 'switch_ip': switch_ip, 'server_id': server_id})",
"def on_partition_change(self, new_partitions):\n if new_partitions is None:\n self.conn.create(self.partition_path, value=self.partitions)\n return\n\n if new_partitions != self.partitions:\n self.partitions = new_partitions\n self.rebalance()\n\n self.partitions_collected.set()",
"def _create_channels(self):\n for name, creator in CommonBase.get_channels(self.__class__):\n for cls, id in creator.pairs:\n # If channel pair was created with MultiChannelCreator\n # add channel interface to collection with passed attribute name\n if isinstance(creator, CommonBase.MultiChannelCreator):\n child = self.add_child(cls, id, collection=name, **creator.kwargs)\n # If channel pair was created with ChannelCreator\n # name channel interface with passed attribute name\n elif isinstance(creator, CommonBase.ChannelCreator):\n child = self.add_child(cls, id, attr_name=name, **creator.kwargs)\n else:\n raise ValueError(\"Invalid class '{creator}' for channel creation.\")\n child._protected = True",
"def createPairPosPerm(_session, _segment, _beg, _end, _const):\n return createPair(_session, _segment, _beg, _end, sc.SC_POS | sc.SC_PERMANENT | _const)",
"def create_servicech(self, conf, params):\n\t\tpass"
] | [
"0.61758345",
"0.53666586",
"0.5140615",
"0.4981957",
"0.49264306",
"0.4917317",
"0.4888045",
"0.48726103",
"0.48679605",
"0.48194185",
"0.48172766",
"0.47911158",
"0.47854713",
"0.47262764",
"0.4725585",
"0.47093052",
"0.4699324",
"0.46934438",
"0.4691812",
"0.4669043",
"0.46654534",
"0.4665311",
"0.4655793",
"0.4633825",
"0.46195897",
"0.45715362",
"0.45567223",
"0.45434743",
"0.45091796",
"0.45033953"
] | 0.6580948 | 0 |
Evaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. | def _process_worker(call_queue, result_queue):
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(os.getpid())
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException as e:
exc = _ExceptionWithTraceback(e, e.__traceback__)
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
            logger.exception(e)  # Mainly to display the error directly.
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call_queue_closure(data, call_queue):\n result = data.copy()\n for func, f_args, f_kwargs in call_queue:\n try:\n result = func(result, *f_args, **f_kwargs)\n except Exception as err:\n self.call_queue = []\n raise err\n return result",
"def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return",
"def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()",
"def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))",
"def __call__(self):\n dv = None\n #Push as many queued calls as the self.max_batch_size and the max number of paralel HTTPS sessions allow for.\n while self.active_call_count < self.parallel and self.queue:\n #Get a chunk of entries from the command queue so we can make a batch.\n subqueue = self.queue[:self.max_batch_size]\n self.queue = self.queue[self.max_batch_size:]\n #Send a single batch to the currently selected RPC node.\n dv = self._process_batch(subqueue)\n #If there is nothing left to do, there is nothing left to do\n if not self.queue and self.active_call_count == 0:\n self.log.error(\"Queue is empty and no active HTTPS-POSTs remaining.\")\n if self.stop_when_empty:\n #On request, stop reactor when queue empty while no active queries remain.\n self.reactor.stop() \n return dv",
"def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)",
"def run(self):\n proc_name = self.name\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n debug('{}: Exiting'.format(proc_name))\n self.task_queue.task_done()\n break\n debug('{}: {}'.format(proc_name, next_task))\n answer = next_task()\n self.task_queue.task_done()\n self.result_queue.put(answer)\n return",
"def _worker(\n self, work_queue: Queue, done_queue: Queue, build_results: bool = True\n ):\n for chunk in iter(work_queue.get, \"STOP\"):\n interactions = self._play_matches(chunk, build_results)\n done_queue.put(interactions)\n done_queue.put(\"STOP\")\n return True",
"def _process_batch(self, subqueue):\n try:\n timeoutCall = None\n jo = None\n if self.max_batch_size == 1:\n #At time of writing, the regular nodes have broken JSON-RPC batch handling.\n #So when max_batch_size is set to one, we assume we need to work around this fact.\n jo = json.dumps(self.entries[subqueue[0]]._get_rpc_call_object())\n else:\n #The api.steemitstage.com node properly supports JSON-RPC batches, and so, hopefully soon, will the other nodes.\n qarr = list()\n for num in subqueue:\n qarr.append(self.entries[num]._get_rpc_call_object())\n jo = json.dumps(qarr)\n url = \"https://\" + self.nodes[self.node_index] + \"/\"\n url = str.encode(url)\n deferred = self.agent.request('POST',\n url,\n Headers({\"User-Agent\" : ['Async Steem for Python v0.6.1'],\n \"Content-Type\": [\"application/json\"]}),\n _StringProducer(jo))\n def process_one_result(reply):\n \"\"\"Process a single response from an JSON-RPC command.\"\"\"\n try:\n if \"id\" in reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. {node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n def handle_response(response):\n \"\"\"Handle response for JSON-RPC batch query invocation.\"\"\"\n try:\n #Cancel any active timeout for this HTTPS call.\n if timeoutCall.active():\n timeoutCall.cancel()\n def cbBody(bodystring):\n \"\"\"Process response body for JSON-RPC batch query invocation.\"\"\"\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. 
{node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred2 = readBody(response)\n deferred2.addCallback(cbBody)\n return deferred2\n except Exception as ex:\n self.log.failure(\"Error in handle_response {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addCallback(handle_response)\n def _handle_error(error):\n \"\"\"Handle network level error for JSON-RPC request.\"\"\"\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addErrback(_handle_error)\n timeoutCall = self.reactor.callLater(self.rpc_timeout, deferred.cancel)\n #Keep track of the number of active parallel HTTPS posts.\n self.active_call_count = self.active_call_count + 1\n return deferred\n except Exception as ex:\n self.log.failure(\"Error in _process_batch {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])",
"def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()",
"def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()",
"def _main_task(self, design: Design, queue: Queue = None, index: int = None) \\\n -> Dict[str, float]:\n try:\n result = self._evaluate(design)\n result['valid'] = True\n except Exception as e:\n result = {'valid': False, 'message': str(e)}\n\n if queue:\n assert index is not None, \"when queue is provided index should also be given\"\n queue.put((index, result))\n\n return result",
"def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)",
"def run(self):\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n with _threadpool_limits(limits=1):\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)",
"def __call__(self, q, threads = None):\n if threads is -1: threads = cpu_count()\n\n if threads is None:\n results = [self.evaluate(v) for v in q]\n elif type(threads) is int and threads > 0:\n workers = Pool(threads)\n results = workers.map(self.evaluate, q)\n else:\n raise ValueError('threads keyword must be either -1 or an integer greater than zero')\n\n mu = [ t[0] for t in results ]\n sig = [ t[1] for t in results ]\n return array(mu), array(sig)",
"def iterate_days(results_queue, idx=0):\n # Declaration of learners and results' vectors\n ucb1_learner = UCB1Learner(len(prices))\n tsgauss_learner = TSLearnerGauss(len(prices))\n vector_daily_price_ucb1_loc = []\n vector_daily_revenue_ucb1_loc = []\n vector_daily_price_ts_loc = []\n vector_daily_revenue_ts_loc = []\n\n print('Starting execution ' + str(idx))\n\n # For every day:\n for t in range(T):\n if t % 20 == 0:\n log(\"Iteration day: {:3d} - execution: {:3d}\".format(t, idx))\n # Get new users in the day t and their costs\n [new_user_1, new_user_2, new_user_3] = env.get_all_new_users_daily(bids[0])\n new_users = [new_user_1, new_user_2, new_user_3]\n [cost1, cost2, cost3] = env.get_all_cost_per_click(bids[0])\n cost = [cost1, cost2, cost3]\n\n # Get the total cost\n total_cost = 0\n for user in range(len(new_users)):\n total_cost += new_users[user] * cost[user]\n\n # Choose the arm and thus the price for UCB1\n daily_arm_ucb1 = ucb1_learner.pull_arm()\n daily_price_ucb1 = prices[daily_arm_ucb1]\n vector_daily_price_ucb1_loc.append(daily_price_ucb1)\n\n # Choose the arm and thus the price for Thomson Sampling\n daily_arm_ts = tsgauss_learner.pull_arm()\n daily_price_ts = prices[daily_arm_ts]\n vector_daily_price_ts_loc.append(daily_price_ts)\n\n # Calculate the number of bought items\n daily_bought_items_per_class_ucb1 = [0, 0, 0]\n daily_bought_items_per_class_ts = [0, 0, 0]\n\n for user in range(len(new_users)):\n for c in range(new_users[user]):\n daily_bought_items_per_class_ucb1[user] += env.buy(daily_price_ucb1, user + 1)\n daily_bought_items_per_class_ts[user] += env.buy(daily_price_ts, user + 1)\n\n # Sum up the n. of bought items\n daily_bought_items_ucb1 = sum(daily_bought_items_per_class_ucb1)\n daily_bought_items_ts = sum(daily_bought_items_per_class_ts)\n\n # Calculate the revenue\n daily_revenue_ucb1 = daily_bought_items_ucb1 * env.get_margin(daily_price_ucb1) - total_cost\n daily_revenue_ts = daily_bought_items_ts * env.get_margin(daily_price_ts) - total_cost\n\n # Add to the vector the daily revenue\n vector_daily_revenue_ucb1_loc.append(daily_revenue_ucb1)\n vector_daily_revenue_ts_loc.append(daily_revenue_ts)\n\n # Get delayed rewards\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1[user - 1], daily_price_ucb1,\n user)))\n\n ucb1_learner.update_observations(daily_arm_ucb1, daily_revenue_ucb1, next_30_days)\n\n # Get delayed rewards\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ts[user - 1], daily_price_ts,\n user)))\n tsgauss_learner.update_observations(daily_arm_ts, daily_revenue_ts, next_30_days)\n\n if plot_l_t == True and t>=29:\n plot_learned_curve(tsgauss_learner.mu, tsgauss_learner.tau, real, tsgauss_learner.n_pulled_arms, plots_folder, t)\n\n print('Ending execution ' + str(idx))\n\n # put results in the given queue\n results_queue.put((ucb1_learner.collected_rewards, tsgauss_learner.collected_rewards, vector_daily_price_ucb1_loc,\n vector_daily_revenue_ucb1_loc, vector_daily_price_ts_loc, vector_daily_revenue_ts_loc, tsgauss_learner.mu, tsgauss_learner.tau, tsgauss_learner.n_pulled_arms))",
"def drain_call_queue(self):\n if len(self.call_queue) == 0:\n return\n self.apply(lambda x: x)",
"def iterate_days(results_queue, idx=0):\n # Declaration of learners and results' vectors\n ucb1_learner = UCB1Learner(len(prices))\n ucb1_old_learner = UCB1Learnerold(len(prices))\n vector_daily_price_ucb1_loc = []\n vector_daily_price_ucb1_old_loc = []\n print('Starting execution ' + str(idx))\n # For every day:\n for t in range(T):\n if t % 20 == 0:\n log(\"Iteration day: {:3d} - execution: {:3d}\".format(t, idx))\n\n # Get new users in the day t and their costs\n [new_user_1, new_user_2, new_user_3] = env.get_all_new_users_daily(bids[0])\n new_users = [new_user_1, new_user_2, new_user_3]\n [cost1, cost2, cost3] = env.get_all_cost_per_click(bids[0])\n cost = [cost1, cost2, cost3]\n\n # Get the total cost\n total_cost = 0\n for user in range(len(new_users)):\n total_cost += new_users[user] * cost[user]\n\n # Choose the arm and thus the price for UCB1\n daily_arm_ucb1 = ucb1_learner.pull_arm()\n daily_price_ucb1 = prices[daily_arm_ucb1]\n vector_daily_price_ucb1_loc.append(daily_price_ucb1)\n\n # Choose the arm and thus the price for ucb1_old\n daily_arm_ucb1_old = ucb1_old_learner.pull_arm()\n daily_price_ucb1_old = prices[daily_arm_ucb1_old]\n vector_daily_price_ucb1_old_loc.append(daily_price_ucb1_old)\n\n # Calculate the number of bought items\n daily_bought_items_per_class_ucb1 = [0, 0, 0]\n daily_bought_items_per_class_ucb1_old = [0, 0, 0]\n\n for user in range(len(new_users)):\n for c in range(new_users[user]):\n daily_bought_items_per_class_ucb1[user] += env.buy(daily_price_ucb1, user + 1)\n daily_bought_items_per_class_ucb1_old[user] += env.buy(daily_price_ucb1_old, user + 1)\n\n # Sum up the n. of bought items\n daily_bought_items_ucb1 = sum(daily_bought_items_per_class_ucb1)\n daily_bought_items_ucb1_old = sum(daily_bought_items_per_class_ucb1_old)\n\n # Calculate the revenue\n daily_revenue_ucb1 = daily_bought_items_ucb1 * env.get_margin(daily_price_ucb1) - total_cost\n daily_revenue_ucb1_old = daily_bought_items_ucb1_old * env.get_margin(daily_price_ucb1_old) - total_cost\n\n # Get delayed rewards UCB1\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1[user - 1], daily_price_ucb1,\n user)))\n\n ucb1_learner.update_observations(daily_arm_ucb1, daily_revenue_ucb1, next_30_days)\n\n # Get delayed rewards UCB1 old\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1_old[user - 1], daily_price_ucb1_old,\n user)))\n\n ucb1_old_learner.update_observations(daily_arm_ucb1_old, daily_revenue_ucb1_old, next_30_days)\n\n print('Ending execution ' + str(idx))\n\n # put results in the given queue\n results_queue.put((ucb1_learner.collected_rewards, ucb1_old_learner.collected_rewards, vector_daily_price_ucb1_loc, vector_daily_price_ucb1_old_loc))",
"def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)",
"def drain_call_queue(self):\n log = get_logger()\n self._is_debug(log) and log.debug(\n f\"ENTER::Partition.drain_call_queue::{self._identity}\"\n )\n if len(self.call_queue) == 0:\n return\n call_queue = self.call_queue\n if len(call_queue) > 1:\n self._is_debug(log) and log.debug(\n f\"SUBMIT::_apply_list_of_funcs::{self._identity}\"\n )\n futures = DaskWrapper.deploy(\n func=apply_list_of_funcs,\n f_args=(call_queue, self._data),\n num_returns=2,\n pure=False,\n )\n else:\n # We handle `len(call_queue) == 1` in a different way because\n # this improves performance a bit.\n func, f_args, f_kwargs = call_queue[0]\n self._is_debug(log) and log.debug(f\"SUBMIT::_apply_func::{self._identity}\")\n futures = DaskWrapper.deploy(\n func=apply_func,\n f_args=(self._data, func, *f_args),\n f_kwargs=f_kwargs,\n num_returns=2,\n pure=False,\n )\n self._data = futures[0]\n self._ip_cache = futures[1]\n self._is_debug(log) and log.debug(\n f\"EXIT::Partition.drain_call_queue::{self._identity}\"\n )\n self.call_queue = []",
"def putting_on_queue(*args):\n results.put(main_func(*args))",
"def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')",
"def execute(self):\n if not self._multiprocessing:\n for counter, subtasktuples in enumerate(self.task_scheduler):\n self._storegate.compile()\n result = self.execute_pipeline(subtasktuples, counter)\n self._history.append(result)\n\n logger.counter(counter + 1,\n len(self.task_scheduler),\n divide=1,\n message=f'metric={result.metric_value}')\n\n else: # multiprocessing\n if self._storegate.backend not in ('numpy', 'hybrid'):\n raise NotImplementedError(\n 'multiprocessing is supported for only numpy and hybrid backend'\n )\n\n ctx = mp.get_context('spawn')\n queue = ctx.Queue()\n args = []\n\n for counter, subtasktuples in enumerate(self.task_scheduler):\n args.append([subtasktuples, counter])\n\n if len(args) == self._num_workers:\n self.execute_jobs(ctx, queue, args)\n args = []\n logger.counter(counter + 1,\n len(self.task_scheduler),\n divide=1)\n\n self.execute_jobs(ctx, queue, args)",
"def _process_command_queue(self, command_queue):\n while True:\n if len(command_queue) > 0:\n command_tuple = command_queue.pop()\n func, kwargs = command_tuple[0], command_tuple[1]\n getattr(self, func)(**kwargs)\n time.sleep(.5)",
"def _worker_main(self, task_queue, data_queue):\r\n while True:\r\n task = task_queue.get()\r\n sample = self._task_func(task)\r\n if sample is None:\r\n continue\r\n data_queue.put(sample)",
"async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download",
"def run(self):\n while True:\n path, params = self.path_queue.get()\n errors = check_path(path, **params)\n self.result_queue.put(errors)\n self.path_queue.task_done()",
"def worker_function(taskQ, resultQ):\n \n while True:\n try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task, allow some time for process clash (ivel number)\n except queue.Empty: break# kill process if no more tasks left\n example = generate_example(ivel)\n resultQ.put(example)# push the example to the results queue",
"def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d",
"def _process_run(queue: Queue, func: Callable[[Any], Any] = None,\n *args, **kwargs):\n queue.put(func(*args, **kwargs))"
] | [
"0.6878254",
"0.653854",
"0.6499669",
"0.6313003",
"0.6291972",
"0.61565644",
"0.6106621",
"0.60317",
"0.6018602",
"0.59836715",
"0.59836715",
"0.59609747",
"0.5950041",
"0.5924165",
"0.5924025",
"0.59115165",
"0.590969",
"0.58802915",
"0.5863303",
"0.5858751",
"0.58530885",
"0.5844668",
"0.5837059",
"0.5830208",
"0.5829739",
"0.5811876",
"0.5807176",
"0.58035445",
"0.5799914",
"0.5799463"
] | 0.7384044 | 0 |
Convert a BGR color tuple (e.g. (0, 147, 255)) to a color tuple that can be used by Matplotlib (e.g. (1.0, 0.5764705882352941, 0.0)). | def color_tuple_bgr_to_plt(color_tuple):
return (color_tuple[2]/255, color_tuple[1]/255, color_tuple[0]/255) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def int2color_tuple(x):\n red_val = int(1000 * x % 255)\n green_val = int(10000 * x % 255)\n blue_val = int(100000 * x % 255)\n return red_val, green_val, blue_val",
"def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))",
"def colour_to_rgb_tuple(cls, col_str):\n hex_6 = cls.RE_COLOUR_HEX_6.search(col_str)\n if hex_6:\n #Simply converts hex directly to dec \n return tuple(int(c,16) for c in hex_6.groups())\n hex_3 = cls.RE_COLOUR_HEX_3.search(col_str)\n if hex_3:\n #First must convert single value range 0-15 to range 0-255 \n return tuple(int(int(c,16)/15.0*255.0) for c in hex_3.groups())\n rgb = cls.RE_COLOUR_RGB.search(col_str)\n if rgb:\n return tuple(int(c) for c in rgb.groups()) #Direct output of tuple from regex!\n return None #Otherwise canny do i' captain",
"def matplotlib_rgb_color(rgb_color):\r\n return tuple([i / 255. for i in rgb_color])",
"def rgb(r, g, b):\n return (r/255, g/255, b/255)",
"def translate_rgb(rgb_tuple):\n mapped_rgb_value = []\n for component in rgb_tuple:\n mapped_rgb_value.append(translate(component, 0, 1, 0, 255))\n\n return tuple(mapped_rgb_value)",
"def color_rgb(self):\n return tuple(int(self.color[i : i + 2], 16) for i in (0, 2, 4))",
"def convert_to_RGB_255(colors):\n return (colors[0]*255.0, colors[1]*255.0, colors[2]*255.0)",
"def rgb_to_color(*rgb):\n if(len(rgb)==1):\n r,g,b = rgb[0]\n else:\n r,g,b = rgb\n return \"#%02x%02x%02x\" % (r,g,b)",
"def color(c):\n\n if isinstance(c, tuple) and len(c) == 4:\n return c\n\n if c is None:\n return c\n\n if isinstance(c, basestring):\n if c[0] == '#':\n c = c[1:]\n\n if len(c) == 6:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = int(c[6]+c[7], 16)\n elif len(c) == 3:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = 255\n elif len(c) == 4:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = int(c[3], 16) * 0x11\n else:\n raise Exception(\"Color string must be 3, 4, 6, or 8 hex digits long.\")\n\n return (r, g, b, a)\n\n raise Exception(\"Not a color: %r\" % (c,))",
"def _binary_to_rgb(*args) -> tuple:\n if len(args) == 1:\n red = args[0][0]\n green = args[0][1]\n blue = args[0][2]\n elif len(args) == 3:\n red = args[0]\n green = args[1]\n blue = args[2]\n else:\n raise ValueError(\n \"Arguments must be RGB tuple or Red, Green, Blue as 3 arguments.\"\n )\n\n r_int = int(red, 2)\n g_int = int(green, 2)\n b_int = int(blue, 2)\n\n return (r_int, g_int, b_int)",
"def rgbTuple(rgb):\n return struct.unpack('BBB',rgb)",
"def color_code_to_rbg_tuple(color_code):\n code_r = color_code[1:3]\n code_g = color_code[3:5]\n code_b = color_code[5:7]\n r = int(code_r, 16)\n g = int(code_g, 16)\n b = int(code_b, 16)\n return r, g, b",
"def hex_to_rgb(self,value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))",
"def hex_to_rgb(value):\r\n lv = len(value)\r\n out = tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\r\n out = tuple([x/256.0 for x in out])\r\n return out",
"def _rgb(color):\n warnings.warn('Use color.rgba instead of color._rgb', FutureWarning, stacklevel=2)\n return (int(color[-6:-4], 16), int(color[-4:-2], 16), int(color[-2:], 16))",
"def color_to_rgb(color):\n \n if isinstance(color, tuple):\n # if a RGB tuple already\n return color\n else:\n # to_rgb() returns colors from (0-1)\n color = tuple(int(x * 255) for x in to_rgb(color))\n return color",
"def int2color(x):\n # r = int(1000 * x % 255)\n # g = int(10000 * x % 255)\n # b = int(100000 * x % 255)\n x = 0 if x == 0 else int(1/x)\n b = x & 0xff\n g = (x >> 8) & 0xff\n r = (x >> 16) & 0xff\n return [r, g, b]",
"def _colorDataToTuple(colorData):\n col = colorData.GetColour().Get()\n result = col[0] / 255.0, col[1] / 255.0, col[2] / 255.0\n return result",
"def gbc2rgb(c):\n #GBC format: 0bbbbbgggggrrrrr (b-blue, g-green, r-red)\n r = (c % (1 << 5)) << 3\n g = ((c / (1 << 5)) % (1 << 5)) << 3\n b = ((c / (1 << 10)) % (1 << 5)) << 3\n return (r,g,b)",
"def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))",
"def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))",
"def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))",
"def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))",
"def RGBToHTMLColor(rgb_tuple):\n\tr,g,b=rgb_tuple\n\tr=int(r/255.0*16)\n\tg=int(g/255.0*16)\n\tb=int(b/255.0*16)\n\tif r == 16:\n\t\tr = 15\n\tif g == 16:\n\t\tg = 15\n\tif b == 16:\n\t\tb = 15\n\thexcolor = '#%x%x%x' % (r,g,b)\n\t# that's it! '%02x' means zero-padded, 2-digit hex values\n\treturn hexcolor",
"def test_mage_hsv_tuple_to_rgb(self):\r\n htr = mage_hsv_tuple_to_rgb # for convenience\r\n self.assertEqual(htr((0, 0, 0)), (0, 0, 0))\r\n self.assertEqual(htr((0, 100, 100)), (255, 0, 0))\r\n self.assertEqual(htr((120, 100, 100)), (0, 255, 0))\r\n self.assertEqual(htr((240, 100, 100)), (0, 0, 255))\r\n assert_almost_equal(htr((0, 0, 49.803921568627452)), (127, 127, 127))",
"def clr_tuple(colorstring):\n\n if colorstring[0] == '#':\n if len(colorstring) == 7:\n return (ONE_OVER_256 * float(_hexbyte(colorstring[1:3])),\n ONE_OVER_256 * float(_hexbyte(colorstring[3:5])),\n ONE_OVER_256 * float(_hexbyte(colorstring[5:7])))\n if len(colorstring) == 4:\n return (ONE_OVER_16 * float(_hexchar(colorstring[1])),\n ONE_OVER_16 * float(_hexchar(colorstring[2])),\n ONE_OVER_16 * float(_hexchar(colorstring[3])))\n if colorstring in colors.CSS4_COLORS:\n return clr_tuple(colors.CSS4_COLORS[colorstring])\n if colorstring in colors.BASE_COLORS:\n return clr_tuple(colors.BASE_COLORS[colorstring])\n\n rgb_re = re.compile(\"rgb:(.*),(.*),(.*)\")\n\n rgb_match = rgb_re.search(colorstring)\n if rgb_match:\n return (float(rgb_match.group(1)),\n float(rgb_match.group(2)),\n float(rgb_match.group(3)))\n return None",
"def RGB2HTMLColor(rgb_tuple):\n hexcolor = '#%02x%02x%02x' % rgb_tuple\n # that's it! '%02x' means zero-padded, 2-digit hex values\n return hexcolor",
"def rgb_unpack(rgb: int) -> Tuple[int, int, int]:\n b = rgb & 255\n rg = rgb >> 8\n g = rg & 255\n r = rg >> 8\n return (r, g, b)",
"def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)"
] | [
"0.7479977",
"0.72842014",
"0.723168",
"0.71900856",
"0.7151505",
"0.71181124",
"0.7104675",
"0.7094821",
"0.7084356",
"0.70437115",
"0.7004791",
"0.6984981",
"0.6979475",
"0.69711167",
"0.6894583",
"0.6887978",
"0.6884093",
"0.684709",
"0.6821083",
"0.68084604",
"0.6765526",
"0.6765526",
"0.6765526",
"0.6765526",
"0.6749146",
"0.6737224",
"0.672244",
"0.6695558",
"0.6679797",
"0.66774374"
] | 0.8025614 | 0 |
Calculate the modified two-sided Kolmogorov-Smirnov statistic, adjusted for right-censored time-to-death data, as described in Fleming et al. (1980). This calculates the residues for a non-parametric test of the null hypothesis that 2 independent samples are drawn from the same continuous distribution. The actual test is performed in the function KSm_test. Reference: Thomas R. Fleming, Judith R. O'Fallon, Peter C. O'Brien and David P. Harrington. Biometrics Vol. 36, No. 4 (Dec., 1980), pp. 607-625 | def KSm_2samples(data1, relative_time_grid1, data2, relative_time_grid2):
def deltaNA (d, Y):
return np.sum([ np.power(Y-l,-1) for l in range(int(d))])
mortalities1 = pd.Series(data1.loc[data1['status']==1,'mortality'].value_counts(), index=relative_time_grid1).fillna(0)
censored1 = pd.Series(data1.loc[data1['status']==2,'mortality'].value_counts(), index=relative_time_grid1).fillna(0)
counts1 = mortalities1 + censored1
l1 = len(counts1)
atRisk1 = pd.Series(data=np.dot(np.triu(np.ones((l1,l1)),k=0), counts1), index=relative_time_grid1)
survived1 = pd.Series(data=np.dot(np.triu(np.ones((l1,l1)),k=1), counts1) + np.array(censored1), index=relative_time_grid1)
mortalities2 = pd.Series(data2.loc[data2['status']==1,'mortality'].value_counts(), index=relative_time_grid2).fillna(0)
censored2 = pd.Series(data2.loc[data2['status']==2,'mortality'].value_counts(), index=relative_time_grid2).fillna(0)
counts2 = mortalities2 + censored2
l2 = len(counts2)
atRisk2 = pd.Series(data=np.dot(np.triu(np.ones((l2,l2)),k=0), counts2), index=relative_time_grid2)
survived2 = pd.Series(data=np.dot(np.triu(np.ones((l2,l2)),k=1), counts2) + np.array(censored2), index=relative_time_grid2)
#Merge the 2 time_grids
unique_time_in_grid2 = [t for t in relative_time_grid2 if t not in relative_time_grid1]
unique_time_in_grid1 = [t for t in relative_time_grid1 if t not in relative_time_grid2]
relative_time_grid = np.unique(np.sort(relative_time_grid1+relative_time_grid2))
mortalities1 = pd.concat([mortalities1, pd.Series(data=float(0),index=unique_time_in_grid2)]).sort_index()
mortalities2 = pd.concat([mortalities2, pd.Series(data=float(0),index=unique_time_in_grid1)]).sort_index()
def lastValueInTimeSeries(ts, t):
if np.searchsorted(np.array(ts.index), t) != 0:
return ts.iloc[np.searchsorted(np.array(ts.index), t)-1]
else:
return ts.iloc[0]
atRisk1 = pd.concat([atRisk1, pd.Series(data=[lastValueInTimeSeries(atRisk1, t) for t in unique_time_in_grid2],index=unique_time_in_grid2)]).sort_index()
atRisk2 = pd.concat([atRisk2, pd.Series(data=[lastValueInTimeSeries(atRisk2, t) for t in unique_time_in_grid1],index=unique_time_in_grid1)]).sort_index()
n1 = np.sum(counts1)
n2 = np.sum(counts2)
n = n1 + n2
relative_time_grid = np.unique(np.sort(list(atRisk1.index[atRisk1>0])+list(atRisk2.index[atRisk2>0])))
l = len(relative_time_grid)
beta1_NelsonAalen = np.dot(np.tril(np.ones((l,l)),k=0), [deltaNA(mortalities1.loc[ind],atRisk1.loc[ind]) for ind in relative_time_grid] )
beta2_NelsonAalen = np.dot(np.tril(np.ones((l,l)),k=0), [deltaNA(mortalities2.loc[ind],atRisk2.loc[ind]) for ind in relative_time_grid] )
alpha1minus_CumCensor = pd.Series(data=[ deltaNA(n1-atRisk1.loc[relative_time_grid[j]],n1) - beta1_NelsonAalen[j-1] for j in range(1,l)], index=relative_time_grid[1:])
alpha2minus_CumCensor = pd.Series(data=[ deltaNA(n2-atRisk2.loc[relative_time_grid[j]],n2) - beta2_NelsonAalen[j-1] for j in range(1,l)], index=relative_time_grid[1:])
eta_SqrtAvgN = pd.Series(data=[1/np.sqrt( 1/(n1*np.exp(-alpha1minus_CumCensor.loc[ind])) + 1/(n2*np.exp(-alpha2minus_CumCensor.loc[ind])) )
for ind in relative_time_grid[1:]], index=relative_time_grid[1:])
U_NelsonAalenDiffAdjusted = np.dot(np.tril(np.ones((l,l)),k=0),
[0]+[ eta_SqrtAvgN.loc[ind]*(deltaNA(mortalities1.loc[ind],atRisk1.loc[ind])-deltaNA(mortalities2.loc[ind],atRisk2.loc[ind]))
for ind in relative_time_grid[1:]] )
Y_n1n2 = pd.Series(data=0.5 * (np.exp(-beta1_NelsonAalen) + np.exp(-beta2_NelsonAalen)) * U_NelsonAalenDiffAdjusted, index=relative_time_grid )
R_MaxCDF = 1 - 0.5 * (np.exp(-beta1_NelsonAalen[-1]) + np.exp(-beta2_NelsonAalen[-1]))
return KSmResidues(Y_n1n2, R_MaxCDF) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def KSm_test(residues, maxcdf, alternative='two-sided', alpha=0.05):\n \n Ystats = residues\n R_MaxCDF = maxcdf\n R_func = np.sqrt(R_MaxCDF-R_MaxCDF*R_MaxCDF)\n \n if alternative=='two-sided':\n \n A_MaxAbsY = np.max(np.abs(Ystats))\n prob_BrownianBridge = 1-norm.cdf(A_MaxAbsY/R_func) + norm.cdf((2*R_MaxCDF-1)*A_MaxAbsY/R_func)*np.exp(-2*A_MaxAbsY*A_MaxAbsY)\n alpha = alpha/2\n \n if prob_BrownianBridge < 0.4:\n if alpha > 0.4:\n raise Exception('alpha >0.4 could not be used with Schey (1977) two-sided approximation')\n else:\n return KStestResult(A_MaxAbsY, prob_BrownianBridge*2, \n newton(lambda x:1-alpha-norm.cdf(x/R_func) + norm.cdf((2*R_MaxCDF-1)*x/R_func)*np.exp(-2*x*x), 1.5) )\n else:\n return KStestResult(A_MaxAbsY, '>= 0.8',\n newton(lambda x:1-alpha-norm.cdf(x/R_func) + norm.cdf((2*R_MaxCDF-1)*x/R_func)*np.exp(-2*x*x), 1.5) )\n \n if alternative=='one-sided':\n \n V_MaxY = np.max([0]+list(Ystats.values))\n prob_BrownianBridge = 1-norm.cdf(V_MaxY/R_func) + norm.cdf((2*R_MaxCDF-1)*V_MaxY/R_func)*np.exp(-2*V_MaxY*V_MaxY)\n return KStestResult(V_MaxY, prob_BrownianBridge, \n newton(lambda x:1-alpha-norm.cdf(x/R_func) + norm.cdf((2*R_MaxCDF-1)*x/R_func)*np.exp(-2*x*x), 1.5) )\n else:\n raise Exception('Unrecognised alternative option:'+str(alternative))",
"def test_ks_ms(distribution):\n print(\"TESTING: K-S model selection for %s distribution\" % distribution.upper())\n params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]\n print(\" creating sample\")\n test_sample = dist.samples(distribution, params)\n print(\" calculating K-S statistics for all distributions\")\n print(\" input parameters: %s\" % dist.get_params(params, distribution))\n fit_results = {}\n best_ksd = 1.0\n best_model = dist.get()[0]\n for d in dist.get():\n print(\" %s:\" % d.upper())\n fit_results[d] = fit.fit_ks(d, test_sample)\n if fit_results[d]['D'] < best_ksd:\n best_ksd = fit_results[d]['D']\n best_model = d\n print(\" %s\" % dist.get_params(fit_results[d]['params'], d))\n print(\" D = %r\" % fit_results[d]['D'])\n params = fit_results[d]['params']\n p = 0\n for r in range(100):\n synthetic_sample = dist.samples(d, params, len(test_sample))\n ksd = me.ks_statistics(dist.get_sample_cdf(synthetic_sample), dist.cdf(d, params, np.max(synthetic_sample)))\n if ksd > fit_results[d]['D']:\n p += 1\n print(\" p = %r\" % (float(p)/100.0))\n print(\" Best fitting model: %s\" % best_model.upper())\n print_pmfs(test_sample, fit_results, 'TEST-KS.CSV')",
"def SkewedStudentLikelihoodHeteroscedastic(\n data, comparedata, measerror=None, params=None\n):\n __standartChecksBeforeStart(data, comparedata)\n if measerror is None:\n measerror = __generateMeaserror(data)\n\n measerror = np.array(measerror)\n measerror = __jitter_measerror_if_needed(\n \"SkewedStudentLikelihoodHeteroscedastic\", measerror\n )\n\n diff = np.array(__calcSimpleDeviation(data, comparedata))\n\n paramDependencies = [\"likelihood_nu\", \"likelihood_kappa\", \"likelihood_phi\"]\n\n if params is None:\n # based on VRUGTS paper, footnote \"YING\", page 307\n nu = np.random.uniform(2.001, 100, 1)\n k = np.random.uniform(0.001, 100, 1)\n phi = np.random.uniform(-0.99, 0.99, 1)\n\n else:\n missingparams = []\n randomparset, parameternames = params\n\n randomparset = np.array(randomparset)\n parameternames = np.array(parameternames)\n\n for nm in paramDependencies:\n if nm not in parameternames:\n missingparams.append(nm)\n\n if missingparams.__len__() > 0:\n raise LikelihoodError(\n \"Unfortunately contains your param list not all parameters which are needed for this class.\"\n \"Following parameter are needed, too: \" + str(missingparams)\n )\n\n nu = randomparset[parameternames == \"likelihood_nu\"][0]\n k = randomparset[parameternames == \"likelihood_kappa\"][0]\n phi = randomparset[parameternames == \"likelihood_phi\"][0]\n\n if abs(phi) > 1:\n warnings.warn(\n \"[SkewedStudentLikelihoodHeteroscedastic] The parameter 'phi' should be between -1 and 1 and is: \"\n + str(phi)\n )\n return np.NAN\n if nu <= 2:\n warnings.warn(\n \"[SkewedStudentLikelihoodHeteroscedastic] The parameter 'nu' should be greater then 2 and is: \"\n + str(nu)\n )\n return np.NAN\n if k <= 0:\n warnings.warn(\n \"[SkewedStudentLikelihoodHeteroscedastic] The parameter 'k' should be greater then 0 and is: \"\n + str(k)\n )\n return np.NAN\n\n eta_all = diff[1:] - phi * diff[:-1] * np.sqrt(1 - phi**2)\n c_1 = (\n (k**2 - 1 / (k**2))\n * 2\n * math.gamma((nu + 1) / 2)\n * np.sqrt(nu / (nu - 2))\n * (nu - 2)\n ) / ((k + (1 / k)) * math.gamma(nu / 2) * np.sqrt(np.pi * nu) * (nu - 1))\n\n for_c2 = -1 * (c_1) ** 2 + (k**3 + 1 / k**3) / (k + 1 / k)\n\n c_2 = np.sqrt(for_c2)\n\n # TODO Maximizing with negative to zero?\n return np.log(\n -np.prod(\n (2 * c_2 * math.gamma((nu + 1) / 2) * np.sqrt(nu / (nu - 2)))\n / (\n (k + 1 / k)\n * math.gamma(nu / 2)\n * np.sqrt(np.pi * nu)\n * np.sqrt(1 - phi**2)\n * measerror[1:]\n )\n * (\n 1\n + (1 / (nu - 2))\n * ((c_1 + c_2 * eta_all) / (k ** (np.sign(c_1 + c_2 * eta_all)))) ** 2\n )\n ** (-(nu + 1) / 2)\n )\n )",
"def ks_test_function(dim, thresh):\n def ks_test(table1, table2):\n from scipy.stats import ks_2samp\n sample1 = table1.get_cols(dim)[0]\n sample2 = table2.get_cols(dim)[0]\n if thresh != None:\n sample1[sample1<thresh] = 0\n sample2[sample2<thresh] = 0\n ks, p_ks = ks_2samp(sample1, sample2)\n return ks\n return ks_test",
"def ks_test(timeseries):\r\n\r\n hour_ago = time() - 3600\r\n ten_minutes_ago = time() - 600\r\n reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])\r\n probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])\r\n\r\n if reference.size < 20 or probe.size < 20:\r\n return False\r\n\r\n ks_d,ks_p_value = scipy.stats.ks_2samp(reference, probe)\r\n\r\n if ks_p_value < 0.05 and ks_d > 0.5:\r\n adf = sm.tsa.stattools.adfuller(reference, 10)\r\n if adf[1] < 0.05:\r\n return True\r\n\r\n return False",
"def test_hky_nielsen(self):\n distribution = {'A':.2,'C':.3,'G':.3,'T':.2}\n kappa = 2\n rate_matrix_object = RateMatrix.get_unscaled_hky85_rate_matrix(distribution, kappa)\n rate_matrix_object.normalize()\n rate_matrix = rate_matrix_object.get_dictionary_rate_matrix()\n path_length = 2\n initial_state = 'A'\n terminal_state = 'C'\n states = 'ACGT'\n iterations = 200\n rejection_changes = []\n i = 0\n while i < iterations:\n rejection_events = get_rejection_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if rejection_events is not None:\n rejection_changes.append(len(rejection_events))\n i += 1\n nielsen_changes = []\n i = 0\n while i < iterations:\n nielsen_events = get_nielsen_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if nielsen_events is not None:\n nielsen_changes.append(len(nielsen_events))\n i += 1\n t, p = scipy.stats.mannwhitneyu(rejection_changes, nielsen_changes)\n self.failIf(p < .001)",
"def calculate_kolmogorov_smirnov(p, q, num_samples=100, _random_state=None): # pylint: disable=unused-argument\n logging.warning(\"This function is deprecated, please use `calculate_goodness_of_fit`\") # pragma: no cover",
"def KS(dataDict=None, dataLabel='data', mode='two.sided'):\n\n # test calling values\n modes = ['two.sided', 'less', 'greater']\n if mode not in modes:\n raise ValueError('RSTATS.KS: mode must be in: ', modes)\n if (dataDict is None or not isinstance(dataDict, dict) \n or len(dataDict.keys()) != 2):\n raise ValueError('RSTATS.KS: dataDict must be a dictionary with '\n + 'exactly 2 keys')\n\n labels = list(dataDict.keys())\n# NGroups = len(labels)\n cmdx = 'X=c(%s)' % ', '.join(str(x) for x in dataDict[labels[0]])\n cmdy = 'Y=c(%s)' % ', '.join(str(y) for y in dataDict[labels[1]])\n# package \"perm\" not available on mac os x, use coin instead\n# importr('perm')\n robjects.r(cmdx)\n robjects.r(cmdy)\n\n# (pvalue, nmc) = permutation(dataDict, dataDict.keys())\n\n u = robjects.r(\"ks.test(X, Y, alternative='%s')\" % mode)\n\n pvalue = float(u[1][0])\n statvalue = float(u[0][0]) # get diff estimate\n if dataLabel is not None:\n print ('\\nKolmogorov-Smirnov test. Dataset = %s' % (dataLabel))\n print(u' Test statistic: {:8.4f}'.format(statvalue))\n print(u' p={:8.6f}, [mode={:s}]'.format(float(pvalue), mode))\n return pvalue",
"def test_kpss(self):\n\n kpsstest = kpss(self.ts_df['y'], regression='c')\n kpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])\n for key, value in kpsstest[3].items():\n kpss_output['Critical Value (%s)' % key] = value\n print(kpss_output)\n if kpsstest[0] > kpsstest[3]['5%']:\n print(\n \"Test statistic greater than critical value at 5% --> series seems to be not stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be greater than test statistic.\")\n else:\n print(\n \"Test statistic less than critical value at 5% --> series seems to be stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be less than test statistic.\")",
"def ks_test(df1, df2):\n p_val_list = []\n stat_list = []\n for element in df1.columns:\n res = stats.ks_2samp(df1[element], df2[element])\n p_val_list.append(res[1])\n stat_list.append(res[0])\n n = np.argmax(stat_list)\n p_val = p_val_list[n]\n stat = stat_list[n]\n return p_val, stat, n, p_val_list, stat_list",
"def _two_sample_kolmogorov_smirnov_pmf(\n pmf1: np.ndarray, pmf2: np.ndarray, alpha: float = 0.05\n) -> Tuple[float, float, bool]:\n # note: yields different results as `scipy.stats.ks_2samp`\n\n cdf1 = np.cumsum(pmf1)\n cdf2 = np.cumsum(pmf2)\n\n n1 = cdf1[-1]\n n2 = cdf2[-1]\n\n # cannot be inplace because of type conversion\n cdf1 = cdf1 / n1\n cdf2 = cdf2 / n2\n\n statistic, pvalue = _two_sample_kolmogorov_smirnov_same_length(cdf1, cdf2, n1, n2)\n reject = pvalue < alpha\n return statistic, pvalue, reject",
"def test_ks_test(mode):\n indices = np.random.randint(0, 1000, 1000)\n out = compute_indices_ks_test(indices, 1000, mode=mode)\n assert all([o > 0.0 for o in out])",
"def KolmogorovSmirnoff_statistics(dd1, dd2):\n cum1 = dd1.cumulative_distribution()\n cum2 = dd2.cumulative_distribution()\n minimum = max(cum1[0][0], cum2[0][0])\n maximum = max(cum1[-1][0], cum2[-1][0])\n index1 = len(cum1) - 1\n index2 = len(cum2) - 1\n summa1 = summa2 = 0\n\n difference = 0\n for i in reversed(range(minimum, maximum+1)):\n if cum1[index1][0] == i:\n summa1 = cum1[index1][1]\n index1 -= 1\n if cum2[index2][0] == i:\n summa2 = cum2[index2][1]\n index2 -= 1\n if abs(summa1 - summa2) > difference:\n difference = abs(summa1 - summa2)\n return difference",
"def KSm_gof(data, relative_time_grid, sf, args=(), alternative='two-sided'):\n \n def deltaNA (d, Y):\n return np.sum([ np.power(Y-l,-1) for l in range(int(d))])\n \n mortalities = pd.Series(data.loc[data['status']==1,'mortality'].value_counts(), index=relative_time_grid).fillna(0)\n censored = pd.Series(data.loc[data['status']==2,'mortality'].value_counts(), index=relative_time_grid).fillna(0)\n counts = mortalities + censored\n n = len(counts)\n atRisk = pd.Series(data=np.dot(np.triu(np.ones((n,n)),k=0), counts), index=relative_time_grid)\n survived = pd.Series(data=np.dot(np.triu(np.ones((n,n)),k=1), counts) + np.array(censored), index=relative_time_grid)\n \n sfvals = sf(relative_time_grid, *args)\n \n def deltaNA (d, Y):\n return np.sum([ np.power(Y-l,-1) for l in range(int(d))])\n beta_NelsonAalen = np.dot(np.tril(np.ones((n,n)),k=0), [deltaNA(mortalities.loc[ind],atRisk.loc[ind]) for ind in atRisk.index] )\n alpha_CumCensor = np.dot(np.tril(np.ones((n,n)),k=0), [deltaNA(censored.loc[ind],survived.loc[ind]) for ind in survived.index] )\n A_CumHazardCenAdjusted = np.dot(np.tril(np.ones((n,n)),k=0),\n [0]+[np.exp(-0.5*alpha_CumCensor[i-1])*np.log(sfvals[i-1]/sfvals[i]) for i in range(1,len(atRisk))] )\n B_NelsonAalenCenAdjusted = np.dot(np.tril(np.ones((n,n)),k=0),\n [0]+[np.exp(-0.5*alpha_CumCensor[i-1])*deltaNA(mortalities.iloc[i],atRisk.iloc[i]) for i in range(1,len(atRisk))] )\n Yminus = np.sqrt(n) * 0.5 * (sfvals[1:]+np.exp(-beta_NelsonAalen[:-1])) * (A_CumHazardCenAdjusted[1:] - B_NelsonAalenCenAdjusted[:-1])\n Yplus = np.sqrt(n) * 0.5 * (sfvals+np.exp(-beta_NelsonAalen)) * (A_CumHazardCenAdjusted - B_NelsonAalenCenAdjusted)\n Yconcat = pd.concat([pd.Series(data=Yplus,index=atRisk.index), pd.Series(data=Yminus,index=atRisk.index[1:])]).sort_index()\n R_MaxCDF = 1 - 0.5*(np.exp(-beta_NelsonAalen[-1])+sfvals[-1])\n \n return KSmResidues(Yconcat, R_MaxCDF)",
"def ks_test(a,b):\n a,b = np.asarray(a),np.asarray(b)\n if len(a) != len(a):\n raise ValueError(\"a and b must have the same size\")\n \n return stats.ks_2samp(a,b)",
"def KS(self, using, dx=0.0001):\n pits = np.array(self.PIT(using=using,dx=dx))\n ks_result = skgof.ks_test(pits, stats.uniform())\n return ks_result.statistic, ks_result.pvalue",
"def test_xml_kpointsw(xml_parser):\n \n kpointsw = xml_parser.get_kpointsw()\n test = np.array([0.00462963, 0.02777778, 0.02777778,\n 0.01388889, 0.05555556, 0.11111111,\n 0.05555556, 0.05555556, 0.05555556,\n 0.01388889, 0.03703704, 0.11111111,\n 0.05555556, 0.11111111, 0.11111111,\n 0.02777778, 0.03703704, 0.05555556,\n 0.02777778, 0.00462963])\n np.testing.assert_allclose(kpointsw, test)",
"def SkewedStudentLikelihoodHomoscedastic(data, comparedata, measerror=None):\n __standartChecksBeforeStart(data, comparedata)\n if measerror is None:\n measerror = __generateMeaserror(data)\n\n measerror = np.mean(measerror)\n\n res = np.array(__calcSimpleDeviation(data, comparedata))\n\n # TODO Maximizing with negative to zero?\n # Original: -np.prod(1 / (np.sqrt(2 * np.pi) * measerror) * np.exp(-1 * (res ** 2) / (2)))\n return -np.sum(\n (1 / (np.sqrt(2 * np.pi) * measerror) * np.exp(-1 * (res**2) / (2)))\n )",
"def ks_2samp(a, b, aw=None, bw=None):\n\n # Methodology for weighted Kolmogorov-Smirnov test taken from Numerical\n # Methods of Statistics - J. Monahan\n\n ab = np.sort(np.concatenate((a, b)))\n\n D = np.max(np.abs(ecdf(a, aw)(ab) - ecdf(b, bw)(ab)))\n\n n1 = len(a) if aw is None else np.sum(aw) ** 2 / np.sum(aw ** 2)\n n2 = len(b) if bw is None else np.sum(bw) ** 2 / np.sum(bw ** 2)\n\n en = np.sqrt(n1 * n2 / float(n1 + n2))\n\n p = kstwobign.sf((en + 0.12 + 0.11 / en) * D) # Stephens (1970)\n\n return D, p",
"def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n assert_allclose((D, Pval), (0.46, 3.801e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n assert_allclose((D, Pval), (0.46, 5.084e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n assert_allclose((D, Pval), (0.53, 0.0003576), rtol=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n assert_allclose((D, Pval), (0.53, 0.0003576), rtol=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n assert_allclose((D, Pval), (0.48, 0.001772), rtol=1e-3)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n assert_allclose((D, Pval), (6.9388939039072284e-18, 1.0), rtol=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)",
"def sp_tests(reg):\r\n if reg.w != None:\r\n try:\r\n w = reg.w.sparse\r\n except:\r\n w = reg.w\r\n Phi = reg.predy\r\n phi = reg.phiy\r\n # Pinkse_error:\r\n Phi_prod = Phi * (1 - Phi)\r\n u_naive = reg.u_naive\r\n u_gen = reg.u_gen\r\n sig2 = np.sum((phi * phi) / Phi_prod) / reg.n\r\n LM_err_num = np.dot(u_gen.T, (w * u_gen)) ** 2\r\n trWW = np.sum((w * w).diagonal())\r\n trWWWWp = trWW + np.sum((w * w.T).diagonal())\r\n LM_err = float(1.0 * LM_err_num / (sig2 ** 2 * trWWWWp))\r\n LM_err = np.array([LM_err, chisqprob(LM_err, 1)])\r\n # KP_error:\r\n moran = moran_KP(reg.w, u_naive, Phi_prod)\r\n # Pinkse-Slade_error:\r\n u_std = u_naive / np.sqrt(Phi_prod)\r\n ps_num = np.dot(u_std.T, (w * u_std)) ** 2\r\n trWpW = np.sum((w.T * w).diagonal())\r\n ps = float(ps_num / (trWW + trWpW))\r\n # chi-square instead of bootstrap.\r\n ps = np.array([ps, chisqprob(ps, 1)])\r\n else:\r\n raise Exception(\"W matrix must be provided to calculate spatial tests.\")\r\n return LM_err, moran, ps",
"def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n self.assertFloatEqual((D, Pval), (0.46, 3.801e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n self.assertFloatEqual((D, Pval), (0.46, 5.084e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n self.assertFloatEqual((D,Pval), (0.48, 0.001772), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n self.assertFloatEqual((D,Pval), (6.9388939039072284e-18, 1.), eps=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)",
"def test_pkstwo(self):\n assert_allclose(pkstwo(2.3), [1 - 5.084e-05], rtol=1e-5)",
"def _wk_test(self, alternative='two-sided', alpha=0.01):\n\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n\n u_wk, p_wk = mannwhitneyu(q0, q1, alternative=alternative)\n stats_wk = ranksums(q0, q1)[0]\n\n if p_wk <= alpha:\n h = 1\n else:\n h = 0\n\n stats_wk = {'zval': stats_wk, 'pval': p_wk}\n\n return h, stats_wk",
"def test_sparsity_topk_mag_dykstra(self, l=1e-3):\n n = int(1e3)\n seed = np.random.RandomState(0)\n y = jnp.array(seed.randn(n))\n k = 10\n out_top_k_dykstra = sparse_soft_topk_mag_dykstra(y, k, l=l)\n self.assertAlmostEqual(\n len(jnp.where(jnp.absolute(out_top_k_dykstra) > 0)[0]), k\n )",
"def test2Samp():\n\n sigmax = 1.0\n sigmay = 3.0\n mux = 0.0\n muy = 3.0\n nx = 10\n ny = 10\n # Update\n np.random.RandomState(0) # set seed to 0\n datax = sigmax * np.random.randn(nx) + mux\n datay = sigmay * np.random.randn(ny) + muy\n datadict = {'x': datax, 'y': datay}\n ranksums(datadict, dataLabel='Test Rank Sums (scipy)')\n ranksums(datadict, dataLabel='Test Rank Sums, Paired (scipy)', paired=True)\n ttest(datadict, dataLabel='Standard t-test (scipy)', \n textline=True, decimals=3, units='mV')\n ttest(datadict, dataLabel='Standard t-test (scipy), paired', paired=True,\n textline=True, decimals=3)\n (p, n) = permTS(datadict, dataLabel='R permTS')\n permutation(datadict, dataLabel='Test simple permute')\n KS(datadict, dataLabel='Test with KS')",
"def kge_2012(simulated_array, observed_array, s=(1, 1, 1), replace_nan=None,\n replace_inf=None, remove_neg=False, remove_zero=False, return_all=False):\n \n simulated_array, observed_array = treat_values(\n simulated_array,\n observed_array,\n replace_nan=replace_nan,\n replace_inf=replace_inf,\n remove_neg=remove_neg,\n remove_zero=remove_zero\n )\n\n # Means\n sim_mean = np.mean(simulated_array)\n obs_mean = np.mean(observed_array)\n\n # Standard Deviations\n sim_sigma = np.std(simulated_array)\n obs_sigma = np.std(observed_array)\n\n # Pearson R\n top_pr = np.sum((observed_array - obs_mean) * (simulated_array - sim_mean))\n bot1_pr = np.sqrt(np.sum((observed_array - obs_mean) ** 2))\n bot2_pr = np.sqrt(np.sum((simulated_array - sim_mean) ** 2))\n pr = top_pr / (bot1_pr * bot2_pr)\n\n # Ratio between mean of simulated and observed data\n beta = sim_mean / obs_mean\n\n # CV is the coefficient of variation (standard deviation / mean)\n sim_cv = sim_sigma / sim_mean\n obs_cv = obs_sigma / obs_mean\n\n # Variability Ratio, or the ratio of simulated CV to observed CV\n gam = sim_cv / obs_cv\n\n if obs_mean != 0 and obs_sigma != 0 and sim_mean != 0:\n kge = 1 - np.sqrt(\n (s[0] * (pr - 1)) ** 2 + (s[1] * (gam - 1)) ** 2 + (s[2] * (beta - 1)) ** 2)\n else:\n if obs_mean == 0:\n warnings.warn(\n 'Warning: The observed data mean is 0. Therefore, Beta is infinite and the KGE '\n 'value cannot be computed.')\n if obs_sigma == 0:\n warnings.warn(\n 'Warning: The observed data standard deviation is 0. Therefore, Gamma is infinite '\n 'and the KGE value cannot be computed.')\n if sim_mean == 0:\n warnings.warn(\n 'Warning: The simulated data mean is 0. Therefore, Gamma is infinite '\n 'and the KGE value cannot be computed.')\n kge = np.nan\n\n assert type(return_all) == bool, \"expected <type 'bool'> for parameter return_all, got {}\".format(type(return_all))\n\n if return_all:\n return pr, gam, beta, kge\n else:\n return kge",
"def test_mtm_cross_spectrum():\r\n NW = 4\r\n K = 2 * NW - 1\r\n\r\n N = 2 ** 10\r\n n_reps = 10\r\n n_freqs = N\r\n\r\n tapers, eigs = tsa.dpss_windows(N, NW, 2 * NW - 1)\r\n\r\n est_psd = []\r\n for k in range(n_reps):\r\n data, nz, alpha = utils.ar_generator(N=N)\r\n fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)\r\n # 'one-sided', so multiply by 2:\r\n psd = 2 * (hz * hz.conj()).real\r\n\r\n tdata = tapers * data\r\n\r\n tspectra = fftpack.fft(tdata)\r\n\r\n L = N / 2 + 1\r\n sides = 'onesided'\r\n w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)\r\n\r\n sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)\r\n est_psd.append(sxx)\r\n\r\n fxx = np.mean(est_psd, 0)\r\n\r\n psd_ratio = np.mean(fxx / psd)\r\n\r\n # This is a rather lenient test, making sure that the average ratio is 1 to\r\n # within an order of magnitude. That is, that they are equal on average:\r\n npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)\r\n\r\n # Test raising of error in case the inputs don't make sense:\r\n npt.assert_raises(ValueError,\r\n tsa.mtm_cross_spectrum,\r\n tspectra, np.r_[tspectra, tspectra],\r\n (w, w))",
"def eps_MTSI(omg, kx, ky, kz, prt=PlasmaParameters()):\n\n k2 = kz ** 2 + ky ** 2\n\n if k2 == 0:\n raise RuntimeError(\"The wave vector is Zero !!\")\n\n iEps = 1/omg**2\n eEpsz = prt.mi_over_me * ( kz**2 ) / ( (omg - ky * prt.driftSpeed/prt.BohmSpeed)**2 * k2 )\n eEpsy = prt.mi_over_me * ( ky**2 ) / ( ((omg - ky * prt.driftSpeed/prt.BohmSpeed)**2 - prt.electronCyclotronFrequency**2/ (prt.ionPlasmaFrequency/u.rad)**2)* k2 )\n\n return 1 - iEps - eEpsz - eEpsy",
"def kpss(x, regression='c', lags=None, store=False):\n nobs = len(x)\n x = np.asarray(x)\n hypo = regression.lower()\n\n # if m is not one, n != m * n\n if nobs != x.size:\n raise ValueError(\"x of shape {0} not understood\".format(x.shape))\n\n if hypo == 'ct':\n # p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,\n # where beta is the trend, r_t a random walk and e_t a stationary\n # error term.\n resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid\n crit = [0.119, 0.146, 0.176, 0.216]\n elif hypo == 'c':\n # special case of the model above, where beta = 0 (so the null\n # hypothesis is that the data is stationary around r_0).\n resids = x - x.mean()\n crit = [0.347, 0.463, 0.574, 0.739]\n else:\n raise ValueError(\"hypothesis '{0}' not understood\".format(hypo))\n\n if lags is None:\n # from Kwiatkowski et al. referencing Schwert (1989)\n lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))\n\n pvals = [0.10, 0.05, 0.025, 0.01]\n\n eta = (resids.cumsum()**2).sum() / nobs**2 # eq. 11, p. 165\n s_hat = _sigma_est_kpss(resids, nobs, lags)\n\n kpss_stat = eta / s_hat\n p_value = np.interp(kpss_stat, crit, pvals)\n\n if p_value == pvals[-1]:\n warnings.warn(\"p-value is smaller than the indicated p-value\",\n InterpolationWarning)\n elif p_value == pvals[0]:\n warnings.warn(\"p-value is greater than the indicated p-value\",\n InterpolationWarning)\n\n crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}\n\n if store:\n rstore = ResultsStore()\n rstore.lags = lags\n rstore.nobs = nobs\n\n stationary_type = \"level\" if hypo == 'c' else \"trend\"\n rstore.H0 = \"The series is {0} stationary\".format(stationary_type)\n rstore.HA = \"The series is not {0} stationary\".format(stationary_type)\n\n return kpss_stat, p_value, crit_dict, rstore\n else:\n return kpss_stat, p_value, lags, crit_dict"
] | [
"0.6556137",
"0.6203104",
"0.61933494",
"0.61850893",
"0.6151925",
"0.60617256",
"0.60514313",
"0.6029852",
"0.5988094",
"0.5967601",
"0.5933769",
"0.59139836",
"0.5897438",
"0.58525157",
"0.5850537",
"0.58362997",
"0.58152133",
"0.5803236",
"0.58009744",
"0.57719064",
"0.5768434",
"0.5711885",
"0.5672237",
"0.5664635",
"0.5589002",
"0.558141",
"0.5568792",
"0.555755",
"0.5544846",
"0.5513523"
] | 0.6239925 | 1 |
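Editor's note: several of the negatives in the row above (ks_test, ks_2samp wrappers) ultimately delegate to scipy.stats.ks_2samp. As a quick reference, a minimal self-contained sketch of that underlying call on hypothetical sample data (the sample sizes and shift are illustrative only):

import numpy as np
from scipy import stats

# Two hypothetical independent samples; the second is shifted by 0.5.
rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, 200)
b = rng.normal(0.5, 1.0, 200)

# ks_2samp returns the KS statistic D and the two-sided p-value.
statistic, pvalue = stats.ks_2samp(a, b)
print(f"D={statistic:.3f}, p={pvalue:.3g}")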
Performs the Kolmogorov-Smirnov 1- or 2-sided tests modified for right-censored time-to-death data, based on residues calculated from the functions KSm_2samples or KSm_gof. The test statistic is based on a time-transformed Brownian bridge, more precisely the supremum of a Brownian bridge restricted to (0, maxcdf). The asymptotic distribution is described in Fleming et al. (1980) and Schey (1977). The two-sided version uses a conservative approximation described in Schey (1977), p ~ 2p(A,R), when the one-sided probability is < 0.4. The exact two-sided probability could be calculated but is not yet implemented; see Koziol and Byar (1975). References: [1] Thomas R. Fleming, Judith R. O'Fallon, Peter C. O'Brien and David P. Harrington. Biometrics Vol. 36, No. 4 (Dec., 1980), pp. 607-625. [2] Schey, H. M. (1977). Communications in Statistics A6, 1361-1365. [3] Koziol, J. R. and Byar, D. P. (1975). Technometrics 17, 507-510. | def KSm_test(residues, maxcdf, alternative='two-sided', alpha=0.05):
Ystats = residues
R_MaxCDF = maxcdf
R_func = np.sqrt(R_MaxCDF-R_MaxCDF*R_MaxCDF)
if alternative=='two-sided':
A_MaxAbsY = np.max(np.abs(Ystats))
prob_BrownianBridge = 1-norm.cdf(A_MaxAbsY/R_func) + norm.cdf((2*R_MaxCDF-1)*A_MaxAbsY/R_func)*np.exp(-2*A_MaxAbsY*A_MaxAbsY)
alpha = alpha/2
if prob_BrownianBridge < 0.4:
if alpha > 0.4:
raise Exception('alpha >0.4 could not be used with Schey (1977) two-sided approximation')
else:
return KStestResult(A_MaxAbsY, prob_BrownianBridge*2,
newton(lambda x:1-alpha-norm.cdf(x/R_func) + norm.cdf((2*R_MaxCDF-1)*x/R_func)*np.exp(-2*x*x), 1.5) )
else:
return KStestResult(A_MaxAbsY, '>= 0.8',
newton(lambda x:1-alpha-norm.cdf(x/R_func) + norm.cdf((2*R_MaxCDF-1)*x/R_func)*np.exp(-2*x*x), 1.5) )
if alternative=='one-sided':
V_MaxY = np.max([0]+list(Ystats.values))
prob_BrownianBridge = 1-norm.cdf(V_MaxY/R_func) + norm.cdf((2*R_MaxCDF-1)*V_MaxY/R_func)*np.exp(-2*V_MaxY*V_MaxY)
return KStestResult(V_MaxY, prob_BrownianBridge,
newton(lambda x:1-alpha-norm.cdf(x/R_func) + norm.cdf((2*R_MaxCDF-1)*x/R_func)*np.exp(-2*x*x), 1.5) )
else:
raise Exception('Unrecognised alternative option:'+str(alternative)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n assert_allclose((D, Pval), (0.46, 3.801e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n assert_allclose((D, Pval), (0.46, 5.084e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n assert_allclose((D, Pval), (0.53, 0.0003576), rtol=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n assert_allclose((D, Pval), (0.53, 0.0003576), rtol=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n assert_allclose((D, Pval), (0.48, 0.001772), rtol=1e-3)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n assert_allclose((D, Pval), (6.9388939039072284e-18, 1.0), rtol=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n assert_allclose((D, Pval), (0.46, 2.542e-05), rtol=1e-4)",
"def test_ks2x(self):\n D, Pval = ks_test(self.x1, self.x2)\n self.assertFloatEqual((D, Pval), (0.46, 3.801e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, exact=False)\n self.assertFloatEqual((D, Pval), (0.46, 5.084e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2[:20])\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x2[:20], self.x1)\n self.assertFloatEqual((D,Pval), (0.53, 0.0003576), eps=1e-4)\n D, Pval = ks_test(self.x1[:20], self.x2)\n self.assertFloatEqual((D,Pval), (0.48, 0.001772), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"greater\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"g\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)\n D, Pval = ks_test(self.x1, self.x2, alt=\"less\")\n self.assertFloatEqual((D,Pval), (6.9388939039072284e-18, 1.), eps=1e-4)\n D, Pval = ks_test(self.x2, self.x1, alt=\"l\")\n self.assertFloatEqual((D,Pval), (0.46, 2.542e-05), eps=1e-4)",
"def ks_test_function(dim, thresh):\n def ks_test(table1, table2):\n from scipy.stats import ks_2samp\n sample1 = table1.get_cols(dim)[0]\n sample2 = table2.get_cols(dim)[0]\n if thresh != None:\n sample1[sample1<thresh] = 0\n sample2[sample2<thresh] = 0\n ks, p_ks = ks_2samp(sample1, sample2)\n return ks\n return ks_test",
"def KSm_2samples(data1, relative_time_grid1, data2, relative_time_grid2):\n \n def deltaNA (d, Y):\n return np.sum([ np.power(Y-l,-1) for l in range(int(d))])\n\n mortalities1 = pd.Series(data1.loc[data1['status']==1,'mortality'].value_counts(), index=relative_time_grid1).fillna(0)\n censored1 = pd.Series(data1.loc[data1['status']==2,'mortality'].value_counts(), index=relative_time_grid1).fillna(0)\n counts1 = mortalities1 + censored1\n l1 = len(counts1)\n atRisk1 = pd.Series(data=np.dot(np.triu(np.ones((l1,l1)),k=0), counts1), index=relative_time_grid1)\n survived1 = pd.Series(data=np.dot(np.triu(np.ones((l1,l1)),k=1), counts1) + np.array(censored1), index=relative_time_grid1)\n \n mortalities2 = pd.Series(data2.loc[data2['status']==1,'mortality'].value_counts(), index=relative_time_grid2).fillna(0)\n censored2 = pd.Series(data2.loc[data2['status']==2,'mortality'].value_counts(), index=relative_time_grid2).fillna(0)\n counts2 = mortalities2 + censored2\n l2 = len(counts2)\n atRisk2 = pd.Series(data=np.dot(np.triu(np.ones((l2,l2)),k=0), counts2), index=relative_time_grid2)\n survived2 = pd.Series(data=np.dot(np.triu(np.ones((l2,l2)),k=1), counts2) + np.array(censored2), index=relative_time_grid2)\n \n #Merge the 2 time_grids\n unique_time_in_grid2 = [t for t in relative_time_grid2 if t not in relative_time_grid1]\n unique_time_in_grid1 = [t for t in relative_time_grid1 if t not in relative_time_grid2]\n \n relative_time_grid = np.unique(np.sort(relative_time_grid1+relative_time_grid2))\n mortalities1 = pd.concat([mortalities1, pd.Series(data=float(0),index=unique_time_in_grid2)]).sort_index()\n mortalities2 = pd.concat([mortalities2, pd.Series(data=float(0),index=unique_time_in_grid1)]).sort_index()\n \n def lastValueInTimeSeries(ts, t):\n if np.searchsorted(np.array(ts.index), t) != 0:\n return ts.iloc[np.searchsorted(np.array(ts.index), t)-1]\n else:\n return ts.iloc[0]\n \n atRisk1 = pd.concat([atRisk1, pd.Series(data=[lastValueInTimeSeries(atRisk1, t) for t in unique_time_in_grid2],index=unique_time_in_grid2)]).sort_index()\n atRisk2 = pd.concat([atRisk2, pd.Series(data=[lastValueInTimeSeries(atRisk2, t) for t in unique_time_in_grid1],index=unique_time_in_grid1)]).sort_index()\n n1 = np.sum(counts1)\n n2 = np.sum(counts2)\n n = n1 + n2\n relative_time_grid = np.unique(np.sort(list(atRisk1.index[atRisk1>0])+list(atRisk2.index[atRisk2>0])))\n l = len(relative_time_grid)\n \n beta1_NelsonAalen = np.dot(np.tril(np.ones((l,l)),k=0), [deltaNA(mortalities1.loc[ind],atRisk1.loc[ind]) for ind in relative_time_grid] )\n beta2_NelsonAalen = np.dot(np.tril(np.ones((l,l)),k=0), [deltaNA(mortalities2.loc[ind],atRisk2.loc[ind]) for ind in relative_time_grid] )\n alpha1minus_CumCensor = pd.Series(data=[ deltaNA(n1-atRisk1.loc[relative_time_grid[j]],n1) - beta1_NelsonAalen[j-1] for j in range(1,l)], index=relative_time_grid[1:])\n alpha2minus_CumCensor = pd.Series(data=[ deltaNA(n2-atRisk2.loc[relative_time_grid[j]],n2) - beta2_NelsonAalen[j-1] for j in range(1,l)], index=relative_time_grid[1:])\n \n eta_SqrtAvgN = pd.Series(data=[1/np.sqrt( 1/(n1*np.exp(-alpha1minus_CumCensor.loc[ind])) + 1/(n2*np.exp(-alpha2minus_CumCensor.loc[ind])) )\n for ind in relative_time_grid[1:]], index=relative_time_grid[1:])\n U_NelsonAalenDiffAdjusted = np.dot(np.tril(np.ones((l,l)),k=0), \n [0]+[ eta_SqrtAvgN.loc[ind]*(deltaNA(mortalities1.loc[ind],atRisk1.loc[ind])-deltaNA(mortalities2.loc[ind],atRisk2.loc[ind]))\n for ind in relative_time_grid[1:]] )\n \n Y_n1n2 = pd.Series(data=0.5 * (np.exp(-beta1_NelsonAalen) + 
np.exp(-beta2_NelsonAalen)) * U_NelsonAalenDiffAdjusted, index=relative_time_grid )\n R_MaxCDF = 1 - 0.5 * (np.exp(-beta1_NelsonAalen[-1]) + np.exp(-beta2_NelsonAalen[-1]))\n \n return KSmResidues(Y_n1n2, R_MaxCDF)",
"def test_G_2_by_2_2tailed_examples(self):\r\n # example from p 731, Sokal and Rohlf (1995)\r\n # without correction\r\n self.assertFloatEqual(G_2_by_2(12, 22, 16, 50, False, False)[0],\r\n 1.33249, 0.0001)\r\n self.assertFloatEqual(G_2_by_2(12, 22, 16, 50, False, False)[1],\r\n 0.24836, 0.0001)\r\n # with correction\r\n self.assertFloatEqual(G_2_by_2(12, 22, 16, 50, True, False)[0],\r\n 1.30277, 0.0001)\r\n self.assertFloatEqual(G_2_by_2(12, 22, 16, 50, True, False)[1],\r\n 0.25371, 0.0001)",
"def test_ks_test(mode):\n indices = np.random.randint(0, 1000, 1000)\n out = compute_indices_ks_test(indices, 1000, mode=mode)\n assert all([o > 0.0 for o in out])",
"def ks_test(x, y=None, alt=\"two sided\", exact=None, warn_for_ties=True):\r\n # translation from R 2.4\r\n num_x = len(x)\r\n num_y = None\r\n x = zip(x, zeros(len(x), int))\r\n lo = [\"less\", \"lo\", \"lower\", \"l\", \"lt\"]\r\n hi = [\"greater\", \"hi\", \"high\", \"h\", \"g\", \"gt\"]\r\n two = [\"two sided\", \"2\", 2, \"two tailed\", \"two\", \"two.sided\"]\r\n Pval = None\r\n # in anticipation of actually implementing the 1-sample cases\r\n if y is not None:\r\n num_y = len(y)\r\n y = zip(y, ones(len(y), int))\r\n n = num_x * num_y / (num_x + num_y)\r\n combined = x + y\r\n if len(set(combined)) < num_x + num_y:\r\n ties = True\r\n else:\r\n ties = False\r\n\r\n combined = array(combined, dtype=[('stat', float), ('sample', int)])\r\n combined.sort(order='stat')\r\n cumsum = zeros(combined.shape[0], float)\r\n scales = array([1 / num_x, -1 / num_y])\r\n indices = combined['sample']\r\n cumsum = scales.take(indices)\r\n cumsum = cumsum.cumsum()\r\n if exact is None:\r\n exact = num_x * num_y < 1e4\r\n\r\n if alt in two:\r\n stat = max(fabs(cumsum))\r\n elif alt in lo:\r\n stat = -cumsum.min()\r\n elif alt in hi:\r\n stat = cumsum.max()\r\n else:\r\n raise RuntimeError(\"Unknown alt: %s\" % alt)\r\n if exact and alt in two and not ties:\r\n Pval = 1 - psmirnov2x(stat, num_x, num_y)\r\n else:\r\n raise NotImplementedError\r\n\r\n if Pval is None:\r\n if alt in two:\r\n Pval = 1 - pkstwo(sqrt(n) * stat)\r\n else:\r\n Pval = exp(-2 * n * stat ** 2)\r\n\r\n if ties and warn_for_ties:\r\n warnings.warn(\"Cannot compute correct KS probability with ties\")\r\n\r\n try: # if numpy arrays were input, the Pval can be an array of len==1\r\n Pval = Pval[0]\r\n except (TypeError, IndexError):\r\n pass\r\n return stat, Pval",
"def test_pk2x(self):\n assert_allclose(pkolmogorov2x(0.7199, 50), (1 - 6.661e-16), rtol=1e-5)\n assert_allclose(pkolmogorov2x(0.08, 30), 0.01754027, rtol=1e-5)\n assert_allclose(pkolmogorov2x(0.03, 300), 0.05753413, rtol=1e-5)",
"def ks_test(timeseries):\r\n\r\n hour_ago = time() - 3600\r\n ten_minutes_ago = time() - 600\r\n reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])\r\n probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])\r\n\r\n if reference.size < 20 or probe.size < 20:\r\n return False\r\n\r\n ks_d,ks_p_value = scipy.stats.ks_2samp(reference, probe)\r\n\r\n if ks_p_value < 0.05 and ks_d > 0.5:\r\n adf = sm.tsa.stattools.adfuller(reference, 10)\r\n if adf[1] < 0.05:\r\n return True\r\n\r\n return False",
"def ks_test(a,b):\n a,b = np.asarray(a),np.asarray(b)\n if len(a) != len(a):\n raise ValueError(\"a and b must have the same size\")\n \n return stats.ks_2samp(a,b)",
"def ks_test(df1, df2):\n p_val_list = []\n stat_list = []\n for element in df1.columns:\n res = stats.ks_2samp(df1[element], df2[element])\n p_val_list.append(res[1])\n stat_list.append(res[0])\n n = np.argmax(stat_list)\n p_val = p_val_list[n]\n stat = stat_list[n]\n return p_val, stat, n, p_val_list, stat_list",
"def _testDifficultCases():\n files = getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/')\n RunData([files[0], ], out='I800nm50k', wavelength='l800')\n # ============================================================\n # Fitting with MCMC:\n # ******************** Fitted parameters ********************\n # peak = 1.876476e+05 +- 5.178195e+03\n # center_x = 1.013078e+01 +- 5.653792e-02\n # center_y = 9.747014e+00 +- 7.721453e-02\n # radius = 4.660135e-01 +- 7.392010e-03\n # focus = 4.625621e-01 +- 2.905099e-02\n # width_x = 1.339326e-01 +- 3.069773e-02\n # width_y = 1.685128e-01 +- 2.864525e-02\n # ============================================================\n # GoF: 63.013339033 Maximum difference: 27583.5231737\n #\n # FIT UNLIKELY TO BE GOOD...\n #\n # Amplitude estimate: 908507.619723\n # ============================================================\n # FWHM (requirement 10.8 microns):\n # 4.25 +/- 0.838 microns\n # x: 3.78 +/- 0.867 microns\n # y: 4.76 +/- 0.809 microns\n # ============================================================\n\n\n files = getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/')\n RunData([files[0], ], out='I800nm20k', wavelength='l800')\n #Kernel probably too narrow...\n # ============================================================\n # Fitting with MCMC:\n # ******************** Fitted parameters ********************\n # peak = 6.347887e+04 +- 7.413094e+02\n # center_x = 1.010584e+01 +- 5.058798e-02\n # center_y = 9.761372e+00 +- 5.927853e-02\n # radius = 4.584184e-01 +- 3.703650e-03\n # focus = 4.391148e-01 +- 7.021983e-03\n # width_x = 1.519947e-01 +- 3.559739e-02\n # width_y = 1.838032e-01 +- 4.041398e-02\n # ============================================================\n # GoF: 3.18858069585 Maximum difference: 1676.70415821\n # Amplitude estimate: 242256.307521\n # ============================================================\n # FWHM (requirement 10.8 microns):\n # 4.72 +/- 1.072 microns\n # x: 4.3 +/- 1.006 microns\n # y: 5.19 +/- 1.142 microns\n # ============================================================",
"def lks_2samp (data1,data2):\r\n j1 = 0\r\n j2 = 0\r\n fn1 = 0.0\r\n fn2 = 0.0\r\n n1 = len(data1)\r\n n2 = len(data2)\r\n en1 = n1\r\n en2 = n2\r\n d = 0.0\r\n data1.sort()\r\n data2.sort()\r\n while j1 < n1 and j2 < n2:\r\n d1=data1[j1]\r\n d2=data2[j2]\r\n if d1 <= d2:\r\n fn1 = (j1)/float(en1)\r\n j1 = j1 + 1\r\n if d2 <= d1:\r\n fn2 = (j2)/float(en2)\r\n j2 = j2 + 1\r\n dt = (fn2-fn1)\r\n if math.fabs(dt) > math.fabs(d):\r\n d = dt\r\n try:\r\n en = math.sqrt(en1*en2/float(en1+en2))\r\n prob = ksprob((en+0.12+0.11/en)*abs(d))\r\n except:\r\n prob = 1.0\r\n return d, prob",
"def _ks_2samp_fast(prepared_data1, data2, prepared_weights1, weights2, F1):\n indices = numpy.searchsorted(prepared_data1, data2)\n weights2 /= numpy.sum(weights2)\n prepared_weights2 = numpy.bincount(indices, weights=weights2, minlength=len(prepared_data1))\n F2 = compute_cdf(prepared_weights2)\n return numpy.max(numpy.abs(F1 - F2))",
"def ks_2samp(a, b, aw=None, bw=None):\n\n # Methodology for weighted Kolmogorov-Smirnov test taken from Numerical\n # Methods of Statistics - J. Monahan\n\n ab = np.sort(np.concatenate((a, b)))\n\n D = np.max(np.abs(ecdf(a, aw)(ab) - ecdf(b, bw)(ab)))\n\n n1 = len(a) if aw is None else np.sum(aw) ** 2 / np.sum(aw ** 2)\n n2 = len(b) if bw is None else np.sum(bw) ** 2 / np.sum(bw ** 2)\n\n en = np.sqrt(n1 * n2 / float(n1 + n2))\n\n p = kstwobign.sf((en + 0.12 + 0.11 / en) * D) # Stephens (1970)\n\n return D, p",
"def test2Samp():\n\n sigmax = 1.0\n sigmay = 3.0\n mux = 0.0\n muy = 3.0\n nx = 10\n ny = 10\n # Update\n np.random.RandomState(0) # set seed to 0\n datax = sigmax * np.random.randn(nx) + mux\n datay = sigmay * np.random.randn(ny) + muy\n datadict = {'x': datax, 'y': datay}\n ranksums(datadict, dataLabel='Test Rank Sums (scipy)')\n ranksums(datadict, dataLabel='Test Rank Sums, Paired (scipy)', paired=True)\n ttest(datadict, dataLabel='Standard t-test (scipy)', \n textline=True, decimals=3, units='mV')\n ttest(datadict, dataLabel='Standard t-test (scipy), paired', paired=True,\n textline=True, decimals=3)\n (p, n) = permTS(datadict, dataLabel='R permTS')\n permutation(datadict, dataLabel='Test simple permute')\n KS(datadict, dataLabel='Test with KS')",
"def test_multi_taper_psd_csd():\r\n\r\n N = 2 ** 10\r\n n_reps = 10\r\n\r\n psd = []\r\n est_psd = []\r\n est_csd = []\r\n for jk in [True, False]:\r\n for k in range(n_reps):\r\n for adaptive in [True, False]:\r\n ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)\r\n ar_seq -= ar_seq.mean()\r\n fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha],\r\n n_freqs=N)\r\n psd.append(2 * (hz * hz.conj()).real)\r\n f, psd_mt, nu = tsa.multi_taper_psd(ar_seq, adaptive=adaptive,\r\n jackknife=jk)\r\n est_psd.append(psd_mt)\r\n f, csd_mt = tsa.multi_taper_csd(np.vstack([ar_seq, ar_seq]),\r\n adaptive=adaptive)\r\n # Symmetrical in this case, so take one element out:\r\n est_csd.append(csd_mt[0][1])\r\n\r\n fxx = np.mean(psd, axis=0)\r\n fxx_est1 = np.mean(est_psd, axis=0)\r\n fxx_est2 = np.mean(est_csd, axis=0)\r\n\r\n # Tests the psd:\r\n psd_ratio1 = np.mean(fxx_est1 / fxx)\r\n npt.assert_array_almost_equal(psd_ratio1, 1, decimal=-1)\r\n # Tests the csd:\r\n psd_ratio2 = np.mean(fxx_est2 / fxx)\r\n npt.assert_array_almost_equal(psd_ratio2, 1, decimal=-1)",
"def test_pkstwo(self):\n assert_allclose(pkstwo(2.3), [1 - 5.084e-05], rtol=1e-5)",
"def test_genx(nsd, backend):\n # NCOLS of data:\n # 2 - test kernel only\n # 3 - test kernel and chi2 calculation\n # 4 - test resolution smearing and chi2 calculation\n\n test_name, slabs, data = nsd\n\n kernel_test(slabs, data, backend)",
"def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )",
"def test_hky_nielsen(self):\n distribution = {'A':.2,'C':.3,'G':.3,'T':.2}\n kappa = 2\n rate_matrix_object = RateMatrix.get_unscaled_hky85_rate_matrix(distribution, kappa)\n rate_matrix_object.normalize()\n rate_matrix = rate_matrix_object.get_dictionary_rate_matrix()\n path_length = 2\n initial_state = 'A'\n terminal_state = 'C'\n states = 'ACGT'\n iterations = 200\n rejection_changes = []\n i = 0\n while i < iterations:\n rejection_events = get_rejection_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if rejection_events is not None:\n rejection_changes.append(len(rejection_events))\n i += 1\n nielsen_changes = []\n i = 0\n while i < iterations:\n nielsen_events = get_nielsen_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if nielsen_events is not None:\n nielsen_changes.append(len(nielsen_events))\n i += 1\n t, p = scipy.stats.mannwhitneyu(rejection_changes, nielsen_changes)\n self.failIf(p < .001)",
"def test_cmi_knn1(means, stds, rho):\n cov = np.array([[stds[0]**2, stds[0] * stds[1] * rho, 0],\n [stds[0] * stds[1] * rho, stds[1]**2, 0],\n [0, 0, stds[2]**2]])\n n = 150000\n data = np.random.multivariate_normal(means, cov, n)\n mi = total_correlation_ksg(data, [[0], [1]], [2])\n assert mi == pytest.approx(-np.log2(1 - rho**2) / 2, abs=1e-1)",
"def _two_sample_kolmogorov_smirnov_pmf(\n pmf1: np.ndarray, pmf2: np.ndarray, alpha: float = 0.05\n) -> Tuple[float, float, bool]:\n # note: yields different results as `scipy.stats.ks_2samp`\n\n cdf1 = np.cumsum(pmf1)\n cdf2 = np.cumsum(pmf2)\n\n n1 = cdf1[-1]\n n2 = cdf2[-1]\n\n # cannot be inplace because of type conversion\n cdf1 = cdf1 / n1\n cdf2 = cdf2 / n2\n\n statistic, pvalue = _two_sample_kolmogorov_smirnov_same_length(cdf1, cdf2, n1, n2)\n reject = pvalue < alpha\n return statistic, pvalue, reject",
"def test_mi_knn1(means, stds, rho):\n cov = np.array([[stds[0]**2, stds[0] * stds[1] * rho], [stds[0] * stds[1] * rho, stds[1]**2]])\n n = 100000\n data = np.random.multivariate_normal(means, cov, n)\n mi = total_correlation_ksg(data, [[0], [1]])\n assert mi == pytest.approx(-np.log2(1 - rho**2) / 2, abs=1e-1)",
"def test_kendall(self):\r\n # test from pg. 594 Sokal and Rohlf, Box 15.7\r\n v1 = [\r\n 8.7,\r\n 8.5,\r\n 9.4,\r\n 10,\r\n 6.3,\r\n 7.8,\r\n 11.9,\r\n 6.5,\r\n 6.6,\r\n 10.6,\r\n 10.2,\r\n 7.2,\r\n 8.6,\r\n 11.1,\r\n 11.6]\r\n v2 = [\r\n 5.95, 5.65, 6.00, 5.70, 4.70, 5.53, 6.40, 4.18, 6.15, 5.93, 5.70, 5.68,\r\n 6.13, 6.30, 6.03]\r\n obs_tau = kendall(v1, v2)\r\n obs_prob = kendall_pval(obs_tau, len(v1))\r\n exp_tau = 0.49761335152811925\r\n exp_prob = 0.0097188572446995618\r\n self.assertFloatEqual(obs_tau, exp_tau)\r\n self.assertFloatEqual(obs_prob, exp_prob)\r\n # random vectors checked against scipy. v1 has 33 ties, v2 32\r\n v1 = array(\r\n [1.2, 9.7, 8.8, 1.7, 8.6, 9.9, 6.8, 7.3, 5.5, 5.4, 8.3,\r\n 3.6, 7.5, 2., 9.3, 5.1, 8.4, 0.3, 8.2, 2.4, 9.8, 8.5,\r\n 2.1, 6., 1.8, 3.7, 1.4, 4.6, 7.6, 5.2, 0.9, 5.2, 4.7,\r\n 2.9, 5., 6.9, 1.3, 6.7, 5.2, 2.4, 6.9, 2., 7.4, 0.4,\r\n 8.2, 9.5, 2.9, 5.7, 2.4, 8.8, 1.6, 3.5, 5.1, 3.6, 3.3,\r\n 7.5, 0.9, 9.3, 5.4, 6.9, 9.3, 2.3, 1.9, 8.1, 3.2, 4.2,\r\n 8.7, 3., 9.8, 5.3, 6.2, 4.8, 9., 2.8, 5.5, 8.4, 4.1,\r\n 5.6, 5.4, 6.9, 3.8, 2.7, 0.3, 3.9, 8.2, 6.6, 1.9, 3.9,\r\n 2., 4.4, 0.8, 6.5, 4.8, 1.5, 9.9, 9.1, 9.9, 6.2, 2.9,\r\n 2.])\r\n v2 = array([6.6, 8.6, 3.9, 6.1, 0.9, 8.4, 10., 3.3, 0.4,\r\n 3.9, 7.6, 8.2, 8.6, 3., 6.9, 0.6, 8.4, 8.1,\r\n 6.3, 0.5, 5.2, 6.4, 8., 9.9, 1.2, 6.7, 8.4,\r\n 2.7, 8.4, 4.1, 4.6, 5.1, 5.2, 5.3, 2.2, 2.2,\r\n 4.3, 7.1, 1.4, 6.6, 7.6, 4.5, 7.8, 3.5, 7.1,\r\n 0.6, 4.6, 3.2, 2.2, 0.2, 3.9, 5.9, 7.7, 8.8,\r\n 1.3, 5.1, 5.6, 8.3, 8.8, 1.7, 5.2, 6.9, 1.3,\r\n 1.4, 4.9, 9.4, 2.3, 3.7, 9.1, 3.4, 1.6, 4.1,\r\n 9.7, 2.8, 9.9, 0.5, 2., 2.7, 3.3, 2.4, 3.6,\r\n 7.9, 6.5, 7., 4.2, 1.8, 1.6, 1.9, 5.5, 0.,\r\n 1.4, 2.2, 7.2, 8.2, 1.1, 2.5, 5.3, 0.2, 9., 0.2])\r\n exp_tau, exp_prob = (0.024867511238807951, 0.71392573687923555)\r\n obs_tau = kendall(v1, v2)\r\n obs_prob = kendall_pval(obs_tau, len(v1))\r\n self.assertFloatEqual(obs_tau, exp_tau)\r\n self.assertFloatEqual(obs_prob, exp_prob)",
"def KS(dataDict=None, dataLabel='data', mode='two.sided'):\n\n # test calling values\n modes = ['two.sided', 'less', 'greater']\n if mode not in modes:\n raise ValueError('RSTATS.KS: mode must be in: ', modes)\n if (dataDict is None or not isinstance(dataDict, dict) \n or len(dataDict.keys()) != 2):\n raise ValueError('RSTATS.KS: dataDict must be a dictionary with '\n + 'exactly 2 keys')\n\n labels = list(dataDict.keys())\n# NGroups = len(labels)\n cmdx = 'X=c(%s)' % ', '.join(str(x) for x in dataDict[labels[0]])\n cmdy = 'Y=c(%s)' % ', '.join(str(y) for y in dataDict[labels[1]])\n# package \"perm\" not available on mac os x, use coin instead\n# importr('perm')\n robjects.r(cmdx)\n robjects.r(cmdy)\n\n# (pvalue, nmc) = permutation(dataDict, dataDict.keys())\n\n u = robjects.r(\"ks.test(X, Y, alternative='%s')\" % mode)\n\n pvalue = float(u[1][0])\n statvalue = float(u[0][0]) # get diff estimate\n if dataLabel is not None:\n print ('\\nKolmogorov-Smirnov test. Dataset = %s' % (dataLabel))\n print(u' Test statistic: {:8.4f}'.format(statvalue))\n print(u' p={:8.6f}, [mode={:s}]'.format(float(pvalue), mode))\n return pvalue",
"def test_G_2_by_2_1tailed_examples(self):\r\n # first up...the famous arginine case\r\n self.assertFloatEqualAbs(G_2_by_2(36, 16, 38, 106), (29.111609, 0),\r\n 0.00001)\r\n # then some other miscellaneous positive and negative values\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(0, 52, 12, 132), (-7.259930, 0.996474),\r\n 0.00001)\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(5, 47, 14, 130), (-0.000481, 0.508751),\r\n 0.00001)\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(5, 47, 36, 108), (-6.065167, 0.993106),\r\n 0.00001)",
"def test_chi_2(self):\n NN = 4\n w = 0.01\n kz = 1.0e-7\n kp = 3.0e-7\n wp = 2.0e3\n tz = 2.0e-18\n tp = 1.0e-18\n vthz = 1e5\n vthp = 1e5\n omega = 0.1\n vz = 100.\n\n chi = f_chi(NN, w, kz, kp, wp, tz, tp, vthz, vthp,\n omega, vz, method = 'numpy')/w**2\n expected_chi = np.array([\n [4.972725326e8, 3.812283500e9j, -3.086420946e8],\n [-3.812283500e9j, -3.037753676e9 + 1.098455531e9j, -3.661518437e9-3.465700712e8j],\n [-3.086420946e8, 3.661518437e9+3.465700712e8j, -1.976136342e9 + 2.497184730e10j]])\n npt.assert_allclose(chi, expected_chi, rtol = 1e-7)",
"def test_repeated_two_tailed(self):\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def aks_2samp (data1,data2):\r\n j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE\r\n j2 = 0 # N.zeros(data2.shape[1:])\r\n fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)\r\n fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)\r\n n1 = data1.shape[0]\r\n n2 = data2.shape[0]\r\n en1 = n1*1\r\n en2 = n2*1\r\n d = N.zeros(data1.shape[1:],N.float_)\r\n data1 = N.sort(data1,0)\r\n data2 = N.sort(data2,0)\r\n while j1 < n1 and j2 < n2:\r\n d1=data1[j1]\r\n d2=data2[j2]\r\n if d1 <= d2:\r\n fn1 = (j1)/float(en1)\r\n j1 = j1 + 1\r\n if d2 <= d1:\r\n fn2 = (j2)/float(en2)\r\n j2 = j2 + 1\r\n dt = (fn2-fn1)\r\n if abs(dt) > abs(d):\r\n d = dt\r\n# try:\r\n en = math.sqrt(en1*en2/float(en1+en2))\r\n prob = aksprob((en+0.12+0.11/en)*N.fabs(d))\r\n# except:\r\n# prob = 1.0\r\n return d, prob"
] | [
"0.65256953",
"0.63897914",
"0.6337468",
"0.61477834",
"0.6146518",
"0.614462",
"0.612953",
"0.6116342",
"0.60985845",
"0.59907585",
"0.5973902",
"0.59544104",
"0.59449357",
"0.59340596",
"0.58936447",
"0.5890429",
"0.58695716",
"0.58615464",
"0.58401906",
"0.5830212",
"0.5787811",
"0.5775483",
"0.5771422",
"0.5755802",
"0.5752473",
"0.5742036",
"0.5672126",
"0.5659873",
"0.56507665",
"0.5643349"
] | 0.6872264 | 0 |
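Editor's note: the p-value expression inside KSm_test in the row above is the tail probability of a Brownian bridge restricted to (0, maxcdf). A self-contained sketch of just that formula, transcribed from the document code, to make the approximation explicit; the function name here is illustrative and not part of the dataset:

import numpy as np
from scipy.stats import norm

def brownian_bridge_tail_prob(A, maxcdf):
    # One-sided tail probability used by KSm_test above:
    # P(sup of a Brownian bridge on (0, maxcdf) >= A).
    R = np.sqrt(maxcdf - maxcdf ** 2)
    return 1 - norm.cdf(A / R) + norm.cdf((2 * maxcdf - 1) * A / R) * np.exp(-2 * A * A)

# As maxcdf approaches 1 the classic one-sided Kolmogorov tail exp(-2*A**2) is recovered:
print(brownian_bridge_tail_prob(1.0, 0.999))  # ~0.135, close to np.exp(-2.0)
print(brownian_bridge_tail_prob(1.36, 0.5))   # tail over a bridge restricted to (0, 0.5)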
Pre-create N partitions ahead of time according to the specified interval unit and interval. | def postgres_auto_partition(
model: PostgresPartitionedModel,
count: int,
interval_unit: PostgresAutoPartitioningIntervalUnit,
interval: int,
start_from: Optional[date] = None,
using="default",
):
connection = connections[using]
with connection.cursor() as cursor:
table = connection.introspection.get_partitioned_table(
cursor, model._meta.db_table
)
if not table:
raise PostgresAutoPartitioningError(
f"Model {model.__name__}, with table {model._meta.db_table} "
"does not exists in the database. Did you run "
"`python manage.py migrate`?"
)
if table.method != PostgresPartitioningMethod.RANGE:
raise PostgresAutoPartitioningError(
f"Table {table.name} is not partitioned by a range. Auto partitioning "
"only supports partitioning by range."
)
schema_editor = connection.schema_editor()
start_datetime = datetime.now()
if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:
start_datetime = start_datetime.replace(day=1)
elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:
start_datetime = start_datetime - relativedelta(
days=start_datetime.weekday()
)
for _ in range(count):
if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:
end_datetime = start_datetime + relativedelta(months=+interval)
partition_name = start_datetime.strftime("%Y_%b").lower()
elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:
end_datetime = start_datetime + relativedelta(weeks=+interval)
partition_name = start_datetime.strftime("%Y_week_%W").lower()
from_values = start_datetime.strftime("%Y-%m-%d")
to_values = end_datetime.strftime("%Y-%m-%d")
logger = LOGGER.bind(
model_name=model.__name__,
name=partition_name,
from_values=from_values,
to_values=to_values,
)
if start_from and start_datetime.date() < start_from:
start_datetime = end_datetime
logger.info(
"Skipping creation of partition, before specified start date",
start_from=start_from,
)
continue
partition_table_name = schema_editor.create_partition_table_name(
model, partition_name
)
existing_partition = next(
(
table_partition
for table_partition in table.partitions
if table_partition.name == partition_table_name
),
None,
)
if existing_partition:
start_datetime = end_datetime
logger.info("Skipping creation of partition, already exists")
continue
schema_editor.add_range_partition(
model=model,
name=partition_name,
from_values=from_values,
to_values=to_values,
)
logger.info("Created partition")
start_datetime = end_datetime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createPartitions(config, logger):\n databaseConnection, databaseCursor = connectToDatabase(config, logger)\n try:\n for aDatabaseObjectClass in databaseObjectClassListForWeeklyPartitions:\n weekIterator = mondayPairsIteratorFactory(config.startDate, config.endDate)\n aDatabaseObject = aDatabaseObjectClass(logger=logger)\n aDatabaseObject.createPartitions(databaseCursor, weekIterator)\n databaseConnection.commit()\n except:\n databaseConnection.rollback()\n socorro_util.reportExceptionAndAbort(logger)",
"def _create_missing_delta_storage_partitions(self, conn):\n with CodeProfiler() as cp:\n # We deliberately don't create indexes straight away as the first time we create a partition it is\n # likely to be a bulk insert where indexes will hurt performance\n for parent_name, child_name_fn in [(self._notifications_lists_tblname,\n self._notifications_lists_part_tblname),\n (self._exceptions_lists_tblname, self._exceptions_lists_part_tblname)]:\n self._create_operator_partitions(conn,\n parent_tbl_name=parent_name,\n child_name_fn=child_name_fn,\n is_unlogged=False,\n allow_existing=True,\n fillfactor=45)\n\n return -1, cp.duration",
"def setSlicesPerTimepoint(self, n):\n\t\tassert n > 0, \"Slices per timepoint needs to be greater than 0\"\n\t\tprint \"Setting slices per timepoint to \", n\n\t\tself.slicesPerTimepoint = n\n\t\tself.z = n\n\t\tself.readers = []",
"def parition_generator(n):\n # generator\n k = np.zeros(n, dtype=np.dtype(int))\n Z = np.zeros(n, dtype=np.dtype(int))\n h = [float(n)]\n yield(Z)\n while next_partition(Z, k, h) is not None:\n yield(Z)",
"def integer_partitions(n, **kwargs):\n if 'parts' in kwargs:\n parts = sorted(kwargs['parts'], reverse=True)\n custom_parts = True\n else:\n parts = range(n, 0, -1)\n custom_parts = False\n total_number = len(parts)\n\n if 'distinct' in kwargs and kwargs['distinct']:\n distinct = 1\n else:\n distinct = 0\n\n if 'num_parts' in kwargs:\n num_parts = kwargs['num_parts']\n if num_parts > n:\n yield []\n return\n else:\n num_parts = 0\n\n def algorithm_p(n):\n \"\"\"\n Generates all partitions of n. This is Algorithm P from 7.2.1.4 of\n Knuth, Vol. 4.\n \"\"\"\n partition = [0]*n\n last_replaced = 0\n partition[last_replaced] = n\n idx = last_replaced - (n == 1)\n\n while True:\n yield partition[0:last_replaced + 1]\n if idx < 0:\n return\n if partition[idx] == 2:\n partition[idx] = 1\n idx -= 1\n last_replaced += 1\n partition[last_replaced] = 1\n else:\n replacement = partition[idx] - 1\n partition[idx] = replacement\n n = last_replaced - idx + 1\n last_replaced = idx + 1\n while n > replacement:\n partition[last_replaced] = replacement\n last_replaced += 1\n n -= replacement\n partition[last_replaced] = n\n idx = last_replaced - (n == 1)\n\n def algorithm_h(n, m):\n \"\"\"\n Generates all partitions of n into m parts. This is Algorithm H from\n 7.2.1.4 of Knuth, Vol. 4.\n \"\"\"\n partition = [1]*m\n partition[0] = n - m + 1\n\n while True:\n yield partition[:]\n if partition[1] < partition[0] - 1:\n partition[0] -= 1\n partition[1] += 1\n else:\n j = 2\n s = partition[0] + partition[1] - 1\n while j < m and partition[j] >= partition[0] - 1:\n s += partition[j]\n j += 1\n if j >= m:\n return\n replacement = partition[j] + 1\n partition[j] = replacement\n j -= 1\n while j > 0:\n partition[j] = replacement\n s -= replacement\n j -= 1\n partition[0] = s\n\n def backtrack(partial_sum, used, num_used, last_idx):\n if partial_sum == n:\n if not num_parts or (num_parts and num_used == num_parts):\n yield used\n elif partial_sum < n:\n if num_parts and num_used >= num_parts:\n return\n idx = 0\n if last_idx != 0:\n idx = last_idx + distinct\n for i in xrange(idx, total_number):\n part = parts[i]\n for partition in backtrack(partial_sum + part,\n used + [part], num_used + 1, i):\n yield partition\n\n if distinct or custom_parts:\n partition_gen = backtrack(0, [], 0, 0)\n elif not distinct and not custom_parts and num_parts != 0:\n partition_gen = algorithm_h(n, num_parts)\n else:\n partition_gen = algorithm_p(n)\n\n for partition in partition_gen:\n yield partition",
"def partitions(n):\n for a in range(2,n//2+1):\n yield a, n-a",
"def pad_intervals(parts, duration=128):\n part_duration = duration / (parts + 1)\n return [int((i + 1) * part_duration) for i in range(parts)]",
"def partition(data, num_partitions=None, by=None, **kwargs):\n return Component(\n \"Partition\",\n arguments={\n 'data': Component.of(data),\n 'num_partitions': Component.of(num_partitions),\n 'by': Component.of(by)\n },\n options={\n \n },\n constraints=kwargs)",
"def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits",
"def fixed_size_partitioner(num_shards, axis=0):\n def _partitioner(shape, **unused_args):\n partitions_list = [1] * len(shape)\n partitions_list[axis] = min(num_shards, shape.dims[axis].value)\n return partitions_list\n return _partitioner",
"def __partition1(self, lst, n):\n \n if lst is None:\n lst = []\n \n # Create a dictionary for the parted spws:\n rdict = {}\n division = len(lst)/float(n)\n for i in xrange(int(n)):\n part = lst[int(round(division * i)):int(round(division * (i+1)))]\n rdict[i] = part\n \n return rdict",
"def partitions_list(n):\r\n p = IntegerPartition([n])\r\n w = []\r\n while list(p.args[1]) not in w:\r\n w.append(list(p.args[1]))\r\n p = p.next_lex()\r\n return w",
"def __partition(self, lst, n):\n \n if lst is None:\n lst = []\n \n division = len(lst)/float(n)\n \n return [ lst[int(round(division * i)):\n int(round(division * (i+1)))] for i in xrange(int(n))]",
"def partition_all(n):\n def _partition_all_xducer(step):\n outer = {\"temp\": []}\n\n def _partition_all_step(r=Missing, x=Missing):\n if r is Missing: return step()\n\n # arity 1: called on completion.\n if x is Missing:\n if not outer[\"temp\"]:\n return r\n _temp = outer[\"temp\"][:]\n del outer[\"temp\"][:]\n _r = unreduced(step(r, _temp))\n return step(_r)\n\n # arity 2: called w/each reduction step.\n outer[\"temp\"].append(x)\n if len(outer[\"temp\"]) == n:\n _temp = outer[\"temp\"][:]\n del outer[\"temp\"][:]\n return step(r, _temp)\n return r\n\n return _partition_all_step\n return _partition_all_xducer",
"def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts",
"def partition(seq):\n\n return 0",
"def set_partition(self, begin=0, end=0):\r\n self.partition = (begin, end)",
"def _createOwnPartition(self, databaseCursor, uniqueItems):\n self.logger.debug(\"%s - in createOwnPartition for %s\",threading.currentThread().getName(),self.name)\n for x in uniqueItems:\n #self.logger.debug(\"DEBUG - item value is %s\",x)\n partitionCreationParameters = self.partitionCreationParameters(x)\n partitionName = self.partitionNameTemplate % partitionCreationParameters[\"partitionName\"]\n if partitionWasCreated(partitionName):\n #self.logger.debug(\"DEBUG - skipping creation of %s\",partitionName)\n continue\n partitionCreationSql = self.partitionCreationSqlTemplate % partitionCreationParameters\n #self.logger.debug(\"%s - Sql for %s is %s\",threading.currentThread().getName(),self.name,partitionCreationSql)\n aPartition = Table(name=partitionName, logger=self.logger, creationSql=partitionCreationSql)\n self.logger.debug(\"%s - savepoint createPartitions_%s\",threading.currentThread().getName(), partitionName)\n databaseCursor.execute(\"savepoint createPartitions_%s\" % partitionName)\n try:\n self.logger.debug(\"%s - creating %s\", threading.currentThread().getName(), partitionName)\n aPartition._createSelf(databaseCursor)\n markPartitionCreated(partitionName)\n self.logger.debug(\"%s - successful - releasing savepoint\", threading.currentThread().getName())\n databaseCursor.execute(\"release savepoint createPartitions_%s\" % partitionName)\n except pg.ProgrammingError, x:\n self.logger.debug(\"%s -- Rolling back and releasing savepoint: Creating %s failed in createPartitions: %s\", threading.currentThread().getName(), partitionName, str(x).strip())\n databaseCursor.execute(\"rollback to createPartitions_%s; release savepoint createPartitions_%s;\" % (partitionName, partitionName))\n databaseCursor.connection.commit()",
"def _tokens_partitions(tokens, min_number_of_tokens, number_of_partitions):\n if len(tokens) < min_number_of_tokens:\n # In this case we have few token and thus we split them\n tkns_per_partition = min_number_of_tokens / number_of_partitions\n step_size = ((2 ** 64) - 1) / min_number_of_tokens\n partition = []\n for fraction, to in tokens:\n while fraction < to - step_size:\n partition.append((fraction, fraction + step_size))\n fraction += step_size\n if len(partition) >= tkns_per_partition:\n yield partition\n partition = []\n # Adding the last token\n partition.append((fraction, to))\n if len(partition) > 0:\n yield partition\n else:\n # This is the case we have more tokens than partitions,.\n splits = max(len(tokens) / number_of_partitions, 1)\n\n for i in xrange(0, len(tokens), splits):\n yield tokens[i:i + splits]\n if len(tokens) % splits > 0:\n yield tokens[len(tokens) / splits * splits + 1:]",
"def chunks(l, n):\n lists = []\n for i in range(n):\n list1 = np.arange( i*l/n+1 , (i+1)*l/n+1 )\n lists.append(list1)\n return lists",
"def n_step_scheduler(step_interval: int) -> Scheduler:\n if step_interval <= 0:\n raise ValueError(f'step_interval must be positive, got {step_interval}')\n\n def f(unused_data: step_data.StepData) -> bool:\n should_log = f.step_counter % step_interval == 0\n f.step_counter += 1\n return should_log\n\n f.step_counter = 0\n return f",
"def create_time_slices(weeks, lookback, horizon, gap,\n step_size, holdout_window, num_steps):\n\n n = len(weeks)\n min_week = min(weeks)\n holdout_gap = horizon + gap - 1 # gap between train and holdout set\n holdout_size = horizon + holdout_window - 1\n step_space = (num_steps - 1) * step_size\n\n training_window = n - lookback - holdout_gap - holdout_size - step_space\n\n if training_window <= 0:\n err_msg = \"negative window size using specified parameters\"\n logging.error(err_msg)\n raise Exception(err_msg)\n\n def create_time_slice(step=0):\n base = min_week + lookback + step\n time_slice = (\n [base + x for x in range(training_window)],\n [base + x + holdout_gap + training_window\n for x in range(holdout_window)]\n )\n return time_slice\n\n output = [create_time_slice(x*step_size) for x in range(0, num_steps)]\n\n return output",
"def partition(self, lst, n):\n division = len(lst) / float(n)\n return [lst[int(round(division * i)): int(round(division * (i + 1)))] for i in xrange(n)]",
"def createPartitions(self, databaseCursor, iterator):\n self.logger.debug(\"%s - in createPartitions\", threading.currentThread().getName())\n partitionTableClasses = getOrderedPartitionList([self.__class__])\n #self.logger.debug(\"DEBUG - Classes are %s\",partitionTableClasses)\n uniqueItems = [x for x in iterator]\n for tableClass in partitionTableClasses:\n tableObject = self\n if not self.__class__ == tableClass:\n tableObject = tableClass(logger = self.logger)\n #self.logger.debug(\"DEBUG - Handling %s /w/ sql %s\",tableObject.name,tableObject.partitionCreationSqlTemplate)\n tableObject._createOwnPartition(databaseCursor,uniqueItems)",
"def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p",
"def create_train_state_partition_spec(\n element_spec,\n *,\n model,\n optim,\n target_params_update_fn,\n):\n # Create partition specs\n # Start with evaluating the shape of the states\n train_state_shape = jax.eval_shape(\n functools.partial(\n create_train_state,\n model=model,\n optim=optim,\n target_params_update_fn=target_params_update_fn,\n rng=jax.random.PRNGKey(0)), element_spec)\n\n return create_train_state_partition_spec_from_shape(train_state_shape)",
"def equal_interval_creation(\n particles,\n fields,\n time_intervals,\n time_horizon,\n time_passed):\n ret = []\n time_ratio = time_horizon / time_passed\n for i in range(1, time_intervals + 1):\n w = {}\n for key in particles.keys():\n if (key in fields) | (fields is None):\n x = particles[key]\n # N0 = int(x.shape[0] * time_ratio * ((i - 1) / time_intervals))\n N1 = int(x.shape[0] * time_ratio * (i / time_intervals))\n N0 = int(N1 * 0.1)\n w[key] = x[N0:N1]\n t_i = time_passed * (N1 / x.shape[0])\n w = {'particles': w}\n w['time'] = t_i\n w['epoch'] = i\n ret.append(w)\n return ret",
"def generatePreKeys(start, count):\n results = []\n start -= 1\n for i in range(0, count):\n preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1\n results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))\n\n return results",
"def test_partition(self):\n # one swap at the end\n list = [5, 6, 7, 8, 9, 2]\n partition(list, 0, 5)\n # assert list == [2, 6, 7, 8, 9, 5] # should be improved in future",
"def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments"
] | [
"0.593766",
"0.5498116",
"0.54152864",
"0.53877294",
"0.53830886",
"0.53789216",
"0.5308101",
"0.52501756",
"0.52337325",
"0.5225058",
"0.52231616",
"0.51973605",
"0.51917195",
"0.5139701",
"0.5094759",
"0.50867045",
"0.508078",
"0.5067348",
"0.5050254",
"0.50363874",
"0.50242627",
"0.50173205",
"0.5013433",
"0.5006731",
"0.49950364",
"0.4971181",
"0.4953473",
"0.492318",
"0.48986933",
"0.48950753"
] | 0.65793604 | 0 |
Render Hebrew in a dataframe. | def pretty_hebrew(val):
    return 'font-size:20px; font-family: Times New Roman; text-align: right; max-width: 500px' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disp(df):\n display(HTML(df.to_html(index=False)))",
"def df_to_html(df, percentage_columns=None): # pragma: no cover\n big_dataframe_setup()\n try:\n res = \"<br><h2> {} </h2>\".format(df.name)\n except AttributeError:\n res = \"\"\n df.style.set_properties(**{\"text-align\": \"center\"})\n res += df.to_html(\n formatters=_formatters_dict(\n input_df=df, percentage_columns=percentage_columns\n )\n )\n res += \"<br>\"\n return res",
"def Get_Hebrew_Stop_Words():\r\n # Explain Of The Function #\r\n\r\n return ['אני', 'את', 'אתה', 'אנחנו', 'אתן', 'אתם', 'הם', 'הן', 'היא', 'הוא', 'שלי', 'שלו', 'שלך', 'שלה', 'שלנו',\r\n 'שלכם', 'שלכן', 'שלהם', 'עד', 'אשר', 'במידה', 'שוב', 'יותר',\r\n 'שלהן', 'לי', 'לו', 'לה', 'לנו', 'לכם', 'לכן', 'להם', 'להן', 'אותה', 'אותו', 'זה', 'זאת', 'אלה', 'אלו',\r\n 'תחת', 'מתחת', 'מעל', 'בין', 'מכיוון', 'יכולה', 'יכולים', 'יכולות',\r\n 'עם', 'עד', 'נגר', 'על', 'אל', 'מול', 'של', 'אצל', 'כמו', 'אחר', 'אותו', 'בלי', 'לפני', 'אחרי',\r\n 'מאחורי', 'עלי', 'עליו', 'עליה', 'הסיבה שבגללה', 'למה', 'אלו', 'אף', 'על', 'מעל',\r\n 'עליך', 'עלינו', 'עליכם', 'לעיכן', 'עליהם', 'עליהן', 'כל', 'כולם', 'כולן', 'כך', 'ככה', 'כזה', 'זה',\r\n 'זאת', 'אותי', 'אותה', 'אותם', 'איתן', 'איתנו', 'עצמן', 'עצמנו', 'מהיכן', 'מן', 'לעבר', 'מכאן',\r\n 'אותך', 'אותו', 'אותן', 'אותנו', 'ואת', 'את', 'אתכם', 'אתכן', 'איתי', 'איתו', 'איתך', 'איתה', 'איתם',\r\n 'איתכם', 'איתכן', 'יהיה', 'תהיה', 'היתי', 'היתה', 'היה', 'להיות', 'עצמי', 'עצמו', 'עצמה', 'עצמם',\r\n 'עצמהם', 'עצמהן', 'מי', 'מה', 'איפה', 'היכן', 'במקום שבו', 'אם', 'לאן', 'למקום שבו', 'מקום בו', 'איזה',\r\n 'איך', 'כיצד', 'באיזו מידה', 'מתי', 'בשעה ש', 'כאשר', 'כש', 'למרות', 'לפני', 'אחרי', 'מאיזו סיבה',\r\n 'מדוע', 'לאיזו תכלית', 'כי', 'יש', 'אין', 'אך', 'מנין', 'מאין', 'מאיפה', 'יכל', 'יכלה', 'יכלו', 'יכול',\r\n 'יוכלו', 'יוכל', 'מסוגל', 'לא', 'רק', 'אולי', 'אין', 'לאו', 'אי', 'כלל', 'נגד', 'אם', 'עם', 'אל', 'אלה',\r\n 'מתחת', 'מצד', 'בשביל', 'לבין', 'באמצע', 'בתוך', 'דרך', 'מבעד', 'באמצעות', 'למעלה', 'למטה', 'מחוץ',\r\n 'כאן', 'הנה', 'הרי', 'פה', 'שם', 'אך', 'ברם', 'שוב', 'אבל', 'מבלי', 'בלי', 'מלבד', 'רק', 'בגלל',\r\n 'ואילו', 'למרות', 'אס', 'כמו', 'כפי', 'אז', 'אחרי', 'כן', 'לכן', 'לפיכך', 'מאד', 'עז', 'מעט', 'מעטים',\r\n 'מדי', 'גם', 'כן', 'נו', 'אחר', 'אחרת', 'אחרים', 'אחרות', 'אשר', 'או']\r\n pass",
"def patchPandasHTMLrepr(self, **kwargs):\n global defHTMLFormatter_write_cell\n global defPandasGetAdjustment\n\n # Two things have to be done:\n # 1. Disable escaping of HTML in order to render img / svg tags\n # 2. Avoid truncation of data frame values that contain HTML content\n\n # The correct patch requires that two private methods in pandas exist. If\n # this is not the case, use a working but suboptimal patch:\n def patch_v1():\n with pd.option_context('display.max_colwidth', -1): # do not truncate\n kwargs['escape'] = False # disable escaping\n return defPandasRendering(self, **kwargs)\n\n try:\n import pandas.io.formats.html # necessary for loading HTMLFormatter\n except:\n # this happens up until at least pandas v0.22\n return patch_v1()\n else:\n if not hasattr(pd.io.formats.html, 'HTMLFormatter') or \\\n not hasattr(pd.io.formats.html.HTMLFormatter, '_write_cell') or \\\n not hasattr(pd.io.formats.format, '_get_adjustment'):\n return patch_v1()\n\n # The \"clean\" patch:\n # 1. Temporarily set escape=False in HTMLFormatter._write_cell\n defHTMLFormatter_write_cell = pd.io.formats.html.HTMLFormatter._write_cell\n\n # 2. Pandas uses TextAdjustment objects to measure the length of texts\n # (e.g. for east asiacopied over from rdkit repo yn languages). We take advantage of this mechanism\n # and replace the original text adjustment object with a custom one.\n # This \"RenderMoleculeAdjustment\" object assigns a length of 0 to a\n # given text if it is valid HTML. And a value having length 0 will not\n # be truncated.\n\n # store original _get_adjustment method\n defPandasGetAdjustment = pd.io.formats.format._get_adjustment\n\n try:\n # patch methods and call original to_html function\n pd.io.formats.format._get_adjustment = _patched_get_adjustment\n pd.io.formats.html.HTMLFormatter._write_cell = _patched_HTMLFormatter_write_cell\n return defPandasRendering(self, **kwargs)\n except:\n pass\n finally:\n # restore original methods\n pd.io.formats.format._get_adjustment = defPandasGetAdjustment\n pd.io.formats.html.HTMLFormatter._write_cell = defHTMLFormatter_write_cell\n\n # If this point is reached, an error occurred in the previous try block.\n # Use old patch:\n return patch_v1()",
"def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html",
"def _report(self, canonical: pd.DataFrame, index_header: str, bold: [str, list]=None, large_font: [str, list]=None):\n pd.set_option('max_colwidth', 200)\n pd.set_option('expand_frame_repr', True)\n bold = Commons.list_formatter(bold)\n bold.append(index_header)\n large_font = Commons.list_formatter(large_font)\n large_font.append(index_header)\n style = [{'selector': 'th', 'props': [('font-size', \"120%\"), (\"text-align\", \"center\")]},\n {'selector': '.row_heading, .blank', 'props': [('display', 'none;')]}]\n index = canonical[canonical[index_header].duplicated()].index.to_list()\n canonical.loc[index, index_header] = ''\n canonical = canonical.reset_index(drop=True)\n df_style = canonical.style.set_table_styles(style)\n _ = df_style.set_properties(**{'text-align': 'left'})\n if len(bold) > 0:\n _ = df_style.set_properties(subset=bold, **{'font-weight': 'bold'})\n if len(large_font) > 0:\n _ = df_style.set_properties(subset=large_font, **{'font-size': \"120%\"})\n return df_style",
"def convert_to_tvr_subtitle(df: pd.DataFrame) -> pd.DataFrame:\n\tpass",
"def russian_to_en(df_data):\n tranlate_file = {'region': 'russian_region_names_in_english.csv',\n 'city': 'russian_city_names_in_english.csv',\n 'parent_category_name': 'parent_product_categories.csv',\n 'category_name': 'product_categories.csv',\n 'param_1': 'param_1.csv',\n 'param_2': 'param_2.csv',\n 'param_3': 'param_3.csv'}\n \n # Iterate over input dataframe columns,\n # Use files from: www.kaggle.com/kaparna/translations/data, if possible.\n # For 'description' column, use translate module.\n for col in df_data.columns:\n file = tranlate_file.get(col, None) \n if file: \n # build up the mapping from Russian to English.\n file = pandas.read_csv(PATH+file)\n file.columns = ['rus', 'en']\n convert = {row['rus']: row['en'] for index, row in file.iterrows()}\n \n # translate column into English\n df_data[col] = df_data[col].map(convert, na_action='ignore')",
"def uCSIsHebrew(code):\n ret = libxml2mod.xmlUCSIsHebrew(code)\n return ret",
"def __bytes__(self):\n from pandas.core.config import get_option\n\n encoding = get_option(\"display.encoding\")\n return self.__unicode__().encode(encoding, 'replace')",
"def startrek_characters():\n pdf = pd.DataFrame({\n 'name': [\n 'JONATHAN ARCHER',\n 'Michael Burnham',\n 'Chakotay ',\n ' Data ',\n 'the Doctor',\n 'philippa georgiou',\n 'Jean--Luc PICARD',\n 'Christopher pike '\n ],\n 'rank': [\n 'Captain',\n 'Comander',\n 'Comander',\n 'LT Commander',\n 'None',\n 'Capitain',\n 'Captain',\n 'CAPTAIN',\n ]\n })\n return pdf",
"def standardize_text(df: pd.DataFrame,\r\n text_field: str,\r\n output_field: str) -> pd.DataFrame:\r\n\r\n # df[output_field] = df[text_field].apply(\r\n # lambda column: emoji.get_emoji_regexp().sub(u'', column)\r\n # )\r\n\r\n df[output_field] = df[text_field].str.replace(\"'m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"’m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"´m\", ' am')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"’ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"´ve\", ' have')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"’d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"´d\", ' would')\r\n\r\n df[output_field] = df[output_field].str.replace(\"n't\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n’t\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n´t\", ' not')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"’ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"´ll\", ' will')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'s\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"’\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"´s\", ' is')\r\n\r\n df[output_field] = df[output_field].str.replace('/', ' ')\r\n df[output_field] = df[output_field].str.replace('\\.{2,}', '.')\r\n df[output_field] = df[output_field].str.replace('!{2,}', '!')\r\n df[output_field] = df[output_field].str.replace('\\?{2,}', '?')\r\n df[output_field] = df[output_field].str.replace('€+', '')\r\n df[output_field] = df[output_field].str.replace('[0-9$&~\\\\()[\\]{}<>%\\'\"“”‘’,;…+\\-_=*]+', '')\r\n df[output_field] = df[output_field].str.replace(r'http\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'http', '')\r\n df[output_field] = df[output_field].str.replace(r'@\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'@', 'at')\r\n df[output_field] = df[output_field].str.lower()\r\n df[output_field] = df[output_field].astype(str)\r\n\r\n return df",
"def df2html(df, name=None, dom=\"Brt\", show_index=False, pageLength=15):\n\n if name is None:\n name = uuid.uuid1().time_low\n # looks like datatable does not like ID made of numbers, even in string\n # so we convert to ABCDEFGH values\n name = \"\".join([chr(65 + int(x)) for x in str(name)])\n\n datatable = DataTable(df, name, index=show_index)\n datatable.datatable.datatable_options = {\n \"pageLength\": pageLength,\n \"scrollCollapse\": \"false\",\n \"dom\": dom,\n \"buttons\": [\"copy\", \"csv\"],\n }\n\n # identify links (columns ending in _links)\n df.columns = [str(x) for x in df.columns]\n for column in df.columns:\n if column.endswith(\"_links\"):\n prefix = column.replace(\"_links\", \"\")\n if prefix in df.columns:\n datatable.datatable.set_links_to_column(column, prefix)\n\n js = datatable.create_javascript_function()\n html = datatable.create_datatable(float_format=\"%.6g\")\n return js + html",
"def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore",
"def csv_to_html():\n logging.info(\"Converting csv to html..\")\n df = pd.read_csv(gTAF_config.execution_summary_csv_file)\n df.to_html(gTAF_config.html_report_file)\n htmTable = df.to_html()",
"def csv_to_html(filepath):\r\n df = pd.read_csv(filepath, index_col=0)\r\n html = df.to_html()\r\n return html",
"def ETL(df, col_name= 'headline', class_col_name='category', tok_col_name='tok'):\n # Primer paso: Tokenizacion, stopwords y stemming\n df_tok_clean = tokenization_stopwords_stemming(df, col_name=col_name, tok_col_name=tok_col_name)\n \n # Segundo paso: Crear texto liquido del dataframe limpio\n df_liquid_text = to_liquid_text(df_tok_clean, class_col_name=class_col_name, tok_col_name=tok_col_name)\n \n return df_tok_clean,df_liquid_text",
"def binarize_english(df):\n df['is_english'] = df['original_language'].apply(lambda x: 1 if x == 'en' else 0)\n return df",
"def printunichars(row):\n print(\"Title:\")\n print(row[0].encode('utf-8'))\n print(\"Body:\")\n print(row[1].encode('utf-8'))\n print(\"Ref:\")\n print(row[2].encode('utf-8'))\n print(\"Url:\")\n print(row[3].encode('utf-8'))",
"def render(cls, df: DataFrame, *args, **kwargs):\n from labext.widgets.data_table import DataTable\n dt = DataTable(df, *args, **kwargs)\n display(dt.widget, *dt.get_auxiliary_components())",
"def dataframe_displayer(df):\n\n #On paramètre les options d'affichage du module pandas\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n pd.set_option('display.max_colwidth', -1)\n\n print(df)",
"def preprocessing(dataframe):\n dataframe.loc[:, \"content\"] = dataframe.loc[:, \"content\"].str.replace(\"&.*?;\", \"\")\n dataframe.loc[:, \"content\"] = dataframe.loc[:, \"content\"].str.replace(\n r\"[^a-zA-Z\\s.:!?/#\\\"]\", \"\"\n )\n dataframe[\"original_content\"] = dataframe[\"content\"]\n dataframe.loc[:, \"content\"] = dataframe.loc[:, \"content\"].str.lower()\n return dataframe",
"def table_to_html(df, ev, html_id=\"\", add_class=\"\"):\n formatters = ev.getColumnFormatters(df)\n\n # apply sortlevel\n df = ev.sortDataFrame(df)\n\n tableclasses = 'ipet-table rb-table-data {}\" width=\"100%'.format(add_class)\n\n htmlstr = df.to_html(border=0,\n na_rep=NONE_DISPLAY, formatters=formatters, justify=\"right\",\n table_id=html_id, classes=tableclasses)\n\n return html.fromstring(htmlstr)",
"def as_html(self, max_rows=0):\n if not max_rows or max_rows > self.num_rows:\n max_rows = self.num_rows\n omitted = max(0, self.num_rows - max_rows)\n labels = self.column_labels\n lines = [\n (0, '<table border=\"1\" class=\"dataframe\">'),\n (1, '<thead>'),\n (2, '<tr>'),\n (3, ' '.join('<th>' + label + '</th>' for label in labels)),\n (2, '</tr>'),\n (1, '</thead>'),\n (1, '<tbody>'),\n ]\n fmts = [self._formats.get(k, self.format_column(k, v[:max_rows])) for\n k, v in self._columns.items()]\n for row in itertools.islice(self.rows, max_rows):\n lines += [\n (2, '<tr>'),\n (3, ' '.join('<td>' + fmt(v) + '</td>' for v, fmt in zip(row, fmts))),\n (2, '</tr>'),\n (1, '</tbody>'),\n ]\n lines.append((0, '</table>'))\n if omitted:\n lines.append((0, '<p>... ({} rows omitted)</p'.format(omitted)))\n return '\\n'.join(4 * indent * ' ' + text for indent, text in lines)",
"def byte_to_literal_strings(dataframe):\n # Select the str columns:\n str_df = dataframe.select_dtypes([np.object])\n\n if not str_df.empty:\n # Convert all of them into unicode strings\n str_df = str_df.stack().str.decode('utf-8').unstack()\n # Swap out converted cols with the original df cols\n for col in str_df:\n dataframe[col] = str_df[col]\n\n return dataframe",
"def render(self):\n self.data.update(self.axes.render()) \n encoder = Encoder(self._encoding) \n if not 'chs' in self.data:\n self.data['chs'] = '300x150'\n else: \n size = self.data['chs'].split('x')\n assert(len(size) == 2), 'Invalid size, must be in the format WxH'\n self.check_size(*map(int,size))\n assert('cht' in self.data), 'No chart type defined, use type method'\n self.data['cht'] = self.check_type(self.data['cht']) \n if self._dataset:\n self.data['chd'] = encoder.encode(self._dataset) \n # except: raise IndexError, 'Data encoding went screwy' \n else:\n assert('chd' in self.data), 'You must have a dataset, or use chd' \n if self.scales:\n assert(self.data['chd'].startswith('t:')), 'You must use text encoding with chds'\n self.data['chds'] = ','.join(self.scales)\n if self.bar_heights:\n self.data['chbh'] = self.bar_heights\n if self._geo and self._cc:\n self.data['chtm'] = self._geo\n self.data['chld'] = self._cc\n if self.lines:\n self.data['chls'] = '|'.join(self.lines) \n if self.markers:\n self.data['chm'] = '|'.join(self.markers) \n if self.fills:\n self.data['chf'] = '|'.join(self.fills)",
"def as_DF(self):\n\n hc_df = pd.DataFrame(self.s, index=self.s_names)\n hc_df.columns.name = 'type'\n hc_df.index.name = 's'\n\n return hc_df",
"def get_table_download_link(df):\n val = to_excel(df)\n b64 = base64.b64encode(val) # val looks like b'...'\n return f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"Your_File.xlsx\">Download Excel file</a>' # decode b'abc' => abc",
"def get_table_download_link(df):\r\n\tval = to_excel(df)\r\n\tb64 = base64.b64encode(val) # val looks like b'...'\r\n\treturn f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"extract.xlsx\">Download xlsx file</a>' # decode b'abc' => abc\r",
"def utify_chars(babylex_df):\n babylex_df['root'] = babylex_df['root'].str.replace(\"T\", \"ṭ\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"c\", \"š\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"S\", \"ṣ\")\n babylex_df['root'] = babylex_df['root'].str.replace(\"x\", \"'\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"T\", \"ṭ\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"c\", \"š\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"S\", \"ṣ\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"X\", \"'\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"aa\", \"ā\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"ee\", \"ē\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"ii\", \"ī\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"uu\", \"ū\")\n\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"A\", \"â\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"E\", \"ê\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"I\", \"î\")\n babylex_df['infinitive'] = babylex_df['infinitive'].str.replace(\"U\", \"û\")\n\n return babylex_df"
] | [
"0.6045315",
"0.55508834",
"0.53905046",
"0.53869677",
"0.5380068",
"0.53508544",
"0.53235275",
"0.5310591",
"0.53040236",
"0.52949053",
"0.5287557",
"0.5267061",
"0.52548164",
"0.51928174",
"0.5151537",
"0.51386535",
"0.5135061",
"0.5115769",
"0.50743735",
"0.5065344",
"0.50451964",
"0.503219",
"0.50077146",
"0.49885416",
"0.49861884",
"0.49773988",
"0.49746948",
"0.49733955",
"0.49528468",
"0.49443364"
] | 0.5721064 | 1 |
Show proportion dataframe with highlighting. | def df_highlighter(pr_df, rule='max'):
    if rule == 'max':
        return pr_df.style.apply(highlight_max, 1)
    elif rule == 'fishers':
        return pr_df.style.applymap(highlight_sig)
    else:
        return pr_df.style | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show(self, df, extra=[], spread=25):\n original_shape = df.shape\n df = df[self.default + extra]\n if spread > 0:\n spread_i = get_spread(df.index, spread)\n df = df.loc[spread_i]\n print(f'showing {df.shape[0]} of {original_shape[0]}')\n return df.style.applymap(pretty_hebrew, subset=self.stylize)",
"def format_perc_table(df_perc_diff, label_projs):\n df_perc = df_perc_diff.pivot(index='project', columns='thresh').round(2)['perc_above']\n df_perc.rename(index=label_projs, inplace=True)\n df_perc = df_perc.applymap(lambda x : '{:.2f}%'.format(x))\n return df_perc",
"def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df",
"def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()",
"def highlight_tot(series):\n is_over = series > 1.21\n is_marginal = (series > 1.1) & (is_over != False)\n is_under = series < 0.9\n\n style = []\n for i in range(len(series)):\n if is_over[i]:\n style.append(\"background-color: red\")\n elif is_marginal[i]:\n style.append(\"background-color: orange\")\n elif is_under[i]:\n style.append(\"background-color: lime\")\n else:\n style.append(\"background-color: yellow\")\n\n return style",
"def repetitive(df):\r\n total_rows = df.shape[0] \r\n for col in df.columns:\r\n count = df[col].value_counts(dropna=False)\r\n high_percent = (count/total_rows).iloc[0] \r\n if high_percent > 0.95:\r\n print('{0}: {1:.1f}%'.format(col, high_percent*100))\r\n print(count)\r\n print()",
"def plot_selected(self, df, columns, start_index, end_index):\n util = Utility()\n df = util.normalize_data(df)\n self.plot_data(df.ix[start_index:end_index, columns], \"Bitcoin\")",
"def plot_selected(df, columns, start_index, end_index):\r\n plot_data(df.ix[start_index:end_index, columns], title=\"Selected data\")",
"def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax",
"def _print_stat_rows(title,rows_before,rows_after):\n self.strprint(str(title)+\" : Percent of processed rows = %1.2F\"\\\n %(np.abs(rows_before-rows_after)*100/rows_before))",
"def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")",
"def visualize(houses:pd.DataFrame) -> None:\n #price_distribution(houses)\n #prop_types(houses)\n #zip_code(houses)\n #year_built(houses)\n #bed_bath(houses)\n return",
"def analysis_of_dataframe(self, dataframe):\n\t\ttypes = self.data.type.unique()\n\t\tratings = self.data.rating.unique()\n\n\t\tprint \"\"\n\n\t\t# First analysis section\n\t\tfor rating in ratings:\n\t\t\tpercentage = format(self.data.rating.value_counts()[rating] / len(self.data.index), '.6f')\n\n\t\t\t# Print probability data\n\t\t\tprint \"Prob(rating={}) = {}\".format(rating, percentage)\n\n\t\tprint \"\"\n\n\t\t# Second analysis section\n\t\tfor rating in ratings:\n\t\t\tfor type in types:\n\n\t\t\t\t# Get sub-set dataframe\n\t\t\t\ttemp_dataframe = self.data[self.data['rating'] == rating]\n\n\t\t\t\t# Get conditional probability\n\t\t\t\ttry:\n\t\t\t\t\tpercentage = format(temp_dataframe.type.value_counts()[type] / len(temp_dataframe.index), '.6f')\n\n\t\t\t\t# Current type not found in temp_dataframe\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpercentage = format(0, '.6f')\n\n\t\t\t\t# Print probability data\n\t\t\t\tfinally:\n\t\t\t\t\tprint \"Prob(type={}|rating={}) = {}\".format(type, rating, percentage)",
"def print_info(df):\n\n # Data statistics\n # Number of total samples\n print('There are {n_samples} samples in total.'.format(n_samples=len(list(df.index.get_level_values(0).unique()))))\n\n # Count the different types of labels\n unique = df['label'].unique()\n count = []\n\n for label in unique:\n count.append(len(df.index.get_level_values(0)[df['label'] == label].unique()))\n\n count_dict = {unique[i]: count[i] for i in range(len(unique))}\n count_dict_percentage = {\n unique[i]: np.round(count[i] / len(list(df.index.get_level_values(0).unique())), decimals=2)\n for i in range(len(unique))}\n\n print('The types and counts of different labels : \\n {count_dict}'.format(count_dict=count_dict))\n print('The types and counts of different labels as percentage of the total data'\n ' : \\n {count_dict}'.format(count_dict=count_dict_percentage))",
"def dataframe_displayer(df):\n\n #On paramètre les options d'affichage du module pandas\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n pd.set_option('display.max_colwidth', -1)\n\n print(df)",
"def disp(df):\n display(HTML(df.to_html(index=False)))",
"def show_data(df):\n printmd(str(\"The Data contains **\" + str(df.shape[0])+ '** rows.'))\n printmd(\"*__Sample of the data :__*\")\n display(df.head(n=5))\n print(\"\")\n print(\"\")",
"def overview_report(df, cutoff=LIMIT_SIMILARITY_L / 100,\n highlight=False, mode=\"cpd\"):\n cpp.load_resource(\"SIM_REFS\")\n sim_refs = cpp.SIM_REFS\n detailed_cpds = []\n if isinstance(df, cpp.DataSet):\n df = df.data\n t = Template(cprt.OVERVIEW_TABLE_HEADER)\n if \"int\" in mode:\n tbl_header = t.substitute(sim_entity=\"to another Test Compound\")\n else:\n tbl_header = t.substitute(sim_entity=\"to a Reference\")\n report = [cprt.OVERVIEW_TABLE_INTRO, tbl_header]\n row_templ = Template(cprt.OVERVIEW_TABLE_ROW)\n idx = 0\n for _, rec in df.iterrows():\n act_cutoff_low = ACT_CUTOFF_PERC\n act_cutoff_high = ACT_CUTOFF_PERC_H\n idx += 1\n well_id = rec[\"Well_Id\"]\n mol = mol_from_smiles(rec.get(\"Smiles\", \"*\"))\n rec[\"mol_img\"] = mol_img_tag(mol)\n rec[\"idx\"] = idx\n if \"Pure_Flag\" not in rec:\n rec[\"Pure_Flag\"] = \"n.d.\"\n\n rec[\"Act_Flag\"] = \"active\"\n rec[\"Max_Sim\"] = \"\"\n rec[\"Link\"] = \"\"\n rec[\"Col_Sim\"] = cprt.COL_WHITE\n has_details = True\n if rec[\"Activity\"] < act_cutoff_low:\n has_details = False\n rec[\"Act_Flag\"] = \"inactive\"\n # print(rec)\n # similar references are searched for non-toxic compounds with an activity >= LIMIT_ACTIVITY_L\n if rec[\"Activity\"] < LIMIT_ACTIVITY_L or rec[\"Activity\"] > act_cutoff_high or rec[\"Toxic\"] or rec[\"OverAct\"] > OVERACT_H:\n similars_determined = False\n if rec[\"OverAct\"] > OVERACT_H:\n rec[\"Max_Sim\"] = \"Overact.\"\n rec[\"Col_Sim\"] = cprt.COL_RED\n else:\n similars_determined = True\n assign_colors(rec)\n convert_bool(rec, \"Toxic\")\n\n if has_details:\n detailed_cpds.append(well_id)\n details_fn = sanitize_filename(well_id)\n plate = rec[\"Plate\"]\n rec[\"Link\"] = '<a href=\"../{}/details/{}.html\">Detailed<br>Report</a>'.format(\n plate, details_fn)\n if similars_determined:\n if \"int\" in mode:\n # similar = {\"Similarity\": [rec[\"Similarity\"]]}\n similar = pd.DataFrame(\n {\"Well_Id\": [well_id], \"Similarity\": [rec[\"Similarity\"]]})\n else:\n similar = sim_refs[sim_refs[\"Well_Id\"] == well_id].compute()\n similar = similar.sort_values(\"Similarity\",\n ascending=False).reset_index()\n if len(similar) > 0:\n max_sim = round(\n similar[\"Similarity\"][0] * 100, 1) # first in the list has the highest similarity\n rec[\"Max_Sim\"] = max_sim\n if max_sim >= LIMIT_SIMILARITY_H:\n rec[\"Col_Sim\"] = cprt.COL_GREEN\n elif max_sim >= LIMIT_SIMILARITY_L:\n rec[\"Col_Sim\"] = cprt.COL_YELLOW\n else:\n rec[\"Col_Sim\"] = cprt.COL_WHITE\n print(\"ERROR: This should not happen (Max_Sim).\")\n else:\n rec[\"Max_Sim\"] = \"< {}\".format(LIMIT_SIMILARITY_L)\n rec[\"Col_Sim\"] = cprt.COL_RED\n\n if not highlight:\n # remove all coloring again:\n remove_colors(rec)\n report.append(row_templ.substitute(rec))\n report.append(cprt.TABLE_EXTRO)\n return \"\\n\".join(report), detailed_cpds",
"def _agg_proportions(df, members=None):\n p = df.copy()\n if members is not None:\n p = p.iloc[members]\n p = p.T.assign(\n group=pd.factorize(p.columns)[0],\n label=pd.factorize(p.columns)[-1],\n value=p.sum(), #/ p.sum().sum() * p.shape[0],\n row_count=p.shape[0]\n )\n p = p[['label', 'group', 'value', 'row_count']]\n p.columns = ['label', 'group', 'value', 'row_count']\n p = list(p.T.to_dict().values())\n return p",
"def plot_stats(x_axis, y_axis, df, highlight=[]):\n a, b = df[x_axis], df[y_axis]\n\n X_train, X_test, y_train, y_test = train_test_split(a, b, test_size=0.33, random_state=42)\n\n X_train = np.array(X_train).reshape(-1, 1)\n X_test = np.array(X_test).reshape(-1, 1)\n y_train = np.array(y_train).reshape(-1, 1)\n y_test = np.array(y_test).reshape(-1, 1)\n\n regr = linear_model.LinearRegression()\n\n regr.fit(X_train, y_train)\n\n df[y_axis + \" STD\"] = df[y_axis].apply(lambda a: round((a-df[y_axis].mean())/df[y_axis].std()))\n df[y_axis + \" rank\"] = df[y_axis].rank(ascending=False)\n df[x_axis + \" rank\"] = df[x_axis].rank(ascending=False)\n \n mapper = linear_cmap(field_name=y_axis + \" STD\", palette=brewer[\"RdBu\"][len(df[y_axis + \" STD\"].unique())], \n low=min(df[y_axis + \" STD\"].unique()), high=max(df[y_axis + \" STD\"].unique()))\n \n source = ColumnDataSource(df)\n source2 = ColumnDataSource(df[df[\"Player\"].isin(highlight)])\n \n p = figure(x_range=(df[x_axis].min() - df[x_axis].std(), df[x_axis].max() + df[x_axis].std()), \n y_range=(df[y_axis].min() - df[y_axis].std(), df[y_axis].max() + df[y_axis].std()))\n \n r1 = p.circle(x=x_axis, y=y_axis,\n source=source, size=10, color=mapper, line_color=\"black\", legend_group= y_axis + \" STD\")\n\n p.title.text = y_axis + \" vs. \" + x_axis\n p.title.align = \"center\"\n p.xaxis.axis_label = x_axis\n p.yaxis.axis_label = y_axis\n p.legend.location = 'top_left'\n p.legend.title = \"St. Dev's from Avg \" + y_axis\n p.background_fill_color = \"#dddddd\"\n p.background_fill_alpha = 0.1\n \n line_x = [df[x_axis].min().item() - df[x_axis].std().item(), df[x_axis].max().item() + df[x_axis].std().item()]\n line_y = [(line_x[0]*regr.coef_.item()) + regr.intercept_.item(), (line_x[1]*regr.coef_.item()) + regr.intercept_.item()]\n r2 = p.line(line_x, line_y, line_width=2, color=\"black\")\n\n p.add_tools(HoverTool(renderers=[r1], tooltips=[\n (\"Player\", \"@Player\"),\n (y_axis, \"@{\" + y_axis +\"}{0.000}\"),\n (y_axis + \" Rank\", \"#@{\" + y_axis + \" rank}\"),\n (x_axis, \"@{\" + x_axis +\"}{0}\"),\n (x_axis + \" Rank\", \"#@{\" + x_axis + \" rank}\")]))\n\n \n p.add_tools(HoverTool(renderers=[r2], \n tooltips=[(x_axis, \"$x{0000}\"),\n (\"Predicted \" + y_axis, \"$y\")]))\n \n labels = LabelSet(x=x_axis, \n y=y_axis, text=\"Player\", y_offset=8,\n text_font_size=\"11px\", text_color=\"#555555\",\n source=source2, text_align='center')\n \n p.add_layout(labels)\n\n st.bokeh_chart(p)",
"def plot_qc_percents(qc_df):\n # Record NA values as 0\n qc_df = qc_df.fillna(0).set_index(\"sample\")\n r.par(mfrow=np.array([1,2]))\n num_samples = len(qc_df.num_reads)\n r_opts = r.options(scipen=10)\n r.options(r_opts)\n r.par(bty=\"n\", lwd=1.7, lty=2)\n r.dotchart(convert_to_r_matrix(qc_df[[\"percent_mapped\",\n \"percent_unique\",\n \"percent_ribo\"]]),\n xlab=\"Percent reads\",\n lcolor=\"black\",\n pch=19,\n gcolor=\"darkblue\",\n cex=0.8)\n r.par(bty=\"n\")\n r.dotchart(convert_to_r_matrix(qc_df[[\"percent_exons\",\n \"percent_cds\",\n \"percent_3p_utr\",\n \"percent_5p_utr\", \n \"percent_introns\"]]),\n xlab=\"Percent reads\",\n lcolor=\"black\",\n pch=19,\n gcolor=\"darkblue\",\n cex=0.8)",
"def maxi():\r\n st.dataframe(Data.style.highlight_max(axis=0))",
"def showRecommendations(self):\n\t\t#rn_im_index = np.where( df_index == 10561)[0][0] #similar color but no similar shape\n\t\t\n\t\t#rn_im_index = np.where( df_index == 22472)[0][0] # similar color but no similar shape\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(61706, 0.16241728944546732), (94073, 0.15613203034271395), (61836, 0.15494992784841455), (61835, 0.15494992784841452), (61825, 0.15163383319000062), (61745, 0.15031672266647675), (26848, 0.14479933826475058), (61760, 0.14353241349060006)]\n\n\t\tOutput Color\n\t\t[(22492, 0.72863097869032856), (22482, 0.66834821692729429), (3351, 0.45135804324105538), (29982, 0.40733726762782918), (85603, 0.40595375826379132), (22502, 0.38204339162468243), (29913, 0.36735985661014864), (29581, 0.3669268043422747)]\n\n\t\t\"\"\"\n\t\t\n\t\t#rn_im_index = np.where( df_index == 26746)[0][0] #Similar shape and similar color\n\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(27380, 0.1817530749164192), (29457, 0.1353165149065198), (1336, 0.12885937891206711), (27355, 0.12241573468787358), (29704, 0.12009259771972887), (29603, 0.11196184515165516), (29594, 0.11196184515165516), (26809, 0.11097441686854403)]\n\n\t\tOutput Color\n\t\t[(26809, 0.80634030626051745), (27380, 0.79789790693763663), (27355, 0.79542468562323521), (27018, 0.74331190002098657), (27197, 0.73454915804315535), (26913, 0.73410853271216192), (26905, 0.73410853271216192), (27617, 0.73098284820738935)]\n\n\t\t\"\"\"\n\n\t\t#rn_im_index = np.where( df_index == 27288)[0][0] #blurry image\n\t\t#rn_im_index = np.where( df_index == 27294)[0][0] # Similar Color and similar shape\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(27133, 0.35485652442453264), (27128, 0.32115384345167203), (27151, 0.25627343126278629), (27145, 0.25366123246450772), (27237, 0.25131923154633229), (27303, 0.22385072157466906), (27139, 0.22229444866797674), (27299, 0.22049959456469045)]\n\n\t\tOutput Color\n\t\t[(27133, 0.96240728970715483), (27128, 0.96009243888171958), (27145, 0.94268324228267275), (27303, 0.93286490646887354), (27139, 0.9244608465512546), (27237, 0.87199166625029467), (27049, 0.86531150055386774), (27066, 0.86139090244063599)]\n\n\t\t\"\"\"\n\n\t\t#rn_im_index = np.where( df_index == 52528)[0][0] # some have similar shape and some have similar color\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(93975, 0.31989999912901967), (61835, 0.31528273207820834), (61836, 0.31528273207820828), (61745, 0.31261425625988493), (61825, 0.31226105280375738), (61706, 0.31006537435901937), (61760, 0.29497111365575518), (94073, 0.28643748527418661)]\n\t\t\n\t\tOutput Color\n\t\t[(52542, 0.7633360888150692), (27402, 0.7582411610565466), (59301, 0.71242045321505865), (27329, 0.69968585913071302), (52539, 0.6996578131078881), (27335, 0.69215065941368603), (52469, 0.69152133535379212), (52473, 0.68799897765402473)]\n\n\t\tOutput c2d\n\t\t[(85620, 39705.292103093299), (52469, 38947.56038916672), (93975, 37706.480789897578), (52542, 37604.001320837888), (27402, 36709.321927197598), (27118, 36164.067396937884), (63718, 35906.648243400079), (63709, 35906.648243400079)]\n\t\n\n\t\t\"\"\"\n\t\t# Similar in color but dissimilar in shape\n\t\t#rn_im_index = np.where( df_index == 94380)[0][0] # Similar with color. Similar with shape. Very good with shape. 
Good Recommendations 52469(Shape) 94383 (color)\n\t\t\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(52469, 0.22380221768394279), (61836, 0.17343131445222859), (61835, 0.17343131445222859), (61825, 0.1713416617900273), (61745, 0.16700001977657994), (35922, 0.16614680579871874), (61715, 0.16380442450621885), (61706, 0.16194776280945139)]\n\t\t\n\t\tOutput Color\n\t\t[(94383, 0.69238692936637536), (26960, 0.58939898313472816), (26957, 0.58939898313472816), (29412, 0.58436143235370375), (29371, 0.58436143235370375), (29453, 0.5745231714319865), (29616, 0.57270906625007156), (29970, 0.57018718322031081)]\n\n\t\tOutput c2d\n\t\t[(94383, 37226.57203206882), (52558, 37007.251051234598), (26960, 36448.333956681076), (26957, 36448.333956681076), (1441, 36380.413117473567), (50197, 35994.006084886816), (94057, 35671.971168930344), (27533, 35061.385308567049)]\n\t\n\t\t\"\"\"\n\n\t\t#rn_im_index = np.where( df_index == 94080)[0][0] # some have similar shape and some have similar color\n\t\t\"\"\"\n\t\tOutput c2d\n\t\t[(57755, 29305.613736454678), (61797, 28828.064153886309), (61731, 28828.064153886309), (29417, 27874.375538422293), (63771, 27596.578857622582), (63765, 27596.578857622582), (63758, 27442.936837903482), (63750, 27442.936837903482)]\n\n\t\t\"\"\"\n\n\t\t# Completely random image that doesn't have similar images\n\t\t#rn_im_index = np.where( df_index == 1334)[0][0]\n\t\tdf = self.df\n\t\tdf_index = df.index.values\n\t\trn_im_index = random.randint(0, df.shape[0])\n\n\t\tprint \"random image index: {} id:{}\".format(rn_im_index, df_index[rn_im_index])\n\n\t\ti = rn_im_index\n\t\tindex_x = df.iloc[0:i,i].index\n\t\tindex_y = df.iloc[i,i:df.index.values.size].index\n\n\t\tvalues_x = df.iloc[0:i,i].values\n\t\tvalues_y = df.iloc[i,i:df.index.values.size].values\n\n\t\tindex = np.concatenate((index_x, index_y),axis=0)\n\t\tvalues = np.concatenate((values_x,values_y),axis=0)\n\n\t\tzipped = zip(index,values)\n\t\tzipped_sorted = sorted(zipped, key=lambda x: x[1])[::-1][0:8]\n\t\t#zipped_sorted = sorted(zipped, key=lambda x: x[1])[0:8]\n\t\tprint zipped_sorted\n\t\tindex , values = zip(*zipped_sorted)\n\t\t#print index\n\t\ttop_n_similar_images = map(int,list(index))\n\t\t#return df, duplicated_items\n\n\t\t# Filter out threshold less than 0.5\n\t\t#if self.mode == 'RGB':\n\t\tindex_aux = []\n\t\ti = 0\n\t\tfor im_id in top_n_similar_images:\n\t\t\tif self.mode == 'RGB' and values[i] > 0.5:\n\t\t\t\tindex_aux.append(im_id)\n\t\t\telif self.mode == 'L' and values[i] > 0.1:\n\t\t\t\tindex_aux.append(im_id)\n\t\t\ti += 1\n\n\t\ttop_n_similar_images = index_aux\n\n\t\tif len(top_n_similar_images) > 0 or self.mode == 'L':\n\t\t\n\t\t\t#print top_n_similar_images\n\t\t\ttop_n_similar_images = self.removeDuplicates(top_n_similar_images)\n\t\t\t#print top_n_similar_images\n\t\n\t\t\t#top_n_similar_images = df.sort_values(by=[rn_im_index],ascending = False).loc[:,rn_im_index][0:10].index.values\n\t\t\t\n\t\t\toutput = open(self.data_path + 'X_original.pkl', 'r')\n\t\t\tX_original = cPickle.load(output)\n\t\t\toutput.close()\n\t\t\t\n\t\t\t#print top_n_similar_images[0]\n\t\t\tindex = np.asarray(index,dtype='int64')\n\t\t\t\n\t\t\tif self.mode == 'RGB':\n\t\t\t\tself.reconstructImage(X_original[rn_im_index]).show()\n\t\t\telif self.mode == 'L':\n\t\t\t\tim_base = X_original[rn_im_index] * 256\n\t\t\t\tim_base = np.asarray(im_base, dtype='float64')\n\t\t\t\tim_base = filter.sobel(im_base)\n\t\n\t\t\t\tio.imshow(im_base)\n\t\t\t\tio.show()\t\n\n\t\t\tfor i in xrange(0,len(top_n_similar_images)):\n\t\t\t\tindex_i = 
np.where( df_index == top_n_similar_images[i])[0][0]\n\n\t\t\t\tif self.mode == 'L':\n\t\t\t\t\tim_i = X_original[index_i] * 256\n\t\t\t\t\tim_i = np.asarray(im_i, dtype='float64')\n\t\t\t\t\tim_i = filter.sobel(im_i)\n\t\n\t\t\t\t\tio.imshow(im_i)\n\t\t\t\t\tio.show()\n\n\t\t\t\telif self.mode == 'RGB':\n\t\t\t\t\tself.reconstructImage(X_original[index_i]).show()\n\t\telse:\n\t\t\tprint \"There are no image higher than the minimum threshold\"",
"def df_to_html(df, percentage_columns=None): # pragma: no cover\n big_dataframe_setup()\n try:\n res = \"<br><h2> {} </h2>\".format(df.name)\n except AttributeError:\n res = \"\"\n df.style.set_properties(**{\"text-align\": \"center\"})\n res += df.to_html(\n formatters=_formatters_dict(\n input_df=df, percentage_columns=percentage_columns\n )\n )\n res += \"<br>\"\n return res",
"def plot_pie_charts_of_word_class_distribution(df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for _, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n \n # plotting circle diagram for the specific genre\n avg_percentage_nouns = filtered_df['nouns'].mean()\n avg_percentage_verbs = filtered_df['verbs'].mean()\n avg_percentage_adverbs = filtered_df['adverbs'].mean()\n\n total = avg_percentage_nouns + avg_percentage_nouns + avg_percentage_nouns\n nouns = avg_percentage_nouns / total * 100\n verbs = avg_percentage_verbs / total * 100\n adverbs = avg_percentage_adverbs / total * 100\n\n # Pie chart\n labels = ['Nouns', 'Verbs', 'Adverbs']\n sizes = [nouns, verbs, adverbs]\n\n _, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax1.axis('equal') \n plt.tight_layout()\n plt.title(f'Circle diagram of the genre \"{genre}\"s average word classes distribution')\n plt.show()\n # plt.savefig(f'src/visualization/feature_plots/{genre}_word_class_distribution')",
"def context_study_stats(frame_path=METRICS_DIR+'/merge.csv'):\n frame = pd.read_csv(frame_path)\n print(frame['LOC_prod'].mean())\n print(frame['LOC_prod'].sum())\n print(frame['LOC_test'].sum())\n print(frame['no_mutations'].sum())\n print(frame.shape[0])\n\n sizes = frame.groupby('project').size()\n prod = frame.groupby('project')['LOC_prod'].sum( )\n test = frame.groupby('project')['LOC_test'].sum()\n mutants = frame.groupby('project')['no_mutations'].sum()\n\n result = pd.DataFrame({'project': list(sizes.index),\n 'size': list(sizes),\n 'prod': list(prod),\n 'test': list(test),\n 'mutants': list(mutants)},\n columns=['project', 'size', 'prod', 'test', 'mutants'])\n print(result.to_latex())",
"def hf(res, field):\n return res.highlights(field, minscore=0)",
"def show_df_by_tags(df, tags):\n return st.dataframe(filter_df(df, tags)) if not 'Expert' in df.columns else st.dataframe(filter_df(df, tags), height=150, width=450)",
"def SetProportion(self, p):\r\n\r\n self.proportion = p",
"def visualize_confidence_level(prediction_proba):\n data = (prediction_proba[0]*100).round(2)\n grad_percentage = pd.DataFrame(data = data,columns = ['Porcentage'],index = ['Est','Int','Int_Est','Rob','Rob_Est','Rob_Int','Rob_Int_Est'])\n ax = grad_percentage.plot(kind='barh', figsize=(7, 4), color='#0067e7', zorder=10, width=0.8)\n ax.legend().set_visible(False)\n ax.set_xlim(xmin=0, xmax=100)\n \n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(True)\n ax.spines['bottom'].set_visible(True)\n\n ax.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\n \n vals = ax.get_xticks()\n for tick in vals:\n ax.axvline(x=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\n\n ax.set_xlabel(\" Porcentage(%) Nivel de confianza\", labelpad=2, weight='bold', size=12)\n ax.set_ylabel(\"Victimización\", labelpad=10, weight='bold', size=12)\n ax.set_title('Nivel de confianza de la predicción ', fontdict=None, loc='center', pad=None, weight='bold')\n\n st.pyplot()\n \n return"
] | [
"0.61285084",
"0.58342224",
"0.58050156",
"0.5471041",
"0.53932345",
"0.5360068",
"0.53036433",
"0.52993584",
"0.5256975",
"0.52450204",
"0.51859456",
"0.51738006",
"0.5134529",
"0.51012987",
"0.50567937",
"0.5041237",
"0.5036244",
"0.5028948",
"0.49912107",
"0.49670103",
"0.49612236",
"0.49584636",
"0.49491405",
"0.49412307",
"0.49286312",
"0.49239033",
"0.4916741",
"0.49040487",
"0.48410133",
"0.48397213"
] | 0.58691823 | 1 |
Gets a client from pypodio, using the app_id given to the podioApi object when it was created. Optionally it receives an app_token; in that case it does not need to fetch it from the database. | def _getClient(self, app_token=None):
    if app_token is None:
        from . import models
        app_token = models.Aplicacion.objects.get(app_id=self.app_id).app_token
    return api.OAuthAppClient(settings.CLIENT_ID, settings.CLIENT_SECRET, self.app_id, app_token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_app(self, app_id):\n return req(self.logger, self.access_token, 'GET', '/apps/'+app_id, {})",
"def client(app=None):\n fs_client = _utils.get_app_service(app, _FIRESTORE_ATTRIBUTE, _FirestoreClient.from_app)\n return fs_client.get()",
"async def getAppProduct(self, item_id=None):\n payload = {}\n \n if item_id:\n payload[\"item_id\"] = item_id\n \n\n # Parameter validation\n schema = CatalogValidator.getAppProduct()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product/{item_id}/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"item_id\",\"description\":\"product id for a particular product.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"item_id\",\"description\":\"product id for a particular product.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", item_id=item_id)\n query_string = await create_query_string(item_id=item_id)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product/{item_id}/\", item_id=item_id), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def api_client(api_app):\n return api_app.test_client()",
"def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj",
"def get_client_by_id(self, client_id=None):\n # search client_id in list and return the client object\n for client in self.client_list:\n if client_id == client.client_id:\n return client.copy()\n\n # return empty client otherwise\n return Client()",
"def get(self, id: int) -> Client:\n\n return self.__clients[id]",
"async def get_app(self, app_id: str) -> dict:\r\n return await self.get(API_APP.format(app_id=app_id))",
"def test_get_client(oauth_client, response):\n response.get(f\"https://api.mollie.com/v2/clients/{CLIENT_ID}\", \"client_single\")\n\n client = oauth_client.clients.get(CLIENT_ID)\n assert isinstance(client, Client)\n assert client.id == CLIENT_ID\n assert client.resource == \"client\"\n assert client.organisation_created_at == \"2018-03-21T13:13:37+00:00\"",
"def get_app(self, app_id: Optional[str] = None) -> JSON:\n\n # TODO: unserialize\n return self.db.get_app(app_id)",
"def read_item(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n item = crud.item.get(db=db, id=id)\n if not item:\n raise HTTPException(status_code=404, detail='Item not found')\n if not crud.user.is_superuser(current_user) and (item.owner_id != current_user.id):\n raise HTTPException(status_code=400, detail='Not enough permissions')\n return item",
"def get_client_application(self, application_id, buyer_team__reference):\n data = {}\n\n data['buyer_team__reference'] = buyer_team__reference\n\n url = 'clients/applications/{0}'.format(application_id)\n return self.get(url, data)",
"def get_item(self, id: str, user: User) -> Optional[T]:",
"def get_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = GetClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def client(self, id):\n return self.query(Client).filter(Client.id == id).one()",
"def get_client_by_id(self, client_id):\r\n cursor = self.conn.cursor()\r\n cursor.execute(\"\"\"SELECT * FROM CLIENT WHERE id={}\"\"\".format(client_id))\r\n return cursor.fetchall()",
"def read_item(\n db: Session = Depends(deps.get_db),\n item: models.Item = Depends(deps.get_owned_item_by_id),\n current_user: schemas.UserInDB = Depends(deps.get_current_active_user),\n) -> Any:\n return item",
"def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError",
"def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)",
"def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]",
"def get_clientid(self):\n\n url = f'https://{self.__api}/v1/objects/client'\n body = {\"filter\": {}}\n with requests.post(url, json=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status_code not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status_code, response.content)\n return response.json().get('body')[0].get('id')",
"def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)",
"def get(self, id_cliente):\n cliente = get_cliente_id(id_cliente)\n if not cliente:\n api.abort(404)\n else:\n return cliente",
"def read_random_item(user_id: int, db: Session = Depends(get_db)):\n # Call function to retrieve a random item of a given user\n return crud.get_random_item(user_id, db)",
"def get_initial_resource(client, api_id):\n response = client.get_resources(\n restApiId=api_id\n )\n return response['items'][0]",
"def api_client(app):\n return app.test_client()",
"def client(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/users/clients/{params['id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n client = result[\"response\"][\"result\"][\"client\"]\n client_obj = FreshbooksClient(\n accounting_systemid=client['accounting_systemid'], \n first_name=client['fname'],\n last_name=client['lname'],\n email=client['email'],\n vat_name=client['vat_name'],\n vat_number=client['vat_number'],\n home_phone=client['home_phone'],\n organization=client['organization'],\n username=client['username']\n )\n return client_obj.__dict__",
"async def get_client(\n self,\n request: Request,\n client_id: str,\n client_secret: Optional[str] = None,\n ) -> Optional[OAuth2Client]:\n\n client_record = await self._db.query_one(\n Client.select(*OAuth2Client._fields, filters=\".id = <uuid>$id\"),\n id=client_id,\n )\n client_record = Client.from_obj(client_record)\n\n if client_record is not None:\n return OAuth2Client(\n client_id=client_record.client_id,\n client_secret=client_record.client_secret,\n grant_types=client_record.grant_types,\n response_types=client_record.response_types,\n redirect_uris=client_record.redirect_uris,\n scope=client_record.scope,\n )",
"def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()",
"def get_client(db: str, collection: str) -> Collection:\r\n return client[db][collection]"
] | [
"0.60084134",
"0.59242415",
"0.5678864",
"0.56198317",
"0.55885607",
"0.5561114",
"0.55412453",
"0.5527581",
"0.54644537",
"0.5451878",
"0.54084194",
"0.5390055",
"0.52960443",
"0.52796483",
"0.5270817",
"0.52567786",
"0.52563524",
"0.5234272",
"0.5232072",
"0.52201605",
"0.5202682",
"0.5176048",
"0.5165307",
"0.5161715",
"0.5154489",
"0.51544213",
"0.51489854",
"0.5136011",
"0.5129438",
"0.5129279"
] | 0.6530229 | 0 |
Returns raw information of the api object's application, as a Python dictionary. | def getAppInfo(self):
    data = self._client.Application.find(self.app_id)
    return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize(self):\n return {\n 'app_id': self.id,\n 'name': self.name,\n 'app_info': [item.serialize for item in self.appinfo.all()]\n }",
"def get_app_details(self, app_id):\n app_data = AppData.objects.get(uid=app_id)\n return model_to_dict(app_data)",
"def info ():\n\n info = {\n 'name' : app.config['APPLICATION_NAME'],\n 'short_name' : app.config['APPLICATION_SHORT_NAME'],\n 'main_page_url' : app.config['APPLICATION_MAIN_URL'],\n # 'css_url' : app.config.get ('APPLICATION_CSS_URL', ''),\n 'css' : 'span.smalltext { font-size: smaller }',\n 'supported_langs_query' : [ LANG ],\n }\n return make_json_response (info)",
"def map_to_app(self, app):\n app['build_infos'] = {}\n app['build_infos']['ssh_username'] = self.ssh_username.data\n app['build_infos']['source_ami'] = self.source_ami.data\n app['build_infos']['source_container_image'] = self.container.data\n app['build_infos']['subnet_id'] = self.subnet_id.data",
"def _get_app_info_Primary(self):\n return self._Primary_app_info",
"def info(self):\n return self._fetch_json('/api/info')",
"def process_app_info(self):\n pass",
"def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json",
"def info(self) -> dict:",
"def get_info(self):\n return {}",
"def info(self):\r\n return self._get('info', {})",
"def android_app_info(self) -> 'outputs.AndroidAppInfoResponse':\n return pulumi.get(self, \"android_app_info\")",
"def info(self):\n return {}",
"def json_dumps(self):\n application_obj = {\n \"id\": self.id,\n \"party\": Party.get_party_by_name(name=self.party_name),\n \"office\": Office.get_office_by_name(name=self.office_name),\n \"user\": User.find_user_by_id(id=self.user_id),\n \"date_created\": self.date_created,\n \"status\":self.status\n }\n return application_obj",
"def info() -> Dict[str, Any]:",
"async def get_app(self, app_id: str) -> dict:\r\n return await self.get(API_APP.format(app_id=app_id))",
"def serialize_data(self, app) -> dict:",
"def get_info(self) -> Optional[Dict[str, Any]]:",
"def get_main_information(self) -> Dict:\n if self.information is None:\n self.information = self.orthanc.get_instance_information(\n self.identifier\n )\n\n return self.information",
"def ios_app_info(self) -> 'outputs.IosAppInfoResponse':\n return pulumi.get(self, \"ios_app_info\")",
"def get_info(self):\n url = self._url_for_op('info')\n data= None # This will be a GET request since data is None\n response = self._get_raw_response(self._get_json_headers,\n self._get_json_response, url, data)\n response = json.loads(response)\n self.api_info = response['results']\n return self.api_info",
"def api(self) -> Optional[pulumi.Input['ApplicationApiArgs']]:\n return pulumi.get(self, \"api\")",
"def api(self) -> Optional[pulumi.Input['ApplicationApiArgs']]:\n return pulumi.get(self, \"api\")",
"def applicationsdetails():\n appdicts = db.hgetall('applications')\n finaldict = OrderedDict()\n for appname in sorted(appdicts):\n instances = json.loads(appdicts.get(appname))\n instance_map = OrderedDict()\n for key in sorted(instances):\n instance_map.__setitem__(key,instances.get(key))\n finaldict.__setitem__(appname,instance_map)\n return render_template('robots.html', appdicts=finaldict)",
"def asDict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'type': self.type,\n 'favicon': self.favicon,\n 'thumbnails': self.thumbnails,\n 'access': self.access,\n 'client_details': self.client_details\n }",
"def _GetInfo(self) -> Dict[str, Union[str, Dict[str, str]]]:\n version_dict = version.Version()\n\n return {\n \"title\":\n \"GRR Rapid Response API\",\n \"description\":\n \"GRR Rapid Response is an incident response framework \"\n \"focused on remote live forensics.\",\n \"contact\": {\n \"name\": \"GRR GitHub Repository\",\n \"url\": \"https://github.com/google/grr\"\n },\n \"license\": {\n \"name\": \"Apache 2.0\",\n \"url\": \"http://www.apache.org/licenses/LICENSE-2.0\"\n },\n \"version\": (f\"{version_dict['major']}.\"\n f\"{version_dict['minor']}.\"\n f\"{version_dict['revision']}.\"\n f\"{version_dict['release']}\"),\n }",
"def info(self):\n return {\n \"dimension_x\": self.dimension_x,\n \"dimension_y\": self.dimension_y,\n \"api_level\": self.api_level,\n \"device_model\": self.model,\n }",
"def api(self) -> pulumi.Output[Optional['outputs.ApplicationApi']]:\n return pulumi.get(self, \"api\")",
"def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None",
"def data(self):\n return { # TODO Actually query for this shit\n \"foo\": self.__name__,\n \"url\": f\"{self.request.resource_url(self)}\",\n }"
] | [
"0.7094638",
"0.694228",
"0.6678645",
"0.6611751",
"0.65245116",
"0.6498092",
"0.64952576",
"0.64924556",
"0.64852405",
"0.6457486",
"0.6449787",
"0.64373326",
"0.64291966",
"0.64280385",
"0.6368658",
"0.6310357",
"0.6304767",
"0.629864",
"0.6292921",
"0.6239421",
"0.6216532",
"0.6197269",
"0.6197269",
"0.61702394",
"0.6103841",
"0.60986775",
"0.60885483",
"0.6081738",
"0.60765374",
"0.6033245"
] | 0.74249846 | 0 |
Returns all items belonging to a certain view, given by its ID. Like all new methods, it automatically asks for the external ID. | def get_items_by_view(self, view_id, depth=1):
data = self.filter_by_view(
int(self.app_id), int(view_id),{
'limit': 500,
},
)["items"]
fields = [self.make_dict(item, external_id=False, depth=depth, optimize=True) for item in data]
return fields | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show(self, item_id):\n pass",
"def view(self, view_id):\r\n return resources.View(self, view_id)",
"def get_user_items(self, id):\n return self.execute(TABELLE['items']['select']['by_id'], (id,))",
"def __getView(self, raw_view_id):\n if iDevice.dump_view:\n self.__dumpview()\n id_RE = re.compile(\"^(id/\\D+)\\((\\S+)\\)$\")\n if DEBUG:\n printLog(self.threadName + \"[__getView] raw view id:%s\" % raw_view_id)\n if id_RE.match(raw_view_id):\n # search the child by sequence path\n viewId, seq_string = id_RE.search(raw_view_id).groups()\n if DEBUG:\n printLog(self.threadName + \"[__getView] view id:%s, seq:%s\" % (viewId, seq_string))\n seqs = seq_string.split(',')\n tv = self.__getChildView(viewId, seqs)\n else:\n # search with the given id directly\n if DEBUG:\n printLog(self.threadName + \"finding view by id %s ...\" % raw_view_id, logging.DEBUG)\n tv = self.vc.findViewById(raw_view_id)\n # if tv:\n # printLog('Found view %s.' % raw_view_id, logging.DEBUG)\n # self.resultFlag = True\n # else:\n # printLog('Target view %s not found.' % raw_view_id, logging.ERROR)\n # self.resultFlag = False\n\n return tv",
"def get_items_for_catalog(catalog_id):\n pass",
"def getview(viewid) :\n\treturn Jikji.getinstance().getview(viewid)",
"def get_item_detail(item_id):\n pass",
"def _viewer_item_by_id(self, vid):\n def find_viewer_item(stack_items):\n for stack_item in stack_items:\n for viewer_item in stack_item.get('viewers'):\n if viewer_item['id'] == vid:\n return viewer_item\n\n if len(stack_item.get('children')) > 0:\n return find_viewer_item(stack_item.get('children'))\n\n viewer_item = find_viewer_item(self.state.stack_items)\n\n return viewer_item",
"def show_item_by_id(plugin, item_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + item_id)",
"def getViews(read):\n ...",
"def id_api(request, **args):\n ids = request.GET.get('id')\n data = ApiView.objects.filter(id=ids)\n serializer = ApiViewSerializer(data,many=True)\n return Response(serializer.data)",
"def get_all(self, *ids):",
"def get(self, _id):",
"def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n username=username)",
"def read(self, request, pk):\n if pk is None:\n return self.model.objects.all()\n else:\n return self._object_get(pk)",
"def get_catalog_items(id):\n\n username = login_session.get('username', None)\n catalogs = session.query(Catalog).all()\n selected_catalog = session.query(Catalog).filter_by(id=id).one()\n items = selected_catalog.items\n catalogs_display = [\n {\n 'id': catalog.id,\n 'name': catalog.name\n } for catalog in catalogs]\n items_display = [{'id': item.id, 'title': item.title} for item in items]\n items_summary = '{0} Items ({1} items)'.format(\n selected_catalog.name,\n len(items_display))\n return render_template(\n 'home.html',\n catalogs_display=catalogs_display,\n items_display=items_display,\n items_summary=items_summary,\n username=username)",
"def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)",
"def get_from_id(self,id=None):\n if id is None:\n return(self.items)\n if type(id) is int:\n for item in self.items:\n if item.id == id: return(item)\n items=[]\n if type(id) is list:\n return ([item.id for item in self.items if (item.id in id)])",
"def abstract_get(self, model, id=False):\n return self.env[model].sudo().browse(id) if id else self.env[model].search([])",
"def viewItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n item = session.query(Item).filter_by(id=item_id).one()\n return render_template('viewitem.html', sport_id=sport_id, item_id=item_id,\n item=item, sport=sport)",
"def view_experiment(request,id):\n\texp = Experiment.objects.get(id=id)\n\tpossibly_related = get_related(exp)\n\treturn list_detail.object_detail(request,\n\t\t\t\t\t\t\t\t\tqueryset=Experiment.objects.filter(id=id),\n\t\t\t\t\t\t\t\t\tobject_id=exp.id,\n\t\t\t\t\t\t\t\t\ttemplate_name='experiments/experiment.html',\n\t\t\t\t\t\t\t\t\textra_context= {\"possibly_related\" : possibly_related})",
"def get_specific_item(model, type, id):\n if(type == \"office\"):\n return model.get_office(id)\n elif(type == \"party\"):\n return model.get_party(id)\n return []",
"def _get(self, table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)",
"def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)",
"def client_by_id_view(request):\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n id = request.matchdict['id']\n query = request.dbsession.query(Client).filter(\n Client.id == id).first()\n return Utils.serialize_one(query)",
"def item_detail(request, item_id):\n # Select product based on URL param\n item = SELECT('item', where=f'id = {item_id}', _print=False)\n\n context = {\n 'item': item,\n 'photos': [item['photo_primary']] + item['photos']\n }\n return render(request, 'item_detail.html', context)",
"def get_object(self, vim_type, vim_id):\n content = self.soap_client.content\n try:\n items = [\n item\n for item in content.viewManager.CreateContainerView(\n content.rootFolder, [vim_type], recursive=True\n ).view\n ]\n except Exception:\n logger.exception(\n 'Unable to get VMware object. Type: %s, ID: %s.', vim_type, vim_id\n )\n raise VMwareBackendError('Unknown error.')\n for item in items:\n if item._moId == vim_id:\n return item",
"def get_item_by_id(request, pk):\n item = get_object_or_404(StockItem, pk=pk)\n res_dict = {\n 'id': item.id,\n 'name': item.name,\n 'count': item.count,\n 'date_added': item.date_added,\n 'exp': item.date_of_expiration,\n 'added_by': item.added_by,\n 'cat': str(item.fk_category),\n 'subcat': str(item.fk_subcategory),\n 'notes': item.notes\n }\n return JsonResponse(res_dict)",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)"
] | [
"0.6050449",
"0.60087174",
"0.58421457",
"0.5774646",
"0.57450885",
"0.5733086",
"0.5725173",
"0.5718675",
"0.5693349",
"0.56803656",
"0.56435144",
"0.5628805",
"0.5583422",
"0.5583316",
"0.5546081",
"0.5523566",
"0.55118436",
"0.5508082",
"0.5507996",
"0.5435928",
"0.5392885",
"0.5388824",
"0.5372345",
"0.5367665",
"0.5345485",
"0.53447473",
"0.53326285",
"0.53316176",
"0.53013355",
"0.5249649"
] | 0.65132153 | 0 |
Creates a dictionary with the external_id of the item's fields as keys, and their values as the dictionary values. | def makeDict(self, item, nested=False, no_html=False):
dictionary = dict([(field["external_id"], self.getFieldValue(field, nested, no_html)) for field in item["fields"]])
return {'item': item["item_id"], 'values':dictionary} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_dict(self, item, external_id=True, no_html=False, depth=1, optimize=False):\n if external_id:\n key_type = \"external_id\"\n else:\n key_type = \"field_id\"\n\n dictionary = dict([(field[key_type], {\"label\":field[\"label\"], \"type\": field[\"type\"], \"value\": self.getFieldValue(field, no_html, external_id=external_id, depth=depth, optimize=optimize)}) for field in item[\"fields\"]])\n return {'item': item[\"item_id\"], 'values':dictionary}",
"def getItemDict(self, item):\n newDict = {}\n itemDict = item.__dict__\n newDict['enabled'] = itemDict['wdgEnabled'].isChecked()\n newDict['label'] = str(itemDict['wdgLabel'].text())\n newDict['type'] = str(itemDict['wdgType'].currentText())\n newDict['value'] = str(itemDict['wdgValue'].text())\n newDict['comment'] = str(itemDict['wdgComment'].text())\n return newDict",
"def _build_eitem_dict(self, eitem_json, document_pid):\n self._apply_url_login(eitem_json)\n self._set_record_import_source(eitem_json)\n dois = [\n doi\n for doi in self.json_data.get(\"identifiers\", [])\n if doi[\"scheme\"] == \"DOI\"\n ]\n eitem_json.update(\n dict(\n document_pid=document_pid,\n open_access=self.open_access,\n identifiers=dois,\n created_by={\n \"type\": \"import\",\n \"value\": self.metadata_provider,\n },\n urls=self.json_data[\"_eitem\"].get(\"urls\", []),\n description=self.json_data[\"_eitem\"].get(\"description\", \"\"),\n )\n )",
"def extract_key_item_data(item_data):\n extracted_item_data = {}\n\n for item_id in item_data:\n key_data = {}\n key_data[\"id\"] = item_id\n key_data[\"name\"] = item_data[item_id][\"name\"]\n key_data[\"image\"] = item_data[item_id][\"image\"][\"full\"]\n key_data[\"gold\"] = item_data[item_id][\"gold\"][\"total\"]\n key_data[\"tags\"] = item_data[item_id][\"tags\"]\n extracted_item_data[item_id] = key_data\n \n return extracted_item_data",
"def item_to_dict(dict_item):\n info = {}\n item_info = None\n\n for k, v in dict_item.items():\n if k == 'ItemType':\n info[k] = api.item_dict_inv[dict_item['ItemType']]\n elif k == 'Item':\n item_info = colectica.parse_xml(v, api.item_dict_inv[dict_item['ItemType']])\n else:\n info[k] = v\n d = {**info, **item_info}\n return d",
"def item_dict():\n\n items = {'page': 'pages', 'table': 'tables',\n 'viz': 'vizualisation', 'column': 'columns'}\n return items",
"def _item_to_dict(self, raw_response):\n\n if 'Item' not in raw_response:\n return {}\n\n return {\n field.name: raw_response['Item'][field.name][field.data_type] for field in self._available_fields\n }",
"def _format_primary_key_data(self, request):\n \n \n for index, item in enumerate(request.data['items']):\n try:\n request.data['items'][index]['item'] = {'id': item['id']}\n del request.data['items'][index]['id']\n except KeyError as e:\n logger.warn(e)\n \n return request",
"def get_item_data(item):\n\n return OnedriveItem(\n id=item.get('id'),\n name=item.get('name'),\n web_url=item.get('webUrl'),\n created_by=item.get('createdBy')\n ).__dict__",
"def get_dict_repr(self):\n return { 'id': self.invoice_id,\n self.json_id: self.items }",
"def custom_fields(self) -> dict:\n url = f'{self.api_url}Fields?apiKey={self.api_key}'\n r_dict = self._es_get_request(url)\n self._check_response(r_dict)\n\n return {l['Field']['Name']: l['Field']['Id'] for l in\n r_dict['ApiResponse']['Data']['Fields']} # list of dicts",
"def to_dict(self) -> Dict[str, Any]:\n\n data = self._entry.to_dict()\n del data[\"item-hash\"]\n data[\"item\"] = [self._blob.to_dict()]\n\n return data",
"def make_to_dict(item, include_timestamp):\n return {\n '%s:%s' % (cell.family, cell.qualifier): (cell.value, cell.timestamp) if include_timestamp else cell.value\n for cell in item\n }",
"def _prepare_external_id_vals(self, cr, uid, res_id, ext_id, referential_id, context=None):\n ir_model_data_vals = {\n 'name': self.prefixed_id(ext_id),\n 'model': self._name,\n 'res_id': res_id,\n 'referential_id': referential_id,\n 'module': 'extref/' + self.pool.get('external.referential').\\\n read(cr, uid, referential_id, ['name'])['name']\n }\n return ir_model_data_vals",
"def prepare_external(self, external_id: str, external_entry: Dict[str, Any]) -> Dict[str, Any]:\n return external_entry",
"def granule_core_fields(item):\n record = {}\n umm = item.get('umm', {})\n record['GranuleUR'] = umm.get('GranuleUR')\n\n meta = item.get('meta', {})\n record['concept-id'] = meta.get('concept-id')\n record['revision-id'] = meta.get('revision-id')\n record['native-id'] = meta.get('native-id')\n return {key: value for key, value in record.items() if value}",
"def make_item_record(cls,itm,x=350,y=200,z=1,sx=1,sy=1,ms=''):\n return {'id':int(itm),'x':x,'y':y,'z':z,'sx':sx,'sy':sy,'ms':ms}",
"def _parse_item(self, item):\n result = {}\n for f in self._invoice_report_item_fields:\n val = get_value_by_relation_path(item, f)\n # when it's function - call it! usefull for Choices\n # (get_<field_name>_display)\n if callable(val):\n val = val()\n elif isinstance(val, datetime.datetime):\n val = val.strftime(self._invoice_report_datetime_format)\n elif isinstance(val, Money):\n val_currency = '{}_currency'.format(self._price_field)\n result[val_currency] = str(val.currency) \\\n if val.currency else self._invoice_report_empty_value\n val = val.amount\n result[f] = str(val) if val else self._invoice_report_empty_value\n\n return result",
"def get_item_dict(self, item):\n item_values = [\n 'item-name', 'current-amount', 'item-price', 'item-cost']\n item_dict = {}\n for value in item_values:\n key = value.split('-')[1]\n item_dict[key] = item.find_element_by_class_name(value)\n item_dict['id'] = item_dict['amount'].get_attribute('data-item_id')\n\n ch_amount = item.find_elements_by_class_name('change-amount')\n for button in ch_amount:\n action = button.get_attribute('data-action')\n item_dict[action] = button\n\n return item_dict",
"def item_to_dynamo_db_item(item):\n now = long(time.time())\n return {\n 'timeserie': {'S': item['timeserie']},\n 'time': {'S': str(item['time'])},\n 'value': {'N': str(item['value'])},\n 'ttl': {'N': str(now + (1 * 60))},\n }",
"def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"toppings\": self.toppings,\n \"price\": self.get_price()\n }",
"def create_external_id_vals(self, cr, uid, existing_rec_id, external_id, referential_id, context=None):\n ir_model_data_vals = \\\n self._prepare_external_id_vals(cr, uid, existing_rec_id,\n external_id, referential_id,\n context=context)\n return self.pool.get('ir.model.data').create(cr, uid, ir_model_data_vals, context=context)",
"def item2id(self):\n if self._item2id is None:\n self._item2id = dict(zip(self.item_unique_vals, range(self.n_items)))\n return self._item2id",
"def _to_dict(self, item):\n if isinstance(item, Buffer):\n ret = {}\n fields = item._all_fields()\n for field in fields:\n ret[field.attr_name()] = self._to_dict(getattr(item, field.attr_name()))\n return ret\n\n if isinstance(item, Struct):\n ret = {}\n for field in item._container_.fields:\n if hasattr(field, 'name'):\n ret[field.name] = self._to_dict(field.get_value(item))\n elif isinstance(field, FieldListContainer):\n for inner_field in field.fields:\n if not isinstance(inner_field, AnonymousField):\n ret[inner_field.name] = self._to_dict(inner_field.get_value(item))\n return ret\n\n if isinstance(item, bytearray):\n return '0x' + binascii.hexlify(item) if item else ''\n\n if isinstance(item, list):\n return [self._to_dict(x) for x in item]\n\n return item",
"def printable_item(item):\n printable = {}\n printable['validation'] = item['validation']\n printable['name'] = item['name']\n printable['optional'] = item['optional']\n if 'define' in item:\n printable['define'] = item['define']\n return printable",
"def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements",
"def items_dict(slist, key=None):\n fields = slist.fields()\n items = [collections.OrderedDict((k, f) for k, f in zip(fields[0], item))\n for item in fields[1:]]\n if key:\n return collections.OrderedDict((i[key], i) for i in items)\n else:\n return items",
"def make_item_dict(params):\n ret = {}\n\n list_len = len(params)\n if list_len%2 != 0:\n raise ItemDataError(str(params))\n\n index = 0\n while index < list_len:\n if params[index]:\n # Allow the value (params[index + 1] here) to be empty (None)?\n # Let Splunk to return an error if it does not support empty value\n ret[params[index]] = params[index + 1]\n else:\n # If key is None, we can not add it to the dictionary\n LOG.debug(\"The {}th key is None with value {}\".format(str(index), str(params[index + 1])))\n index += 2\n\n return ret",
"def concept_id_fields(item):\n return scom.concept_id_fields(item)",
"def serialise(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'items': [i.serialise for i in self.items]\n }"
] | [
"0.7522956",
"0.6268185",
"0.6242335",
"0.61984485",
"0.5945112",
"0.59421134",
"0.59377867",
"0.58974934",
"0.5887909",
"0.58877665",
"0.5864283",
"0.58118176",
"0.5773755",
"0.57677674",
"0.57671314",
"0.57475454",
"0.57330495",
"0.5711399",
"0.5707601",
"0.56359065",
"0.55994135",
"0.55532604",
"0.5552713",
"0.5519838",
"0.5486882",
"0.5480056",
"0.5446139",
"0.5442495",
"0.5431417",
"0.542329"
] | 0.7668655 | 0 |
Creates a dictionary with the external_id of the item's fields as keys, and their values as the dictionary values. This second version allows choosing between the field_id and the external_id for the dictionary's key, and adds the field type to the generated dictionary. | def make_dict(self, item, external_id=True, no_html=False, depth=1, optimize=False):
if external_id:
key_type = "external_id"
else:
key_type = "field_id"
dictionary = dict([(field[key_type], {"label":field["label"], "type": field["type"], "value": self.getFieldValue(field, no_html, external_id=external_id, depth=depth, optimize=optimize)}) for field in item["fields"]])
return {'item': item["item_id"], 'values':dictionary} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeDict(self, item, nested=False, no_html=False):\n dictionary = dict([(field[\"external_id\"], self.getFieldValue(field, nested, no_html)) for field in item[\"fields\"]])\n return {'item': item[\"item_id\"], 'values':dictionary}",
"def _item_to_dict(self, raw_response):\n\n if 'Item' not in raw_response:\n return {}\n\n return {\n field.name: raw_response['Item'][field.name][field.data_type] for field in self._available_fields\n }",
"def getItemDict(self, item):\n newDict = {}\n itemDict = item.__dict__\n newDict['enabled'] = itemDict['wdgEnabled'].isChecked()\n newDict['label'] = str(itemDict['wdgLabel'].text())\n newDict['type'] = str(itemDict['wdgType'].currentText())\n newDict['value'] = str(itemDict['wdgValue'].text())\n newDict['comment'] = str(itemDict['wdgComment'].text())\n return newDict",
"def make_dict(cls, fields, fields_kwargs):\n return utils.make_dict(fields, fields_kwargs)",
"def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}",
"def field_mapping(self):\n fields = self.fields\n if self.target_field is not None:\n del fields[self.target_field.get('name')]\n field_labels = list(self.fields.keys())\n\n field_mapping = {\n name: (\n field_labels.index(name),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DataField'\n }\n\n field_mapping.update({\n name: (\n field_labels.index(self.find(e, 'FieldRef').get('field')),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DerivedField'\n })\n\n return field_mapping",
"def _fields_to_dict(fields_in):\n dict_out = {}\n\n for key, val in fields_in.items():\n param = {}\n param['default'] = val.missing\n param['type'] = type(val.missing)\n if key == 'files' or key == 'urls':\n param['type'] = str\n\n val_help = val.metadata['description']\n if 'enum' in val.metadata.keys():\n val_help = \"{}. Choices: {}\".format(val_help, \n val.metadata['enum'])\n param['help'] = val_help\n\n try:\n val_req = val.required\n except:\n val_req = False\n param['required'] = val_req\n\n dict_out[key] = param\n return dict_out",
"def custom_fields(self) -> dict:\n url = f'{self.api_url}Fields?apiKey={self.api_key}'\n r_dict = self._es_get_request(url)\n self._check_response(r_dict)\n\n return {l['Field']['Name']: l['Field']['Id'] for l in\n r_dict['ApiResponse']['Data']['Fields']} # list of dicts",
"def _create_fields(self, init=None):\n\t\t# don't require the user to define this, hardcode it in\n\t\tif \"id\" not in self.fields:\n\t\t\tself.fields[\"id\"] = int\n\n\t\tif self.__fields is None:\n\t\t\tself.__fields = {}\n\t\tif self.__field_types is None:\n\t\t\tself.__field_types = self.fields.copy()\n\n\t\tfor k,v in self.fields.iteritems():\n\t\t\tif type(v) is type:\n\t\t\t\t# do NOT instantiate this at this moment, leave the values\n\t\t\t\t# as None\n\t\t\t\tv = None\n\t\t\telse:\n\t\t\t\tself.__field_types[k] = v.__class__\n\n\t\t\tif init is not None and k in init:\n\t\t\t\tcls = self._get_class(self.__field_types[k])\n\n\t\t\t\t# make sure it's the appropriate type\n\t\t\t\t# also don't try to cast it to something if it is None\n\t\t\t\tif init[k] is not None:\n\t\t\t\t\tif cls is unicode:\n\t\t\t\t\t\tv = cls(init[k]).encode(\"utf-8\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tv = cls(init[k])\n\t\t\t\telse:\n\t\t\t\t\tv = None\n\n if k in self.__fields and self.__fields[k] is not None and v is None:\n continue\n\n\t\t\tself.__fields[k] = v\n\n\t\t# add any non-defined fields to self.__fields\n\t\tif init and self.accept_all_fields:\n\t\t\tfor k,v in init.iteritems():\n\t\t\t\tif k not in self.__fields:\n\t\t\t\t\tself.__fields[k] = v\n\n\t\tif init is not None and \"attachments\" in init:\n\t\t\tself._create_attachments(init[\"attachments\"])\n\n\t\tif init:\n\t\t\tself._add_std_fields(init)",
"def _build_eitem_dict(self, eitem_json, document_pid):\n self._apply_url_login(eitem_json)\n self._set_record_import_source(eitem_json)\n dois = [\n doi\n for doi in self.json_data.get(\"identifiers\", [])\n if doi[\"scheme\"] == \"DOI\"\n ]\n eitem_json.update(\n dict(\n document_pid=document_pid,\n open_access=self.open_access,\n identifiers=dois,\n created_by={\n \"type\": \"import\",\n \"value\": self.metadata_provider,\n },\n urls=self.json_data[\"_eitem\"].get(\"urls\", []),\n description=self.json_data[\"_eitem\"].get(\"description\", \"\"),\n )\n )",
"def fields_dict(slist, type=SList):\n fields = slist.fields()\n names = fields.pop(0)\n out = collections.OrderedDict()\n for i, name in enumerate(names[:-1]):\n out[name] = type(slist.fields(i)[1:])\n out[names[-1]] = type([' '.join(f[i + 1:]) for f in fields])\n return out",
"def create_external_id_vals(self, cr, uid, existing_rec_id, external_id, referential_id, context=None):\n ir_model_data_vals = \\\n self._prepare_external_id_vals(cr, uid, existing_rec_id,\n external_id, referential_id,\n context=context)\n return self.pool.get('ir.model.data').create(cr, uid, ir_model_data_vals, context=context)",
"def add_field(self, field_data):\n def_field = {'id':None,\n 'ref':None,\n 'posx':'0',\n 'posy':'0',\n 'size':'50',\n 'text_orientation':'H',\n 'visible':'V',\n 'text_align':'L',\n 'props':'CNN'\n }\n\n field = dict(list(def_field.items()) + list(field_data.items()))\n #field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field",
"def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' % field.name)\n field_map[field.name] = field\n return field_map",
"def map_to_db_fields(field_attrs):\n attr_keys = field_attrs.keys()\n field_name = field_attrs[attr_keys.pop(attr_keys.index('name'))]\n field_type_raw = field_attrs[attr_keys.pop(attr_keys.index('type'))]\n\n # field_type - constructor for a django.db.models.fields objects\n try:\n field_type = getattr(fields, field_type_raw)\n except:\n raise Exception(\n \"Can not create field with type {0}\".format(field_type_raw))\n\n field_attributes = {}\n\n for key in attr_keys:\n if key in TO_INT_ATTRS:\n value = int(field_attrs[key])\n elif key in TO_BOOL_ATTRS:\n value = True if field_attrs[key] == 'true' else False\n else:\n value = field_attrs[key]\n\n field_attributes[key] = value\n\n return {field_name: field_type(**field_attributes)}",
"def extend(self, fieldname, valuefactory):\n names = {}\n values = {}\n typename = self._type.__doc__.split('(')[0]\n newtype = collections.namedtuple( typename, list(self._type._fields) + [ fieldname ] )\n for number, value in self._values.items():\n value = newtype( *(list(value) + [ valuefactory(value) ]) )\n names[value.name] = value\n values[number] = value\n \n self._type = newtype\n self._names = names\n self._values = values",
"def _to_dict(self, item):\n if isinstance(item, Buffer):\n ret = {}\n fields = item._all_fields()\n for field in fields:\n ret[field.attr_name()] = self._to_dict(getattr(item, field.attr_name()))\n return ret\n\n if isinstance(item, Struct):\n ret = {}\n for field in item._container_.fields:\n if hasattr(field, 'name'):\n ret[field.name] = self._to_dict(field.get_value(item))\n elif isinstance(field, FieldListContainer):\n for inner_field in field.fields:\n if not isinstance(inner_field, AnonymousField):\n ret[inner_field.name] = self._to_dict(inner_field.get_value(item))\n return ret\n\n if isinstance(item, bytearray):\n return '0x' + binascii.hexlify(item) if item else ''\n\n if isinstance(item, list):\n return [self._to_dict(x) for x in item]\n\n return item",
"def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields",
"def map_field_name_to_attribute() -> typing.Dict:\n return {\n \"tag\": \"tag\",\n \"contact\": \"contact\",\n }",
"def recordtype_create_values(\n coll_id=\"testcoll\", type_id=\"testtype\", update=\"RecordType\",\n type_uri=None, supertype_uris=None\n ):\n d = (\n { 'annal:type': \"annal:Type\"\n , 'rdfs:label': \"%s %s/%s/%s\"%(update, coll_id, \"_type\", type_id)\n , 'rdfs:comment': '%s coll %s, type %s, entity %s'%(update, coll_id, \"_type\", type_id)\n , 'annal:type_view': \"_view/Default_view\"\n , 'annal:type_list': \"_list/Default_list\"\n })\n if type_uri:\n d['annal:uri'] = type_uri\n if supertype_uris is not None:\n d['annal:supertype_uri'] = (\n [ { '@id': st } for st in supertype_uris ]\n )\n else:\n d['annal:supertype_uri'] = (\n [ { '@id': type_uri+\"/super1\" }\n , { '@id': type_uri+\"/super2\" }\n ])\n return d",
"def dict(self):\r\n d = {\r\n \"key\": self.field,\r\n \"value_count\": self.value_count,\r\n \"record_count\": self.record_count,\r\n \"value_ratio\": self.value_ratio,\r\n \"storage_types\": list(self.storage_types),\r\n \"null_count\": self.null_count,\r\n \"null_value_ratio\": self.null_value_ratio,\r\n \"null_record_ratio\": self.null_record_ratio,\r\n \"empty_string_count\": self.empty_string_count,\r\n \"unique_storage_type\": self.unique_storage_type\r\n }\r\n\r\n if self.distinct_overflow:\r\n d[\"distinct_overflow\"] = self.distinct_overflow,\r\n d[\"distinct_values\"] = []\r\n else:\r\n d[\"distinct_values\"] = list(self.distinct_values)\r\n\r\n return d",
"def field_wrapper(field):\n return {'field': field}",
"def dict_json(self, record):\n json_dict = {}\n\n fields_dict = record.fields_get()\n\n for name, field in fields_dict.items():\n if eval('record.' + name):\n # id and name (if exists) for M2O, O2M, M2M\n if field['type'] == 'many2one':\n json_dict[name] = {\n 'id': eval('record.' + name + '.id')\n }\n sub_fields_dict = eval('record.' + name + \".fields_get()\")\n if 'name' in sub_fields_dict and sub_fields_dict['name']['type'] in ['char', 'text', 'html']:\n json_dict[name]['name'] = eval('record.' + name + '.name')\n elif field['type'] == 'many2many' or field['type'] == 'one2many':\n json_dict[name] = []\n for sub_rec in eval('record.' + name):\n element = {'id': sub_rec.id}\n sub_fields_dict = sub_rec.fields_get()\n if 'name' in sub_fields_dict and sub_fields_dict['name']['type'] in ['char', 'text', 'html']:\n element['name'] = sub_rec.name\n\n json_dict[name].append(element)\n # if binary, change it in string\n elif field['type'] == 'binary':\n json_dict[name] = eval('record.' + name).decode('utf-8') if type(eval('record.' + name)) is bytes else eval('record.' + name)\n # if other, the value\n else:\n json_dict[name] = eval('record.' + name)\n\n return json_dict",
"def to_dict(self):\n dct = dict(zip(self._fields, self))\n dct['type'] = type(self).__name__\n return dct",
"def _generate(self, custom_data: typing.Dict) -> typing.Dict:\n info = {}\n for field in self.fields:\n if field.name in custom_data:\n info[field.name] = custom_data[field.name]\n else:\n info[field.name] = field.generate(info)\n\n return info",
"def asPyDict(self):\n fieldDict = dict()\n for kvp in self.keyvaluepair_set.all():\n fieldDict[kvp.key] = kvp.value\n return fieldDict",
"def formatDictType(payload, updateFields, parentKey=''):\n for key in payload.keys():\n updateFields.append(parentKey + str(key))\n data = {}\n for key, val in payload.iteritems():\n valueType = type(val)\n key = str(key)\n if valueType is None:\n data.update({ key: { 'nullValue': val } })\n if valueType is int:\n data.update({ key: { 'integerValue': val } })\n if valueType is float:\n data.update({ key: { 'doubleValue': val } })\n if valueType is str:\n data.update({ key: { 'stringValue': val } })\n if valueType is unicode:\n data.update({ key: { 'stringValue': str(val) } })\n if valueType is bool:\n data.update({ key: { 'booleanValue': val } })\n if valueType is datetime:\n data.update({ key: { 'timestampValue': str(val).replace(' ', 'T') } })\n if valueType is list:\n formattedList = formatListType(val, updateFields)\n data.update({ key: { 'arrayValue': formattedList } })\n if valueType is dict:\n formattedDict = formatDictType(val, updateFields, (key + '.'))\n data.update({ key: { 'mapValue': { 'fields': formattedDict } } })\n return data",
"def _datastore_fields(fs):\n return [{\n 'id': f['datastore_id'],\n 'type': _column_type(f['datastore_type'])}\n for f in fs]",
"def _prepare_external_id_vals(self, cr, uid, res_id, ext_id, referential_id, context=None):\n ir_model_data_vals = {\n 'name': self.prefixed_id(ext_id),\n 'model': self._name,\n 'res_id': res_id,\n 'referential_id': referential_id,\n 'module': 'extref/' + self.pool.get('external.referential').\\\n read(cr, uid, referential_id, ['name'])['name']\n }\n return ir_model_data_vals",
"def format_data(self, _item_fields, special=None):\n\n if special:\n _item_fields[\"special\"] = special\n\n return _item_fields"
] | [
"0.71578294",
"0.6062752",
"0.5871339",
"0.58003277",
"0.5790452",
"0.5784216",
"0.5772315",
"0.5709299",
"0.56147534",
"0.55840975",
"0.5571692",
"0.5539155",
"0.5517967",
"0.5512531",
"0.5494441",
"0.54721624",
"0.5455877",
"0.5452799",
"0.544782",
"0.54249597",
"0.54240185",
"0.5420354",
"0.5417039",
"0.5413699",
"0.5407098",
"0.5379233",
"0.5346001",
"0.532402",
"0.5318277",
"0.530331"
] | 0.7510255 | 0 |
This method takes an item's values and copies them to a new item in the target app. | def copy_item(self, origin_item_id, target_app_id, field_conversor, extra_data = None, silent=False, hook=True):
source_item = self.get_item(origin_item_id, external_id=False)
if extra_data is None:
destination_dict = {}
else:
destination_dict = extra_data
try:
for origin, destination in field_conversor:
try:
origin = int(origin)
source_field = source_item["values"][origin]
except ValueError:
related_id, field_id = origin.split('#')
source_field = source_item["values"][int(related_id)]["value"]["values"][int(field_id)]
if source_field['type'] == "image":
new_value = []
for value in source_field['value']:
new_value.append(self.copy_file(value[1])['file_id'])
else:
new_value = source_field['value']
destination_dict[destination] = new_value
except KeyError as e:
self.comment(
'item',
origin_item_id,
{'value': 'Ha habido un error con la llave %s (IM lo sabe interpretar :) ) pero probablemente no están todos los campos que pide la aplicación nacional y por eso no se pudo crear.' % str(e)}
)
return 'Key Error: ' + str(e)
new_item = self.create_item({"fields":destination_dict}, app_id = target_app_id)
self.comment(
'item',
origin_item_id,
{'value': 'Se ha copiado el EP al espacio nuevo de PODIO exitosamente en la direccion %s' % new_item['link']}
)
return new_item
#make new item
#return return code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Copy(self, items):\r\n \r\n self.Clear()\r\n\r\n for item in items._items:\r\n self._items.append(item)\r\n \r\n self._selection = items._selection\r\n self._rowCount = items._rowCount\r\n self._columnCount = items._columnCount\r\n\r\n self._backgroundColour = items._backgroundColour\r\n self._textColour = items._textColour\r\n self._selectionColour = items._selectionColour\r\n self._selectionOutlineColour = items._selectionOutlineColour\r\n self._itemFont = items._itemFont",
"def Copy(self, item):\r\n\r\n self._id = item._id\r\n self._name = item._name\r\n self._title = item._title\r\n self._isGroup = item._isGroup\r\n self._breakColumn = item._breakColumn\r\n self._rect = item._rect\r\n self._font = item._font\r\n self._textColour = item._textColour\r\n self._bitmap = item._bitmap\r\n self._description = item._description\r\n self._rowPos = item._rowPos\r\n self._colPos = item._colPos\r\n self._window = item._window",
"def copy(self) -> ItemVariant:\n return ItemVariant(\n self.pak_id,\n self.editor,\n self.vbsp_config,\n self.editor_extra.copy(),\n self.authors.copy(),\n self.tags.copy(),\n self.desc,\n self.icons.copy(),\n self.ent_count,\n self.url,\n self.all_name,\n self.all_icon,\n self.source,\n )",
"def copy(self,item,destName,destDir=None):\n if item == self.lastKey: return\n destDir = destDir or self.dir\n apath = self.dir.join(item)\n apath.copyTo(destDir.join(destName))\n if destDir == self.dir:\n self.data[destName] = installer = copy.copy(self.data[item])\n installer.isActive = False\n self.refreshOrder()\n self.moveArchives([destName],self.data[item].order+1)",
"def copy_taggeditems(apps, schema_editor):\n TaggitTaggedItem = apps.get_model('taggit', 'TaggedItem')\n ExtrasTaggedItem = apps.get_model('extras', 'TaggedItem')\n\n tagged_items_values = TaggitTaggedItem.objects.all().values('id', 'object_id', 'content_type_id', 'tag_id')\n tagged_items = [ExtrasTaggedItem(**tagged_item) for tagged_item in tagged_items_values]\n ExtrasTaggedItem.objects.bulk_create(tagged_items)",
"def pickUpItem(self, app, newItem: Stack):\n\n if newItem.isEmpty(): return\n\n # Prioritize existing stacks of the item first\n for (i, slot) in enumerate(self.inventory):\n stack = slot.stack\n if stack.isInfinite() and stack.item == newItem.item:\n # It just stacks into an infinite slot, so no change\n return\n elif newItem.isInfinite() and stack.item == newItem.item:\n # ditto\n return \n elif stack.amount > 0 and stack.item == newItem.item:\n self.inventory[i].stack.amount += newItem.amount\n return\n\n # If that fails, then just add the item to the next open space\n for (i, slot) in enumerate(self.inventory):\n if slot.isEmpty():\n self.inventory[i].stack = newItem\n return\n \n # TODO: Full inventory??\n 1 / 0",
"def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return",
"def copyItem(self):\n # extract all selected item\n itms = []\n for item in self.scene.selectedItems():\n if isinstance(item, DiagramItem):\n itms.append(item.data)\n\n # pickle data\n mime = QMimeData()\n mime.setData( self.__mime__ , QByteArray(pickle.dumps(itms)) )\n\n # copy to clipboard\n QApplication.clipboard().setMimeData(mime,QClipboard.Clipboard)\n self.pasteAction.setEnabled(True)",
"def NewItems(self) -> _n_1_t_7:",
"def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])",
"def fill_item(self, args, producing_job):\n pass",
"def convert_items(items):\n for idx in range(len(items)):\n item_name, item_sell_in, item_quality = items[idx].name, items[idx].sell_in, items[idx].quality,\n comp_name = item_name.lower() # the name with which we compare by\n\n new_item = items[idx]\n if 'aged brie' in comp_name:\n new_item = AgedItem(item_name, item_sell_in, item_quality)\n elif 'sulfuras' in comp_name:\n new_item = LegendaryItem(item_name, item_sell_in, item_quality)\n elif 'conjured' in comp_name:\n new_item = ConjuredItem(item_name, item_sell_in, item_quality)\n elif 'backstage passes' in comp_name:\n new_item = BackstagePass(item_name, item_sell_in, item_quality)\n items[idx] = new_item\n\n return items",
"def clone_item(item):\n i = h5Item(item.text(0))\n i.path = item.path\n i.listIndex = item.dataIndex\n i.originalIndex = item.originalIndex\n i.data = item.data\n return i",
"def addItems(*args):",
"def _stash_items(sender, **kwargs):\n json_values = kwargs[\"json_values\"]\n stash = kwargs[\"stash\"]\n\n if \"items\" not in json_values:\n return\n\n json_items = json_values[\"items\"]\n\n stash[\"updated_items\"] = []\n stash[\"new_items\"] = []\n\n # create the items\n for item in json_items:\n # put the item in either new or updated items\n if \"id\" in item:\n stash[\"updated_items\"].append(ItemStash(item))\n else:\n stash[\"new_items\"].append(ItemStash(item))",
"def addItem(*args):",
"def addItem(*args):",
"def addItem(*args):",
"def test_add_value_singlevalue_singlevalue(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n il.add_value(\"name\", \"bar\")\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"bar\"]})",
"def test_add_value_singlevalue_list(self):\n input_item = self.item_class(name=\"foo\")\n il = ItemLoader(item=input_item)\n il.add_value(\"name\", [\"item\", \"loader\"])\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(\n ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"item\", \"loader\"]}\n )",
"def push(self, new_item):\n self.items.append(new_item)",
"def _update_item(self, item, user):\n item.user_modified = user\n try:\n item.panel = item.panel\n item.item_priority = item.priority\n except AttributeError:\n pass\n item.is_packed = True\n item.save()\n return item",
"def copy_item(self, src_key, target_key, nodup=False):\n copied = False\n src_entry = self.get(src_key)\n if (src_entry):\n if ((target_key not in self._key_set) or (not nodup)): # if no target or dups allowed\n copy = src_entry._replace(keyword=target_key, value=src_entry.value)\n self._metadata.append(copy)\n self._update_key_set()\n copied = True\n return copied",
"def _move_item(self, src, dst):\n \"Does nothing\"",
"def item_shared(self, item):\n self.update_item(item)",
"def test_add_value_list_singlevalue(self):\n input_item = self.item_class(name=[\"foo\", \"bar\"])\n il = ItemLoader(item=input_item)\n il.add_value(\"name\", \"qwerty\")\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(\n ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"bar\", \"qwerty\"]}\n )",
"def pasteItem(self):\n # read from clipboard\n mimeData = QApplication.clipboard().mimeData()\n if not mimeData.hasFormat(self.__mime__):\n return None\n\n # extract data\n data = mimeData.data(self.__mime__)\n if data:\n try:\n items = pickle.loads(data.data())\n for itm in items:\n # extract item info\n itemType = int(itm['item-type'])\n itemText = itm['item-text']\n itemData = itm['item-data']\n\n # define the color of the item\n color = self.getItemColor(itemType=itemType)\n \n # add item in first\n itemId = self.getItemId()\n self.addItem( itemType=itemType, itemId=itemId, itemText=itemText, \n itemColor=QBrush(color), itemPos=self.scene.currentMousePosition, itemData=itemData )\n except Exception as e:\n self.error( \"unable to deserialize %s\" % str(e) )",
"def copy(self):\n return self.__class__(self.items, self.is_cloud)",
"def create_item_command(cog_href: str, destination: str) -> None:\n item = stac.create_item(cog_href)\n\n item.save_object(dest_href=destination)",
"def item_rewrite(self, item, str_new_item):\n\t\treturn self._modify_object(item=item, new_item=str_new_item)"
] | [
"0.6275045",
"0.6155121",
"0.60563624",
"0.59427583",
"0.58470327",
"0.5806149",
"0.5760919",
"0.5711546",
"0.5645925",
"0.5640575",
"0.56116366",
"0.5557648",
"0.5505642",
"0.54528534",
"0.54519296",
"0.54476357",
"0.54476357",
"0.54476357",
"0.5418839",
"0.5403946",
"0.53963476",
"0.53780437",
"0.53756934",
"0.5364903",
"0.53642035",
"0.53554904",
"0.53397846",
"0.53353894",
"0.5316121",
"0.5303045"
] | 0.68416226 | 0 |
Move the fist based on mouse position. | def update(self):
pos = pygame.mouse.get_pos()
self.rect.midtop = pos
if self.punching:
self.rect.move_ip(5, 10) # move fist position in place | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def follow_mouse(self, mouse):\n half_width = self.width() / 2\n self.left = mouse.get_x() - half_width\n self.right = mouse.get_x() + half_width",
"def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()",
"def mousePosition(self):",
"def move(self):\n self.val = (pygame.mouse.get_pos()[\n 0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini\n if self.val < self.mini:\n self.val = self.mini\n if self.val > self.maxi:\n self.val = self.maxi",
"def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)",
"def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)",
"def updateFirstPoint(self):\n x, y = self.machine.plot.dataToPixel(*self._firstPos, check=False)\n\n offset = self.machine.getDragThreshold()\n points = [(x - offset, y - offset),\n (x - offset, y + offset),\n (x + offset, y + offset),\n (x + offset, y - offset)]\n points = [self.machine.plot.pixelToData(xpix, ypix, check=False)\n for xpix, ypix in points]\n self.machine.setSelectionArea(points, fill=None,\n color=self.machine.color,\n name='first_point')",
"def move_start(event):\n nonlocal x, y\n x = event.x \n y = event.y\n window['cursor'] = utils.CURSORS['move_item']",
"def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)",
"def move_mouse(self, pos):\n dx, dy = self.distance_from_crosshairs(pos[0], pos[1])\n pag.move(dx, dy)",
"def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)",
"def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)",
"def move_to_position1(self):",
"def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)",
"def moveTo(self, x, y):\n\n\t\tif x < 0:\n\t\t\tself.x = 0\n\t\telif x > self.maxX:\n\t\t\tself.x = self.maxX\n\t\telse:\n\t\t\tself.x = x\n\n\t\tif y < 0:\n\t\t\tself.y = 0\n\t\telif y > self.maxY:\n\t\t\tself.y = self.maxY\n\t\telse:\n\t\t\tself.y = y \n\n #print self.x, self.y\n\t\tautopy.mouse.move(self.x,self.y)",
"def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)",
"def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)",
"def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10)",
"def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n \n # Set the player x position to the mouse x position\n self.rect.x = pos[0]",
"def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)",
"def move(self):\n \n self.position = self.explore()",
"def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n\n # Set the player x position to the mouse x position\n self.rect.x = pos[0]",
"def handle_mouse(obj, event):\n if event:\n x = event.globalX()\n y = event.globalY()\n x_w = obj.offset.x()\n y_w = obj.offset.y()\n obj.move(x - x_w, y - y_w)",
"def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')",
"def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos",
"def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y",
"def move_to_position2(self):",
"def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()",
"def move(point):\n # wrapper just so we don't have to import pymouse separately\n m = PyMouse()\n m.move(*point)",
"def _move(self, event):\n if self._current_tower.get_value() > self._coins:\n return\n\n #move the shadow tower to mouse position\n position = event.x, event.y\n self._current_tower.position = position\n\n legal, grid_path = self._game.attempt_placement(position)\n\n #find the best path and covert positions to pixel positions\n path = [self._game.grid.cell_to_pixel_centre(position)\n for position in grid_path.get_shortest()]\n\n #Task 1.2 (Tower placement): Draw the tower preview here\n self._view.draw_preview(self._current_tower, legal)\n self._view.draw_path(path)"
] | [
"0.71755165",
"0.7101349",
"0.7000044",
"0.6952539",
"0.6935647",
"0.6929391",
"0.68467486",
"0.68411225",
"0.6729126",
"0.6657786",
"0.66523236",
"0.66523236",
"0.6651963",
"0.66329193",
"0.6623596",
"0.6551806",
"0.6551806",
"0.65431446",
"0.6522208",
"0.65183115",
"0.64927393",
"0.6476802",
"0.646403",
"0.64126927",
"0.64087945",
"0.6387149",
"0.63761306",
"0.6353296",
"0.6329661",
"0.63193476"
] | 0.71138275 | 1 |
Move the monkey across the screen, turning around when it reaches the end. | def _walk(self):
new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame
if self.rect.left < self.area.left or self.rect.right > self.area.right:
self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen
new_pos = self.rect.move((self.move, 0))
self.image = pygame.transform.flip(
self.image, 1, 0
) # mirror the chimp to make it looks like turning around
self.rect = new_pos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n move()\n move()\n pick_beeper()\n move()\n turn_left()\n for i in range(2):\n move()\n put_beeper()\n turn_around()\n move_to_wall()\n turn_right()\n move_to_wall()\n turn_around()",
"def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()",
"def move_buildings(self):",
"def move_tie_fighters(self):\n for i in range(len(self.tie_fighters)):\n self.tie_fighters[i].move_tie_fighter()",
"def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move",
"def move(self):\n pass",
"def move_lift_up():\n return _move_lift(1)",
"def step(self):\n if not self._is_game_over:\n self._move_snake()\n self._is_game_over = self.is_snake_collides()",
"def main():\r\n\r\n movetwotimes()\r\n pick_beeper()\r\n move()\r\n turn_left()\r\n movetwotimes()\r\n put_beeper()\r\n turn_left()\r\n turn_left()\r\n movetwotimes()\r\n rotatethreetimes()\r\n movetwotimes()\r\n move()\r\n turn_left()\r\n turn_left()",
"def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r",
"def move(self):\n self.old_tail = self.body[-1][:] # save old position of last block\n self.head[0] += self.direction[0] # moves head\n self.head[1] += self.direction[1]\n \n self.head[0] = (self.head[0] + self.xMaxSize) % self.xMaxSize\n self.head[1] = (self.head[1] + self.yMaxSize) % self.yMaxSize\n \n if self.head in self.body[1:]: # if snakes hits himself\n self.alive = False\n self.body.insert(0, self.body.pop()) # each block is replace by predecessor\n self.body[0] = self.head[:] # first block is head",
"def step(self, move):",
"def move_forward():\n pass",
"def move(self):\n if self.adjustment > 0:\n # The player moves up a ladder\n old_position = self.position\n # In the next move, player on top of ladder\n\n self.position = old_position - self.dropped_steps\n # player drops steps\n super().move()\n # player move\n die = self.position - old_position + self.dropped_steps - \\\n self.adjustment\n \"\"\" \n the current position of the player is: old position - \n dropped_steps + die + adjustment. Rearranging to find the die.\n \n \"\"\"\n if die < self.dropped_steps:\n self.position = old_position\n # if die is less than dropped_steps, the player will stand still.\n else:\n super().move()\n # if not climbling a ladder, then the player make a regular move",
"def move(self):\n \n self.position = self.wander()",
"def _move_forward(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tif(self.y<=798):\n\t\t\tself.y = self.y+1\n\t\t\tif Board.board[self.x][self.y]=='0':\n\t\t\t\tMario.score += 1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='P':\n\t\t\t\tMario.lives+=1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_1-up.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='A':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tMario.attack = 1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_powerup.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='@':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tMario.lives-=1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_mariodie.wav\"])\n\t\t\t\tif Mario.lives<=0:\n\t\t\t\t\tcall([\"aplay\",\"-q\",\"smb_gameover.wav\"])\n\t\t\t\t\treturn \"exit\"\n\t\t\t\tos.system('clear')\n\t\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\t\\tNumber of Mario left\",Mario.lives)\n\t\t\t\tMario.respawn(self.x,self.y)\n\t\t\t\ttime.sleep(2)\n\t\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\n\t\t\telif(Board.board[self.x][self.y]=='/'):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x-1][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='I':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tBoard.bonus_round()\n\n\t\t\telif Board.board[self.x][self.y]=='K':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tenemy.boss_round()\n\n\t\t\telif(Board.board[self.x][self.y] in obstacles):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y-1]='M'\n\n\t\t\telif((Board.board[self.x+1][self.y-1]=='/' or Board.board[self.x+1][self.y-1]=='T') and Board.board[self.x+1][self.y]==' '):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y+1]='M'\n\t\t\t\tMario.go_down(self)\n\t\t\telse:\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\tif( self.y-1 >= ((Board.prev_j+Board.prev_k)/2) ):\n\t\t\tos.system('clear')\n\t\t\tBoard.prev_j += 1 \n\t\t\tBoard.prev_k += 1\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\telse:\n\t\t\tos.system('clear')\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)",
"def warm_up(self):\n self.velocity = self.steering_behaviours.calculate()\n self.pos += self.velocity\n self.pos = Point(int(self.pos.x), int(self.pos.y))\n if not self.is_moving():\n if self.steering_behaviours.target == self.soccer_field.ball.pos:\n # let's go back towards where I was.\n self.steering_behaviours.target = self.initial_pos\n else:\n # let's go towards the ball.\n self.steering_behaviours.target = self.soccer_field.ball.pos\n self.direction = Vec2d(self.steering_behaviours.target - self.pos).normalized()",
"def become_tower(self):\n self.stack_size = 2",
"def turn_around():\n for i in range(2):\n turn_left()",
"def move2(self):\n\n options = self.location.exits.keys()\n for key in options:\n if self.location.exits[key] == p.location:\n self.location.objects.remove(a)\n self.location = p.location\n self.location.objects.append(a)\n print('fred entered the room')\n self.attack(['attack', str(p.name)])\n break\n else:\n self.move1()",
"def run(self):\n # type: () -> None\n self.move_to(self.location)",
"def main():\n for i in range(4):\n fix_tower()\n if front_is_clear():\n move_to_next()",
"def turn(self):\n pass",
"def stop(self):\n self.move(0, 0)",
"def run(self):\n self.spawn()\n while self.is_alive:\n self.move()\n time.sleep(.2)",
"def interaction_void(self) -> None:\n self.grid.obj_list.swap_obj(self.moving_character, self.target)",
"def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()",
"def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()",
"def stop(self):\n self.move(None)",
"def move_limit_tie_fighters(self):\n for i in range(len(self.tie_fighters)):\n self.tie_fighters[i].move_limitation()"
] | [
"0.6293175",
"0.6188732",
"0.61269486",
"0.6100515",
"0.60992014",
"0.6092559",
"0.60872597",
"0.60869735",
"0.60626805",
"0.60558814",
"0.6001239",
"0.59711635",
"0.59577036",
"0.594625",
"0.5932024",
"0.5930882",
"0.5926476",
"0.5900304",
"0.58895755",
"0.5858425",
"0.58563244",
"0.58550185",
"0.5836387",
"0.579454",
"0.57292724",
"0.5721943",
"0.5692315",
"0.5692315",
"0.5685473",
"0.5675229"
] | 0.6345669 | 0 |
Test that two candidates with the same name are considered equal. | def test_equal(self):
candidate1 = pyrankvote.Candidate("Per")
candidate2 = pyrankvote.Candidate("Per")
candidate3 = pyrankvote.Candidate("Aase")
self.assertEqual(candidate1, candidate2, "These candidates should be equal/the same candidate.")
self.assertNotEqual(candidate1, candidate3, "These candidates should NOT be equal/the same candidate.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n return self.name == other.name",
"def __eq__(self, name):\n return self.name == name",
"def __eq__(self, other):\n return self.getName() == other.getName()",
"def __eq__(self, other: 'Pair') -> bool:\n return self.names == other.names",
"def __eq__(self, other):\n\n return (self.name) == (other.name)",
"def __eq__(self, other):\r\n return self.__name == other.__name",
"def __eq__(self, other):\r\n return self.__name == other.__name",
"def __eq__(self, other):\n return self.last_name == other.last_name and self.first_name == other.first_name",
"def __eq__(self, other) -> bool:\n return self.Firstname == other.Firstname and self.LastName == other.LastName",
"def __eq__(self, other):\n if (self.name == other.name):\n return \"Equal\"\n else:\n return \"Not Equal\"",
"def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())",
"def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())",
"def __eq__(self, other):\n return(\n self.name == other.name and\n self.hand == other.hand and\n self.score == other.score\n )",
"def __eq__(self, other):\n if self is other:\n return True\n if type(self) == type(other):\n return self._name == other._name and self._calories == other._calories and \\\n self._carbohydrates == other._carbohydrates and self._fat == other._fat\\\n and self._proteins == other._proteins",
"def __eq__(self, other):\n\n return self.name == other.name and self.price_range == other.price_range",
"def isSameName(self, other):\n if not isinstance(other, self.__class__):\n return 0\n if self.data.has_key('name') and \\\n other.data.has_key('name') and \\\n build_name(self.data, canonical=0) == \\\n build_name(other.data, canonical=0):\n return 1\n if self.accessSystem == other.accessSystem and \\\n self.characterID is not None and \\\n self.characterID == other.characterID:\n return 1\n return 0",
"def __eq__(self, other):\n contentsmatchfail = False\n equal = False\n for i in self.contents:\n if i in other.contents:\n pass\n else:\n contentsmatchfail = True\n for i in other.contents:\n if i in self.contents:\n pass\n else:\n contentsmatchfail = True\n if self.name == other.name and self.name == other.name and contentsmatchfail == False:\n equal = True\n return equal",
"def testEquality(self):\n pass",
"def is_consistent(self, other):\n return self.name != other.name or self.type is other.type",
"def same_player(self, other):\n return self.name == other.name \\\n and self.color == other.color",
"def __eq__(self, rhs):\n return (\n (self.name == rhs.name)\n and (self.args == rhs.args)\n and (self.varargs == rhs.varargs)\n and (self.keywords == rhs.keywords)\n )",
"def test_identical(self):\n write this test!",
"def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name",
"def test_name(self):\n molecule1 = Molecule()\n molecule1.name = None\n\n molecule2 = Molecule()\n molecule2.name = \"\"\n assert molecule1.name == molecule2.name\n\n name = \"benzene\"\n molecule = Molecule()\n molecule.name = name\n assert molecule.name == name",
"def __eq__(self, other):\n return (((not self.name and not other.name) or\n self.name == other.name) and\n self.fields == other.fields)",
"def names_are_equal(filesystem_name, fixture_name):\n if filesystem_safe(filesystem_name) == fixture_name:\n return True\n return False",
"def __eq__(self, other):\n if type(self) != type(other):\n return False\n else:\n return ((self.name == other.name) and (self.value == other.value)\n and (self.time == other.time))",
"def is_equal(self, other):\n return (other.__class__ == self.__class__\n and other.subscript == self.subscript\n and other.swept_inames == self.swept_inames)",
"def test_equal(self):\n self.assertTrue(self.a == self.a)\n self.assertFalse(self.a != self.a)",
"def test_equals(self):\n self.assertEqual(cmp(u\"a\", u\"a\"), 0)\n self.assertEqual(cmp(1, 1), 0)\n self.assertEqual(cmp([1], [1]), 0)"
] | [
"0.7137341",
"0.6986297",
"0.69103146",
"0.6893109",
"0.68889",
"0.6812805",
"0.6812805",
"0.67275023",
"0.6676643",
"0.66710836",
"0.662369",
"0.662369",
"0.6585671",
"0.6563278",
"0.6547755",
"0.651176",
"0.64836264",
"0.6444751",
"0.6440682",
"0.64404285",
"0.6353767",
"0.6323626",
"0.63184303",
"0.63184303",
"0.63184273",
"0.62452257",
"0.62394106",
"0.61826",
"0.61705714",
"0.61522275"
] | 0.7321387 | 0 |
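The behaviour exercised by the record above is name-based equality with a matching hash. A minimal sketch, assuming only what the test itself shows (pyrankvote's real `Candidate` class is not included in this dump, so treat this as illustrative):

```python
class Candidate:
    """Minimal candidate: two candidates are equal iff their names are equal."""

    def __init__(self, name: str) -> None:
        self.name = name

    def __eq__(self, other: object) -> bool:
        return isinstance(other, Candidate) and self.name == other.name

    def __hash__(self) -> int:
        # Hash must agree with __eq__ so candidates behave correctly in sets and dicts.
        return hash(self.name)


assert Candidate("Per") == Candidate("Per")
assert Candidate("Per") != Candidate("Aase")
```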
Test that voting with two equal candidates raises DuplicateCandidatesError | def test_raise_duplicate_candidate_error(self):
candidate1 = pyrankvote.Candidate("Per")
candidate2 = pyrankvote.Candidate("Per")
candidate3 = pyrankvote.Candidate("Aase")
def tester(_):
pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])
msg = "Candidate 1 and 2 is equal and should raise duplicate candidate error"
self.assertRaises(pyrankvote.models.DuplicateCandidatesError, tester, msg)
# TEST THE OPPOSITE
candidate1 = pyrankvote.Candidate("Per")
candidate2 = pyrankvote.Candidate("Maria")
candidate3 = pyrankvote.Candidate("Aase")
# This should NOT raise an error
pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_equal(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Per\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n self.assertEqual(candidate1, candidate2, \"These candidates should be equal/the same candidate.\")\n self.assertNotEqual(candidate1, candidate3, \"These candidates should NOT be equal/the same candidate.\")",
"def test_new_candidate_objects(self):\n\n class NewCandidate:\n def __init__(self, name):\n self.name = \"New \"+name\n def __hash__(self):\n return hash(self.name)\n\n candidate1 = NewCandidate(\"Per\")\n candidate2 = NewCandidate(\"Aase\")\n\n # This should NOT raise an error\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2])",
"def test_multiple_vote(self) -> None:\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n vote=1,\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def test_DECISION_repeat_conflict(self, commit):\n self.assertRaises(AssertionError, lambda:\n self.node.fake_message(Decision(slot=1, proposal=PROPOSAL2)))",
"def test_tally_no_candidates(self):\n self.init_elect_types()\n\n userA = models.User(\n name = \"UserA\",\n email = \"[email protected]\",\n password = \"asdf\")\n\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id)\n\n session.add(electionA)\n session.commit()\n\n raceA = models.Race(\n title = \"Race A\",\n election_id = electionA.id\n )\n\n session.add(raceA)\n session.commit()\n\n with self.assertRaises(NoCandidates):\n self.wta.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.proportional.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.schulze.check_race(raceA.id)",
"def test_vote_twice(self):\n idea = models.Idea(creator=random_user(), title='Transit subsidy to Mars', \n text='Aliens need assistance.', state=self.state)\n idea.save()\n\n self.client.login(username='testuser', password='password')\n resp = self.client.post(reverse('upvote_idea'), {'idea_id':idea.id, 'next':reverse('idea_detail', args=(idea.id,))})\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(idea.vote_set.all()), 1)\n\n resp = self.client.post(reverse('upvote_idea'), {'idea_id':idea.id, 'next':reverse('idea_detail', args=(idea.id,))})\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(idea.vote_set.all()), 1)",
"def test_POST_vote(self):\n self.init_elect_types()\n userA = models.User(\n name = \"UserA\",\n email = \"[email protected]\",\n password = \"asdf\")\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id,\n )\n session.add(electionA)\n session.commit()\n\n raceA = models.Race(\n title = \"Race A\",\n election_id = electionA.id\n )\n session.add(raceA)\n session.commit()\n\n candidateA = models.Candidate(\n title = \"Candidate A\",\n race_id = raceA.id)\n session.add(candidateA)\n session.commit()\n\n data = {\n \"value\": 1,\n \"user_id\": userA.id,\n \"candidate_id\": candidateA.id\n }\n\n response = self.client.post(\"/api/votes\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n data = json.loads(response.data.decode(\"ascii\"))\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.mimetype, \"application/json\")\n self.assertEqual(urlparse(response.headers.get(\"Location\")).path,\n \"/api/elections/{}\".format(electionA.id))\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"value\"], 1)\n self.assertEqual(data[\"candidate_id\"], candidateA.id)\n\n votes = session.query(models.Vote).all()\n self.assertEqual(len(votes), 1)\n\n vote = votes[0]\n self.assertEqual(vote.user_id, userA.id)\n\n # Try POST same vote again to test for already voted error\n vote_count = session.query(models.Vote).filter(\n models.Vote.user_id == userA.id,\n models.Vote.candidate_id == candidateA.id).count()\n self.assertEqual(vote_count, 1)\n\n data = {\n \"value\": 1,\n \"user_id\": userA.id,\n \"candidate_id\": candidateA.id\n }\n\n response = self.client.post(\"/api/votes\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n data = json.loads(response.data.decode(\"ascii\"))\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"],\n \"User with id {} has already voted for candidate with id {}.\".format(\n userA.id, candidateA.id))",
"def test_choose_interview_slot_if_slot_is_already_taken(self):\n self.interview_slot1.student = self.student2\n self.interview_slot1.save()\n url = reverse('course_interviews:confirm_slot')\n data = {\n \"slot_id\": self.interview_slot1.id,\n \"student_uuid\": self.student1.uuid\n }\n response = self.client.post(url, data, follow=True)\n\n self.assertEqual(response.status_code, 404)",
"def test_raise_error_if_not_all_obj_are_candidate_objects(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = \"Aase\"\n\n def tester(_):\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2])\n\n msg = \"Candidate 2 is a string, not a Candidate, and should raise a TypeError\"\n self.assertRaises(TypeError, tester, msg)\n\n # TEST THE OPPOSITE\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Aase\")\n\n # This should NOT raise an error\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2])",
"def test_race_voteval_check(self):\n self.populate_database()\n self.assertEqual(len(self.raceB.candidates), 4)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 1)\n\n self.raceB.election_type = \"Schulze\"\n self.assertEqual(len(self.raceB.candidates), 4)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 4)\n\n ### WHY DO THESE WORK:\n self.raceB.candidates.append(self.candidateAB)\n self.assertEqual(len(self.raceB.candidates), 5)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 5)\n\n self.raceB.candidates.remove(self.candidateBD)\n # Throw a couple of wrenches in the works\n self.raceB.max_vote_val = 1\n self.raceB.min_vote_val = 7\n\n self.assertEqual(len(self.raceB.candidates), 4)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 4)\n\n ### BUT THESE DO NOT ?!?!? ( obviously indirect changes to the \n ### db/collection aren't handled by the validator event)\n # session.delete(self.candidateBD)\n # self.candidateAB.race_id = self.raceB.id",
"def test_success(self):\n disposable_under_min = Disposable.objects.create(name=self.DISPOSABLE_NAME + '_1')\n disposable_over_min = Disposable.objects.create(name=self.DISPOSABLE_NAME + '_2')\n category_1 = Category.objects.create(name=self.CATEGORY_NAME + '_1')\n category_2 = Category.objects.create(name=self.CATEGORY_NAME + '_2')\n votes = [\n (disposable_under_min, category_1, settings.MIN_NORMALIZE_COUNT/100),\n (disposable_under_min, category_2, settings.MIN_NORMALIZE_COUNT/50),\n (disposable_over_min, category_1, settings.MIN_NORMALIZE_COUNT),\n (disposable_over_min, category_2, settings.MIN_NORMALIZE_COUNT*3)\n ]\n self.make_votes(votes)\n\n # test when total votes is less than settings.MIN_NORMALIZE_COUNT\n votes_under = DisposableVote.objects.filter(disposable=disposable_under_min)\n tuples_under = votes_to_percentages(votes_under)\n expected_under = [(category_2.name, settings.MIN_NORMALIZE_COUNT/50),\n (category_1.name, settings.MIN_NORMALIZE_COUNT/100)]\n self.assertEqual(expected_under, tuples_under)\n # test when total votes is greater than settings.MIN_NORMALIZE_COUNT\n votes_over = DisposableVote.objects.filter(disposable=disposable_over_min)\n tuples_over = votes_to_percentages(votes_over)\n expected_over = [(category_2.name, 3/4*100), (category_1.name, 1/4*100)]\n self.assertEqual(expected_over, tuples_over)",
"def test_a_user_can_vote_once(self):\n res = self.client().post(\n '/api/v2/auth/login',\n headers=self.get_accept_content_type_headers(),\n data=json.dumps(ADMIN_LOGIN)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n access_token = response_msg[\"data\"][0][\"token\"]\n self.create_meetup(access_token, MEETUP)\n access_token = self.get_access_token(USER_REGISTRATION, USER_LOGIN)\n self.create_question(access_token, QUESTION)\n access_token = self.get_access_token(NEW_USER_REGISTRATION, NEW_USER_LOGIN)\n res = self.client().patch(\n '/api/v2/questions/1/upvote',\n headers=self.get_authentication_headers(access_token)\n )\n res = self.client().patch(\n '/api/v2/questions/1/upvote',\n headers=self.get_authentication_headers(access_token)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n self.assertEqual(res.status_code, 423)\n self.assertEqual(response_msg[\"message\"][\"error\"], \"A user can only vote once\")",
"def test_missing_vote_value(self) -> None:\n self.clear_votes()\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def test_vote_submission(self):\n starting_count = Vote.objects.count()\n data = { \"candidate\":3,\n \"student\":2}\n response = self.client.post(\"/vote/\", data, format='json')\n print(response.data)\n assert response.status_code == status.HTTP_201_CREATED\n assert Vote.objects.count() - starting_count == 1",
"def test_candidates_list(self):\n pass",
"def test_tally_no_votes(self):\n self.populate_database()\n self.electionA.elect_open = False\n with self.assertRaises(NoVotes):\n self.wta.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.proportional.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.schulze.check_race(self.raceA.id)",
"def test_duplicate_entries(self):",
"def test_upvote_then_downvote_same_user_leaves_comment_score_one_less(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(comment.score, DEFAULT_SCORE)\n comment = Comment.objects.get(body=\"987XYZ\")\n\n vote1 = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)\n\n vote2 = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)",
"def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())",
"def test_reusableitem_vote_user_count_20_reject(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n\n for index in range(3, 21):\n create_toptenlist(self, 'user_' + index.__str__(), index)\n reference_reusable_item(self, 'user_' + index.__str__(), self.reusableitem_1.id, 'toptenlist_' + index.__str__(), 0)\n\n # submit the change request\n data1 = submit_change_request_1(self, self.user_1)\n updated_reusableitem1 = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # users vote for\n for index in range(2, 5):\n self.client.force_authenticate(user=getattr(self, 'user_' + index.__str__()))\n response = self.client.patch(get_reusable_item_1_url(self), {'vote': 'yes'}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # users vote against\n for index in range(5, 7):\n self.client.force_authenticate(user=getattr(self, 'user_' + index.__str__()))\n response = self.client.patch(get_reusable_item_1_url(self), {'vote': 'no'}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n updated_reusableitem2 = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # the change request should be resolved\n self.assertEqual(updated_reusableitem2.change_request, None)\n\n # it should be accepted\n history_entry = updated_reusableitem2.history[-1]\n self.assertEqual(history_entry['change_request_resolution'], 'rejected')",
"def test_identify_duplicates_2(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)",
"def test_post_duplicate_question(self):\n self.post_question(self.valid_question2)\n\n\n response = self.post_question(self.valid_question2)\n self.assertEqual(response.status_code, 400)",
"def test_reusableitem_vote_user_count_120_reject(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n\n for index in range(3, 121):\n create_toptenlist(self, 'user_' + index.__str__(), index)\n reference_reusable_item(self, 'user_' + index.__str__(), self.reusableitem_1.id, 'toptenlist_' + index.__str__(), 0)\n\n # submit the change request\n data1 = submit_change_request_1(self, self.user_1)\n updated_reusableitem1 = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # users vote for\n for index in range(2, 5):\n self.client.force_authenticate(user=getattr(self, 'user_' + index.__str__()))\n response = self.client.patch(get_reusable_item_1_url(self), {'vote': 'yes'}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # users vote against\n for index in range(5, 10):\n self.client.force_authenticate(user=getattr(self, 'user_' + index.__str__()))\n response = self.client.patch(get_reusable_item_1_url(self), {'vote': 'no'}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n updated_reusableitem2 = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n # the change request should be resolved\n self.assertEqual(updated_reusableitem2.change_request, None)\n\n # it should be rejected\n history_entry = updated_reusableitem2.history[-1]\n self.assertEqual(history_entry['change_request_resolution'], 'rejected')",
"def test_wrong_vote_parameter(self):\n res = self.client().post(\n '/api/v2/auth/login',\n headers=self.get_accept_content_type_headers(),\n data=json.dumps(ADMIN_LOGIN)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n access_token = response_msg[\"data\"][0][\"token\"]\n self.create_meetup(access_token, MEETUP)\n access_token = self.get_access_token(USER_REGISTRATION, USER_LOGIN)\n self.create_question(access_token, QUESTION)\n access_token = self.get_access_token(NEW_USER_REGISTRATION, NEW_USER_LOGIN)\n res = self.client().patch(\n '/api/v2/questions/1/vote',\n headers=self.get_authentication_headers(access_token)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n self.assertEqual(res.status_code, 400)\n self.assertEqual(\n response_msg[\"message\"][\"error\"],\n \"Vote path parameter can either be upvote / downvote\"\n )",
"def test_vote_view_dont_allow_to_vote_multiple_times(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n try:\n with transaction.atomic():\n response1 = self.client.post('/posts/1/vote/', {\"vote\": \"1\"})\n except:\n pass\n self.assertEqual(len(PostVotes.objects.all()), votes_len)",
"def test_confirm_duplicated_consent(self):\n # We create the flow request\n res = self._add_flow_request(flow_request=self.flow_request)\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n # Then we login as mouse since the mock is configured to return 400 with \"mouse\" login\n self.client.login(username='mouse', password='duck')\n # Then we confirm the request.\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id,\n ERRORS_MESSAGE['ALL_CONSENTS_ALREADY_CREATED']),\n fetch_redirect_response=False)",
"def test_upvote_then_downvote_same_user_leaves_post_score_one_less(self):\n post = Post.objects.get(body=\"123ABC Body\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(post.score, DEFAULT_SCORE)\n post = Post.objects.get(body=\"123ABC Body\")\n\n vote1 = Vote.create(post=post, value=1, voter=self.user)\n post = Post.objects.get(body=\"123ABC Body\")\n self.assertEqual(post.score, DEFAULT_SCORE + 1)\n\n vote2 = Vote.create(post=post, value=-1, voter=self.user)\n post = Post.objects.get(body=\"123ABC Body\")\n self.assertEqual(post.score, DEFAULT_SCORE - 1)",
"def test_candidates_retrieve(self):\n pass",
"def test_identify_duplicates_6(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)",
"def test_duplicate_true_detections(self):\n expected_accuracy = dict(num_recall=10, uniq_recall=10, num_precision=20, uniq_precision=10)\n self._run_and_validate(self.duplicate_true_dets, self.ground_truths, expected_accuracy)"
] | [
"0.7190982",
"0.67039883",
"0.63653916",
"0.62804335",
"0.61187637",
"0.60533804",
"0.59969497",
"0.59777725",
"0.5905661",
"0.58996975",
"0.586041",
"0.58293116",
"0.58067715",
"0.57936454",
"0.57888746",
"0.5755017",
"0.5683631",
"0.5670919",
"0.5661115",
"0.5626121",
"0.56144255",
"0.5607269",
"0.55819714",
"0.55713814",
"0.5566856",
"0.5560514",
"0.55499506",
"0.55457497",
"0.5543306",
"0.55425215"
] | 0.7878966 | 0 |
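The duplicate check in the record above can be sketched with a set comparison over hashable candidates; the exception name follows the test, but the validation code below is an assumption rather than pyrankvote's source:

```python
class DuplicateCandidatesError(ValueError):
    """Raised when the same candidate appears more than once on a ballot."""


class Ballot:
    def __init__(self, ranked_candidates) -> None:
        candidates = list(ranked_candidates)
        # With candidates hashable and compared by value, a set exposes repeats.
        if len(set(candidates)) != len(candidates):
            raise DuplicateCandidatesError("ranked_candidates contains duplicates")
        self.ranked_candidates = tuple(candidates)
```

Any hashable, value-compared candidate object works with this check, which is consistent with the test_new_candidate_objects negative listed above.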
Test that if one of the candidates voted for is not a Candidate, a TypeError is raised | def test_raise_error_if_not_all_obj_are_candidate_objects(self):
candidate1 = pyrankvote.Candidate("Per")
candidate2 = "Aase"
def tester(_):
pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2])
msg = "Candidate 2 is a string, not a Candidate, and should raise a TypeError"
self.assertRaises(TypeError, tester, msg)
# TEST THE OPPOSITE
candidate1 = pyrankvote.Candidate("Per")
candidate2 = pyrankvote.Candidate("Aase")
# This should NOT raise an error
pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tally_no_candidates(self):\n self.init_elect_types()\n\n userA = models.User(\n name = \"UserA\",\n email = \"[email protected]\",\n password = \"asdf\")\n\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id)\n\n session.add(electionA)\n session.commit()\n\n raceA = models.Race(\n title = \"Race A\",\n election_id = electionA.id\n )\n\n session.add(raceA)\n session.commit()\n\n with self.assertRaises(NoCandidates):\n self.wta.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.proportional.check_race(raceA.id)\n\n with self.assertRaises(NoCandidates):\n self.schulze.check_race(raceA.id)",
"def test_tally_no_votes(self):\n self.populate_database()\n self.electionA.elect_open = False\n with self.assertRaises(NoVotes):\n self.wta.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.proportional.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.schulze.check_race(self.raceA.id)",
"def test_wrong_input_type(self):\n with self.assertRaises(TypeError):\n votes_to_percentages(['not', 'a', 'queryset'])\n with self.assertRaises(TypeError):\n votes_to_percentages(Disposable.objects.all())",
"def test_empty_votes(self):\n with self.assertRaises(ValueError):\n votes_to_percentages(DisposableVote.objects.none())",
"def test_tally_no_races(self):\n self.init_elect_types()\n\n userA = models.User(\n name = \"UserA\",\n email = \"[email protected]\",\n password = \"asdf\")\n\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id)\n\n session.add(electionA)\n session.commit()\n\n with self.assertRaises(NoRaces):\n self.wta.check_race(1)\n\n with self.assertRaises(NoRaces):\n self.proportional.check_race(1)\n\n with self.assertRaises(NoRaces):\n self.schulze.check_race(1)",
"def test_raise_duplicate_candidate_error(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Per\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n def tester(_):\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])\n\n msg = \"Candidate 1 and 2 is equal and should raise duplicate candidate error\"\n self.assertRaises(pyrankvote.models.DuplicateCandidatesError, tester, msg)\n\n # TEST THE OPPOSITE\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Maria\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n # This should NOT raise an error\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])",
"def test_missing_vote_value(self) -> None:\n self.clear_votes()\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def violated(self) -> bool:\n ...",
"def test__validate_owner__1():\n for field_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_owner(field_value)",
"def test_not_rxx_equivalent(self):\n gate = SwapGate\n with self.assertRaises(QiskitError) as exc:\n TwoQubitControlledUDecomposer(gate)\n self.assertIn(\n \"Equivalent gate needs to take exactly 1 angle parameter.\", exc.exception.message\n )",
"def is_acceptable(self):",
"def test_subscribe_wrong_type_provided(self):\n with self.assertRaises(TypeError):\n SubscriptionManager.subscribe(TypeError())\n\n # Check state not altered\n self.assertFalse(self.braintree_customer.active)\n self.assertFalse(self.braintree_customer.pending_cancel)\n self.assertIsNone(self.braintree_customer.expiry_date)",
"def test_invalid_type_input(self):\n\n with self.assertRaises(TypeError):\n sv.match('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.select('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.filter('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.comments('div', \"not a tag\")",
"def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False",
"def test_cancellations(self):\n self.assertEqual(self.meter * self.imeter, 1)\n self.assertEqual(self.second * self.isecond, 1)\n self.assertEqual(self.kgram * self.ikgram, 1)",
"def check_dates(dates):\n for date in dates:\n if type(date) != datetime.datetime:\n raise TypeError('Input date, %s, not datetime object' % date)",
"def verifyAcceptedProposals(self, accepted_proposals):\n self.assertIsInstance(accepted_proposals, dict)\n for k, v in accepted_proposals.iteritems():\n self.assertIsInstance(k, int)\n self.assertIsInstance(v, tuple)\n self.assertEqual(len(v), 2)\n self.assertIsInstance(v[0], Ballot)\n self.assertIsInstance(v[1], Proposal)",
"def test_equal(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Per\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n self.assertEqual(candidate1, candidate2, \"These candidates should be equal/the same candidate.\")\n self.assertNotEqual(candidate1, candidate3, \"These candidates should NOT be equal/the same candidate.\")",
"def can_create_election(user_id, user_info):\n return True",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def test_race_voteval_check(self):\n self.populate_database()\n self.assertEqual(len(self.raceB.candidates), 4)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 1)\n\n self.raceB.election_type = \"Schulze\"\n self.assertEqual(len(self.raceB.candidates), 4)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 4)\n\n ### WHY DO THESE WORK:\n self.raceB.candidates.append(self.candidateAB)\n self.assertEqual(len(self.raceB.candidates), 5)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 5)\n\n self.raceB.candidates.remove(self.candidateBD)\n # Throw a couple of wrenches in the works\n self.raceB.max_vote_val = 1\n self.raceB.min_vote_val = 7\n\n self.assertEqual(len(self.raceB.candidates), 4)\n self.assertEqual(self.raceB.min_vote_val, 0)\n self.assertEqual(self.raceB.max_vote_val, 4)\n\n ### BUT THESE DO NOT ?!?!? ( obviously indirect changes to the \n ### db/collection aren't handled by the validator event)\n # session.delete(self.candidateBD)\n # self.candidateAB.race_id = self.raceB.id",
"def test_compute_correlation_invalid_comparison_mode(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary2, 'foo',\r\n 'pearson', 'two-sided', 999, 0.90)",
"def check_validity(self):",
"def check(self, value: Any) -> None:\n if not isinstance(value, self.oktype):\n raise TypeError(value)",
"def test_rr_bcibad(results):\n ci1 = results.effect_ci(citype=\"Some Unsupported Type\")\n assert np.all(np.isnan(ci1))",
"def _is_denies_valid(self):\n if not isinstance(self.denies_, list):\n raise TypeError(\n 'denies type is %s but expected type is list: %s' % (\n type(self.denies_), self.denies_))\n\n for i, deny in enumerate(self.denies_):\n if not isinstance(deny, str):\n raise TypeError(\n 'denies[%s] type is %s but expected type is str: %s' % (\n i, type(deny), deny))",
"def is_codon_correct(input_codon):\n if type(input_codon) == float:\n return False\n\t\t\n allowed_bases = ['A', 'T', 'C', 'G', 'N', '?', '-']\n\n for base in input_codon:\n if base in allowed_bases:\n continue\n else:\n print(\"Your codon is incorrect\")\n return False\n\n return True",
"def test_bad_curie_in_list():\n with pytest.raises(ValidationError):\n pub = Publication(id='PMID:123', mesh_terms=['foo:bar', 'bad_curie'])",
"def test_check_type_1():\r\n hl = hotlist.HotList()\r\n hl._validate_value(1)\r\n hl._validate_value(1L)\r\n hl._validate_value(1.5)\r\n hl._validate_value(\"abc\")\r\n hl._validate_value(u\"abc\")\r\n hl._validate_value((1, 2, 3,))\r\n hl._validate_value((1, \"AAA\", 3,))\r\n hl._validate_value((1, (\"AAA\", 2, 3,) , 3,))\r\n hl._validate_value((1, frozenset([\"AAA\", 2, 3,]) , 3,))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value([ 1, 2, 3,])\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(( 1, 2, [ 3, 4, 5,],))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value({})\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(hotlist.HotList())",
"def is_inequality(self): \n return False"
] | [
"0.68555146",
"0.6602259",
"0.6469301",
"0.60011166",
"0.59726834",
"0.58263236",
"0.5738305",
"0.5644295",
"0.5516878",
"0.548198",
"0.5445231",
"0.5321506",
"0.53020644",
"0.52884585",
"0.5283049",
"0.5281973",
"0.52745837",
"0.52628803",
"0.52594995",
"0.52544856",
"0.52496463",
"0.5205891",
"0.51931024",
"0.51918316",
"0.51809937",
"0.5157058",
"0.5153029",
"0.51463026",
"0.514586",
"0.5132248"
] | 0.70258445 | 0 |
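The TypeError in the record above suggests a fail-fast type check at ballot construction. The helper below shows that general pattern only; judging from the test_new_candidate_objects negative earlier in this dump, the library's real rule is looser than a strict isinstance check, so the predicate here is a hypothetical stand-in:

```python
def check_ranked_candidates(ranked_candidates, is_candidate) -> None:
    """Raise TypeError for any entry that the is_candidate predicate rejects."""
    for index, candidate in enumerate(ranked_candidates):
        if not is_candidate(candidate):
            raise TypeError(
                f"ranked_candidates[{index}] is {type(candidate).__name__}, "
                "not an accepted candidate object"
            )
```

For example, `check_ranked_candidates(["Per", 42], is_candidate=lambda c: isinstance(c, str))` raises on the second entry while leaving valid ballots untouched.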
Make a sum of integrals of CpT to calculate dST and dHT | def CpT(dict_, T_react): # T_column - name of the column of Cp temperature in Data
T = T_react
if not dict_['T(Cp)']:
return 0, 0
else:
CpT_S_ = dict_['a']*(math.log(T/298)) + dict_['b']*math.pow(10,-3)*(T-298) - 0.5*dict_['c']*math.pow(10,6)*(math.pow(T, -2) - math.pow(298,-2)) + dict_['d']*(0.5*math.pow(10,-6)*(math.pow(T,2) - math.pow(298,2)))
CpT_H_ = (dict_['a']*(T - 298) + dict_['b']*0.5*math.pow(10,-3)*(math.pow(T,2) - math.pow(298,2)) + dict_['c']*(math.pow(10,6)*(math.pow(298,-1) - math.pow(T, -1))) + dict_['d']*(1/3)*(math.pow(10,-6)*(math.pow(T,3) - math.pow(298,3))))
return CpT_S_, CpT_H_
'''
elif isinstance(dict_['T(Cp)'], tuple): # This part doesn`t check!
"""If more then one values of T(Cp) and 'a', 'b', 'c', 'd' this part calculate a sum of integrals of CpT"""
T_start = 298 # First temperature of integral calculation
dCpT_S = []
dCpT_H = []
for x in range(len(dict_['T(Cp)'])):
if dict_['T(Cp)'][x] > T_react:
T = T_react
else:
T = dict_['T(Cp)'][x]
CpT_S_ = (dict_['a'][x]*math.log(T/298)) + (dict_['b'][x]*math.pow(10,-3)*(T-298)) - (0.5*dict_['c'][x]*(math.pow(T, -2) - math.pow(298,-2))) + (dict_['d'][x]*(0.5*math.pow(10,-6)*(math.pow(T,2) - math.pow(298,2))))
CpT_H_ = (dict_['a'][x]*(T - 298) + (dict_['b'][x]*(0.5*math.pow(10,-3)*(math.pow(T,2))) - math.pow(298,2)) + (dict_['c'][x]*(math.pow(10,6)*(math.pow(298,-1) - math.pow(T, -1)))) + (dict_['d'][x]*(1/3*math.pow(10,-6)*(math.pow(T,3) - math.pow(298,3)))))
dCpT_S.append(CpT_S_)
dCpT_H.append(CpT_H_)
T_start = dict_['T(Cp)'][x]
if T == T_react:
return (sum(dCpT_S), sum(dCpT_H))
''' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dCdt(t,C,params_unknown, params_known, i):\r\n q_co2_interp,P0,C0,a,b,c,P = params_known\r\n d,M0 = params_unknown\r\n return (1-C)*q_co2_interp[i]/M0 -b/a/M0*(P[i]-P0)*(carbon_prime(C,P[i],P0)-C)-d*(C-C0)",
"def integrate(self, t):",
"def Cintegrate(phi,\n HC, \n dt):\n\n phip1 = phi-HC.dot(phi)*dt\n \n return phip1",
"def e_dtw(t0, t1):\n\n n0 = len(t0)\n n1 = len(t1)\n C = np.zeros((n0 + 1, n1 + 1))\n C[1:, 0] = float('inf')\n C[0, 1:] = float('inf')\n for i in np.arange(n0) + 1:\n for j in np.arange(n1) + 1:\n C[i, j] = eucl_dist(t0[i - 1], t1[j - 1]) + min(C[i, j - 1], C[i - 1, j - 1], C[i - 1, j])\n dtw = C[n0, n1]\n return dtw",
"def T_c(I, T_amb, V, D, R_list, N_cond=1, T_range=[298,323,348], a_s=0.9, e_s=0.9, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):\n\n # def Q_gen(I, R):\n # w = I * I * R\n # return w\n\n # def Q_rad_in(I_sun, A_s, a_s):\n # w = I_sun * D * a_s\n # return w\n\n # def Q_conv(htcoeff, A_s, T_lin, T_amb):\n # w = htcoeff * A_s * (T_line - T_amb)\n # return w\n\n # def Q_rad_out(e_s, A_s, sigma, T_line, T_amb):\n # w = e_s * D * sigma * (T_line**4 - T_amb**4)\n # return w\n\n def reynolds(V, D, v, Mair=1.103):\n r = V * D / v\n return r\n\n def nusselt(Re, Pr):\n a = 0.62 * ( (Re) ** (1.0/2.0) ) * ( Pr ** (1.0/3.0) )\n b = (1 + (0.4/(Pr**(2.0/3.0) ) ) ) ** (1.0/4.0)\n c = (Re / 282000) ** (5.0/8.0)\n n = 0.3 + (a/b) * ( (1 + c) ** (4.0/5.0) )\n return n\n\n def air_prop(T_amb):\n # temp v k Pr\n air_prop = np.array([[200, 7.59e-6, 18.1e-3, 0.737],\n [250, 11.44e-6, 22.3e-3, 0.720],\n [300, 15.89e-6, 26.3e-3, 0.707],\n [350, 20.92e-6, 30.0e-3, 0.700],\n [400, 26.41e-6, 33.8e-3, 0.690],\n [450, 32.39e-6, 37.3e-3, 0.686],\n [500, 38.79e-6, 40.7e-3, 0.684],\n [550, 45.57e-6, 43.9e-3, 0.683],\n [600, 52.69e-6, 46.9e-3, 0.685]])\n\n v, k, Pr = np.apply_along_axis(lambda x: np.interp(T_amb, air_prop[:,0], x),\n 0, air_prop[:,1:])\n return v, k, Pr\n\n def R_T(R_lo, R_mid, R_hi, T_line, N_cond, T_range=T_range):\n if 273 <= T_line <= 323:\n R = ((R_lo + \n ((R_lo - R_mid)/(T_range[0] - T_range[1]))\n *(T_line - T_range[0]))/N_cond)\n elif T_line > 323:\n R = ((R_mid + \n ((R_mid - R_hi)/(T_range[1] - T_range[2]))\n *(T_line - T_range[1]))/N_cond)\n else:\n R = R_lo\n print('Out of bounds')\n return R\n\n R_lo, R_mid, R_hi = R_list[0], R_list[1], R_list[2]\n temp_factor = 1\n wind_factor = 1\n sigma = 5.6703e-8 # Stefan-Boltzmann constant\n\n T_amb = T_amb*temp_factor\n V = V*wind_factor\n\n v, k, Pr = air_prop(T_amb)\n Re = reynolds(V, D, v)\n htcoeff = nusselt(Re, Pr) * k / D\n\n def T_line(T_init):\n \n R = R_T(R_lo, R_mid, R_hi, T_init, N_cond)\n print R\n\n C4 = e_s * sigma * D * math.pi\n C3 = 0.0\n C2 = 0.0\n C1 = htcoeff * D * math.pi\n C0 = - ( I ** 2 * R\n + I_sun * a_s * D\n + htcoeff * D * math.pi * T_amb\n + e_s * D * math.pi * sigma * (T_amb ** 4))\n\n return np.roots([C4, C3, C2, C1, C0])\n\n T_c = T_amb\n \n for i in range(n_iter):\n T_arr = T_line(T_c)\n T_c = np.real(T_arr[np.where((np.real(T_arr) > 0) & ~(np.iscomplex(T_arr)))]).mean()\n print T_c\n\n return T_c",
"def C(self,t,K,c0):\n #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('lsoda')\n #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('vode', method='bdf', order=15)\n \n # if we have any negative times we assume they occur before the \n # reaction starts hence all negative times are assigned concentration \n # c0\n \n ## could switch to something like ode15s that the oiginal matlab code \n ## uses - can odeint cope with equations as stiff as we need?\n ## to use integrate.ode need order of arguments in dc_dt to switch\n \n #r = scipy.integrate.ode(self.dc_dt)\n #r = r.set_integrator('vode', method='bdf', order=15,nsteps=3000)\n #r = r.set_initial_value(c0)\n #r = r.set_f_params((K,))\n #r.integrate(t)\n \n static_times = t[t<0]\n dynamic_times = t[t>=0]\n\n static_C = np.array([c0 for _ in static_times])\n\n # odeint always takes the first time point as t0\n # our t0 is always 0 (removing t0 occures before we integrate)\n # so if the first time point is not 0 we add it \n \n if not dynamic_times.any() or dynamic_times[0]:\n #fancy indexing returns a copy so we can do this\n dynamic_times = np.hstack([[0],dynamic_times]) \n dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))[1:]\n else:\n dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))\n \n if static_C.any():\n return np.vstack([static_C,dynamic_C])\n else:\n return dynamic_C",
"def sumofstate_HD(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 1 # hydrogen deuteride\n g_odd = 1\n # ---------------------------------------\n\n data = eJHD\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for HD\n return Q",
"def Tc (x,infin, a, nu):\r\n return infin + a* (x ** (-1/nu))",
"def Qc(I, dT, a, b, c, d, e, f, g, h, i, k):\n x1 = I # I\n x2 = dT # dT\n m = (i * x1 ** 4 + a * x1 ** 3 + b * x1 ** 2 + c * x1 + d)\n b = (k * x1 ** 4 + e * x1 ** 3 + f * x1 ** 2 + g * x1 + h)\n return m * x2 + b",
"def j(U, Q, mesh, T, num_steps, params):\n \n # Define parameters for cost functional\n alpha = params[\"alpha\"]\n u_d = params[\"u_d\"]\n \n # Compute integrals with time\n I1 = 0\n I3 = 0\n \n t = 0\n dt = T/num_steps\n for i in range(num_steps + 1):\n I1_int = assemble((U[i] - u_d[i])*(U[i] - u_d[i])*dx(mesh))\n I3_int = assemble(Q[i]*Q[i]*dx(mesh))\n \n if i == 0 or i == num_steps:\n I1_int *= 0.5\n I3_int *= 0.5\n \n I1 += I1_int\n I3 += I3_int\n \n t += dt\n \n \n I1 *= dt\n I3 *= dt*alpha/2\n \n # Compute end time integral\n \n print(\"Cost Functional Data\")\n print(\"I1: {}\".format(I1))\n print(\"I3: {}\".format(I3))\n print()\n \n return I1 + I3",
"def cubic_evolve(self,nt=1):\n #loop through time steps\n for l in range(nt):\n # temporary array\n y_temp = np.zeros(self.y.shape[0])\n # loop through array\n for i in range(self.y.shape[0]):\n # idx left to departure point\n x_dep = self.x[i]-self.u[i]*self.dt\n j = int(np.floor(x_dep/self.dx))\n # alpha\n a = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n # calculate next time step\n f = lambda x: x % self.y.shape[0] if x >= self.y.shape[0] else x\n y_temp[i] = - a * (1-a)*(2-a)/6 * self.y[f(j-1)]\n y_temp[i] += (1-a**2)*(2-a)/2 * self.y[f(j)]\n y_temp[i] += a*(1+a)*(2-a)/2 * self.y[f(j+1)]\n y_temp[i] -= a*(1-a**2)/6 * self.y[f(j+2)]\n self.y = np.copy(y_temp)\n return self.y",
"def sumofstate_D2(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 6 # deuterium\n g_odd = 3\n # ---------------------------------------\n\n data = eJD2\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for H2\n return Q",
"def calc_Cinv_CCGT(CC_size_W, CCGT_cost_data):\n\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if CC_size_W < CCGT_cost_data['cap_min'][0]:\n CC_size_W = CCGT_cost_data['cap_min'][0]\n CCGT_cost_data = CCGT_cost_data[\n (CCGT_cost_data['cap_min'] <= CC_size_W) & (CCGT_cost_data['cap_max'] > CC_size_W)]\n\n\n #costs of connection\n connection_costs = ngas.calc_Cinv_gas(CC_size_W)\n\n Inv_a = CCGT_cost_data.iloc[0]['a']\n Inv_b = CCGT_cost_data.iloc[0]['b']\n Inv_c = CCGT_cost_data.iloc[0]['c']\n Inv_d = CCGT_cost_data.iloc[0]['d']\n Inv_e = CCGT_cost_data.iloc[0]['e']\n Inv_IR = CCGT_cost_data.iloc[0]['IR_%']\n Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']\n Inv_OM = CCGT_cost_data.iloc[0]['O&M_%'] / 100\n\n InvC = Inv_a + Inv_b * (CC_size_W) ** Inv_c + (Inv_d + Inv_e * CC_size_W) * log(CC_size_W)\n\n Capex_a_CCGT_USD = calc_capex_annualized((InvC+connection_costs), Inv_IR, Inv_LT)\n Opex_fixed_CCGT_USD = InvC * Inv_OM\n Capex_CCGT_USD = InvC\n\n return Capex_a_CCGT_USD, Opex_fixed_CCGT_USD, Capex_CCGT_USD",
"def calculate_NH_relaxation_from_Ct(bondtype, B_0, t, Ct):\n gamma_1H = 267.513e6 ; # rad s^-1 T^-1\n gamma_X = -27.116e6\n DeltaSigma_X=-160e-6\n r_XH=1.02e-10\n\n om, G = do_dft(t, Ct)\n J = G.real\n\n omega, iOmH, iOmX = obtain_HX_frequencies()\n\n Jw=np.zeros(5)\n for i in range(5):\n w = omega[i]\n Jw[i] = interpolate_point(w, om, J)\n\n # f_DD = 519627720.1974593 , if r_NH is at default values\n f_DD = 7.958699205571828e-67 * r_XH**-6.0 * gamma_X**2\n # f_CSA = 498637299.69233465, if B_0 = 600.13, and DeltaSigma=-160e-6\n f_CSA = 2.0/15.0 * DeltaSigma_X**2 * ( gamma_X * B_0 )**2\n\n R1 = f_DD*( J[iOmH-iOmX] + 3*J[iOmX] + 6*J[iOmH+iOmX] ) + f_CSA*J[iOmX]\n R2 = 0.5*f_DD*( 4*J[0] + J[iOmH-iOmX] + 3*J[iOmX] + 6*J[iOmH+iOmX] + 6*J[iOmH] ) + 1.0/6.0*f_CSA*(4*J[0] + 3*J[iOmX])\n NOE = 1.0 + gamma_1H/gamma_X/R1 * f_DD*(6*J[iOmH+iOmX] - J[iOmH-iOmX])\n\n return R1, R2, NOE",
"def Kg(T, D):\n# return 2.10*np.ones(np.shape(T)) #at 0 degC\n# return Kg0*np.exp(Kg1*T)\n KiT=Kg0*np.exp(Kg1*T)\n return (2.*KiT*D)/(3.-D)",
"def solve_integral(ti, X, kernel, p, events, dt, Tmax):\n partial_sum = 0\n last_partial_sum = 0\n t = ti\n lambda_0 = p(t) * sum([\n fol_count * kernel(t - event_time) for event_time, fol_count in events\n ])\n lambda_1 = None\n while partial_sum < X:\n t += dt\n lambda_1 = p(t) * sum([\n fol_count * kernel(t - event_time)\n for event_time, fol_count in events\n ])\n partial_sum += dt * (lambda_0 + lambda_1) / 2\n\n if partial_sum < X:\n lambda_0 = lambda_1\n last_partial_sum = partial_sum\n if t > Tmax:\n return -1\n\n dlam = (lambda_1 - lambda_0) / dt\n du = X - last_partial_sum\n s = (sqrt(lambda_0 * lambda_0 + 2 * dlam * du) - lambda_0) / dlam\n return t - dt + s",
"def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we",
"def _omori_integrate(t, c, p):\n if p == 1:\n return log((t + c)/c)\n else:\n return ((t + c)**(1 - p) - c**(1 - p))/(1 - p)",
"def calc_cop_CCGT(GT_size_W, T_sup_K, fuel_type):\n\n it_len = 50\n\n # create empty arrays\n range_el_output_CC_W = np.zeros(it_len)\n range_q_output_CC_W = np.zeros(it_len)\n range_eta_el_CC = np.zeros(it_len)\n range_eta_thermal_CC = np.zeros(it_len)\n range_q_input_CC_W = np.zeros(it_len)\n\n # create range of electricity output from the GT between the minimum and nominal load\n range_el_output_from_GT_W = np.linspace(GT_size_W * GT_MIN_PART_LOAD, GT_size_W, it_len)\n\n # calculate the operation data at different electricity load\n for i in range(len(range_el_output_from_GT_W)):\n el_output_from_GT_W = range_el_output_from_GT_W[i]\n\n # combine cycle operation\n CC_operation = calc_CC_operation(el_output_from_GT_W, GT_size_W, fuel_type, T_sup_K)\n range_el_output_CC_W[i] = CC_operation['el_output_W'] # Electricity output from the combined cycle\n range_q_output_CC_W[i] = CC_operation['q_output_ST_W'] # Thermal output from the combined cycle\n range_eta_el_CC[i] = CC_operation['eta_el'] # el. efficiency\n range_eta_thermal_CC[i] = CC_operation['eta_thermal'] # thermal efficiency\n\n range_q_input_CC_W[i] = range_q_output_CC_W[i] / range_eta_thermal_CC[i] # thermal energy input\n\n # create interpolation functions as a function of heat output\n el_output_interpol_with_q_output_W = interpolate.interp1d(range_q_output_CC_W, range_el_output_from_GT_W,\n kind=\"linear\")\n q_input_interpol_with_q_output_W = interpolate.interp1d(range_q_output_CC_W, range_q_input_CC_W, kind=\"linear\")\n\n # create interpolation functions as a function of thermal energy input\n eta_el_interpol_with_q_input = interpolate.interp1d(range_q_input_CC_W, range_eta_el_CC,\n kind=\"linear\")\n\n q_output_min_W = min(range_q_output_CC_W)\n q_output_max_W = max(range_q_output_CC_W)\n\n return {'el_output_fn_q_output_W': el_output_interpol_with_q_output_W,\n 'q_input_fn_q_output_W': q_input_interpol_with_q_output_W,\n 'q_output_min_W': q_output_min_W, 'q_output_max_W': q_output_max_W,\n 'eta_el_fn_q_input': eta_el_interpol_with_q_input}",
"def Fplus_TQ(t,thetaS,phiS,psi):\n return (cos(2*psi)*Dplus_TQ(t,thetaS,phiS)-sin(2*psi)*Dcros_TQ(t,thetaS,phiS))/2.",
"def estimateCt(y, inp):\n\treturn getK2(inp) * (1 - math.exp(-getLambda(inp) * y / getY90(inp)))",
"def sumofstate_H2(T):\n\n Q = np.float64(0.0)\n\n #--- nuclear spin statistics ------------\n g_even = 1 \t# hydrogen\n g_odd = 3\n # ---------------------------------------\n\n data = eJH2\n\n nCols = data.shape[1]\n # nCols is equal to the number of vibrational\n # states included in the summation\n\n # generate Q using each energy from the dataset\n for i in range(0, nCols):\n\n # select row for v=i\n row = data[:,i]\n\n # remove nan values\n x = row[np.logical_not(np.isnan(row))]\n\n # get the dimension (equal to J_max)\n nRows = x.shape[0]\n\n # iterate over the available energies\n for j in range(0, nRows):\n E = x[j]\n energy = (-1*E*H*C)\n\n factor = (2*j+1)*math.exp(energy/(K*T))\n\n if j % 2 == 0:\n factor = factor*g_even\n else:\n factor = factor*g_odd\n Q = Q+factor\n\n\n\n # return the sum of states for H2\n return Q",
"def D(T, xC):\n a = 4.53e5\n b = 8339.9/T\n c = 1./T - 2.221e-4\n d = 17767\n e = -26436\n\n yC = xC/(1. - xC)\n\n # D0 = 4.53e5*(1. + yC*(1.-yC)*8339.9/T) # Pre-exponential term\n # D = D0*np.exp(-(1./T - 2.221e-4)*(17767 - yC*26436)) # um^2/s\n # return D\n return a*(1 + b*yC*(1-yC))*np.exp(-c*(d + e*yC))",
"def add_computed_gas_concentrations(self):\n # Extract the z-coordinate and T, S, P profile\n zs = self.interp_ds.coords[self.ztsp[0]].values\n Ts = self.interp_ds[self.ztsp[1]].values\n Ss = self.interp_ds[self.ztsp[2]].values\n Ps = self.interp_ds[self.ztsp[3]].values\n \n # Create an air object\n air_names = ['nitrogen', 'oxygen', 'argon', 'carbon_dioxide']\n yk = np.array([0.78084, 0.20946, 0.009340, 0.00036])\n from tamoc import dbm\n air = dbm.FluidMixture(air_names)\n m = air.masses(yk)\n \n # Compute the concentrations adjusted for depth\n Cs = np.zeros((len(zs), len(air_names)))\n for i in range(len(zs)):\n Cs[i,:] = air.solubility(m, Ts[i], 101325., Ss[i])[0,:] * \\\n seawater.density(Ts[i], Ss[i], Ps[i]) / \\\n seawater.density(Ts[i], Ss[i], 101325.)\n \n # Make sure none of these gases are already in the measured profile\n for name in air_names:\n if name in self.interp_ds:\n air_names[air_names.index(name)] = 'computed_' + name\n \n # Add these data to the Profile object\n data = np.hstack((np.atleast_2d(zs).transpose(), Cs))\n names = [self.ztsp[0]] + air_names \n units = [self.ztsp_units[0]] + 4*['kg/m^3']\n self.append(data, names, units)\n \n # Rebuild the interpolator\n self._build_interpolator()",
"def sum_sum(t, init):\n return sum(t, init)",
"def N_TT_TB(self, L):\n if L>2.*min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>min(self.CMB.lMaxT, self.CMB.lMaxP):\n return 0.\n phi = self.phi(L, l1, theta)\n result = 0. #self.F_TB(l1, l2, phi)*self.CMB.ftotalTT(l1)*self.CMB.ftotalTB(l2)\n result += 0. #self.F_TB(l2, l1, -phi)*self.CMB.ftotalTB(l1)*self.CMB.ftotalTT(l2)\n result *= self.F_TT(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.N_TT_TB.__func__, \"integ\"):\n self.N_TT_TB.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(min(self.CMB.lMaxT, self.CMB.lMaxP))], [0., np.pi]])\n self.N_TT_TB.integ(integrand, nitn=8, neval=1000)\n result = self.N_TT_TB.integ(integrand, nitn=1, neval=5000)\n return result.mean",
"def CalcT(self,c,inte_pair=(0,1)):\n\n Q0=inte_pair[0] #Flux of particle-Lower limit of integator\n Q1=inte_pair[1] #Flux of target end face - Upper limit of integator\n \n if (Q1!=0 and Q0!=0): \n T=1/c*np.log((Q1)/(Q0))\n else: #Case of no flow boundary \n T=9e9\n\n return T",
"def ftcs_mixed(T, nt, dt, dx, alpha):\n for n in range(nt):\n Tn = T.copy()\n T[1:-1] = Tn[1:-1] + alpha*dt/dx**2*(Tn[2:] -2*Tn[1:-1] + Tn[0:-2])\n T[-1] = T[-2]\n return T",
"def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))",
"def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))"
] | [
"0.6367222",
"0.62956077",
"0.61352813",
"0.59738237",
"0.5969108",
"0.5945719",
"0.5938884",
"0.5851866",
"0.581811",
"0.5812156",
"0.5800408",
"0.5785194",
"0.5775465",
"0.5771758",
"0.57488585",
"0.56765854",
"0.5642424",
"0.56408626",
"0.56318754",
"0.56190896",
"0.56174755",
"0.56117916",
"0.5582701",
"0.5577625",
"0.557177",
"0.5563594",
"0.5562007",
"0.55601484",
"0.55588263",
"0.55588263"
] | 0.7117006 | 0 |
'Accepts a dictionary of thermodynamic data and returns the Gibbs free energy of a compound' | def gibbs_(dict_, T):
dST = dict_['S298'] + CpT(dict_, T)[0]
dHT = dict_['dH298'] + CpT(dict_, T)[1]/1000
return (dHT - T*dST/1000) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_free_g(energies, temperatures):\n pass",
"def BraggEnergy(ID,hkl,twotheta):\n ID=goodID(ID)\n d=dSpace(ID,hkl)\n l=2*d*sind(twotheta/2.0)\n E=lam2E(l)\n return E",
"def get_internal_energy(filename):\n # --------------- helper functions --------------- #\n def parse_data(block):\n \"\"\"\n Parse the line(s) to get the data.\n \"\"\"\n rval = {\n 'Total' : None,\n 'Electronic' : None,\n 'Translational' : None,\n 'Rotational' : None,\n 'Vibrational' : None\n }\n for line in block.splitlines():\n if re.match(r'^\\s*Total', line):\n key = 'Total'\n elif re.match(r'^\\s*Electronic', line):\n key = 'Electronic'\n elif re.match(r'^\\s*Translational', line):\n key = 'Translational'\n elif re.match(r'^\\s*Rotational', line):\n key = 'Rotational'\n elif re.match(r'^\\s*Vibrational', line):\n key = 'Vibrational'\n else:\n key = None\n if key:\n words = line.strip().split()\n try:\n rval[key] = float(words[1])\n except ValueError:\n raise ValueError('Invalid thermodynamic format.')\n return rval\n # ------------- end helper functions ------------- #\n # open the file, if a string\n if isinstance(filename, str):\n ifs = open(filename, 'r')\n else:\n ifs = filename\n # extract the relevent lines\n start = r'^\\s*E\\s+\\(Thermal\\)'\n stop = r'^\\s*Vibrational'\n rre = RegexRangeExtractor(start, stop,\n include_start=True,\n include_stop=True)\n block = rre(ifs)[0]\n # close file\n if ifs is not filename:\n ifs.close()\n # parse data\n #+ single value/file\n rval = parse_data(block)\n return rval",
"def compute_hydration_energies(molecules, parameters):\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n for molecule in molecules:\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies",
"def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u",
"def energy(data):\n return sum(pow(data, 2))",
"def energy(energy_name: str) -> float:\n pass",
"def bethe_free_energy(self, potential):\n xn, xe, lpn, lpe, alpha = self(None, full_out=False)\n fn, fe = potential((xn, xe))\n bfe = -(tf.reduce_sum((fn + self.tw * lpn) * self.wn * alpha, [2, 3, 4]) +\n tf.reduce_sum((fe - lpe) * self.we * alpha, [2, 3, 4]))\n return bfe",
"def get_energy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n energy = (\n gbar * vv / (np.sqrt(2) * np.pi ** 2) * tt ** 2.5 * _1d_call(_fdk, y, k=1.5)\n )\n return energy",
"def compute_hydration_energy(molecule, parameters, platform_name=\"Reference\"):\n\n platform = openmm.Platform.getPlatformByName(platform_name)\n\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n \n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n \n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n try:\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole\n except Exception, exception:\n print \"Cannot find parameters for atomtype '%s' in molecule '%s'\" % (atomtype, molecule.GetTitle())\n print parameters.keys()\n raise exception\n \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n \n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energy = state.getPotentialEnergy() / units.kilocalories_per_mole\n if numpy.isnan(energy):\n energy = +1e6;\n\n return energy",
"def band_energy(k,t=1.0,e0=0.2,a=1.0):\n return e0-t*np.exp(1j*k*a)-t*np.exp(-1j*k*a)",
"def read_charges_and_energy(self):\n infile = open(os.path.join(self.directory, 'detailed.out'), 'r')\n lines = infile.readlines()\n infile.close()\n\n #for line in lines:\n # if line.strip().startswith('Total energy:'):\n # energy = float(line.split()[2]) * Hartree\n # break\n\n # for finite-temperature DFT, 0K energy is needed\n for line in lines:\n if line.strip().startswith('Extrapolated to 0:'):\n energy = float(line.split()[3]) * Hartree\n break\n\n # for hellman-feynman force, need force-related free energy\n for line in lines:\n if line.strip().startswith('Force related energy:'):\n free_energy = float(line.split()[3]) * Hartree\n break\n\n qm_charges = []\n for n, line in enumerate(lines):\n if ('Atom' and 'Charge' in line):\n chargestart = n + 1\n break\n else:\n # print('Warning: did not find DFTB-charges')\n # print('This is ok if flag SCC=No')\n return None, energy\n\n lines1 = lines[chargestart:(chargestart + len(self.atoms))]\n for line in lines1:\n qm_charges.append(float(line.split()[-1]))\n\n return np.array(qm_charges), energy, free_energy",
"def get_energy(self):\r\n return self._energy",
"def get_kt(temps, delta_gibbs_ts):\n # rate coefficient from Eyring equation\n return KB / H * temps * np.exp(-delta_gibbs_ts / RG / temps) # [1/s] if unimolecular",
"def energy_bohr_orbital(atomicZ=Z1,mass_n=mn1,num=1,units=eV):\n\n var = sy.var('Z me mn R n')\n par = atomicZ, units['m_e'], mass_n, units['Rydberg'], num\n\n y = - Z**2 / (1 + (me/mn)) * (R/n**2)\n return dic_result(var,par,y)",
"def geometric_descriptor(element_dict):\n # encode the orbital types\n category = {'s': 1, 'p': 2, 'd': 3, 'f': 4};\n # total number of atoms in a perovskite structure\n N = sum(element_dict.values())\n # obtain array of atomic properties for each element type\n atomic_number_list = []\n atomic_mass_list = []\n atomic_radius_list = []\n mendeleev_no_list = []\n common_oxidation_states_list = []\n Pauling_electronegativity_list = []\n row_list = []\n group_list = []\n block_list = []\n thermal_conductivity_list = []\n boiling_point_list = []\n melting_point_list = []\n average_ionic_radius_list = []\n molar_volume_list = []\n atomic_orbitals_list = []\n for item in element_dict:\n # extract atomic property from pymatgen\n ele = mg.Element(item)\n atomic_number = ele.Z\n atomic_mass = float(str(ele.atomic_mass)[:-4])\n atomic_radius = float(str(ele.atomic_radius)[:-4])\n mendeleev_no = ele.mendeleev_no\n common_oxidation_states = ele.common_oxidation_states[0]\n Pauling_electronegativity = ele.X\n row = ele.row\n group = ele.group\n block = ele.block\n thermal_conductivity = float(str(ele.thermal_conductivity)[:-12])\n boiling_point = float(str(ele.boiling_point)[: -2])\n melting_point = float(str(ele.melting_point)[: -2])\n average_ionic_radius = float(str(ele.average_ionic_radius)[:-4])\n molar_volume = float(str(ele.molar_volume)[: -5])\n if '6s' in ele.atomic_orbitals.keys():\n atomic_orbitals = ele.atomic_orbitals['6s']\n elif '4s' in ele.atomic_orbitals.keys():\n atomic_orbitals = ele.atomic_orbitals['4s']\n else:\n atomic_orbitals = ele.atomic_orbitals['2s']\n # calculate the array of atomic properties for all atoms \n atomic_number_list += [atomic_number]*element_dict[item]\n atomic_mass_list += [atomic_mass]*element_dict[item]\n atomic_radius_list += [atomic_radius]*element_dict[item]\n mendeleev_no_list += [mendeleev_no]*element_dict[item]\n common_oxidation_states_list += [common_oxidation_states]*element_dict[item]\n Pauling_electronegativity_list += [Pauling_electronegativity]*element_dict[item]\n row_list += [row]*element_dict[item]\n group_list += [group]*element_dict[item]\n block_list += [category[block]]*element_dict[item]\n thermal_conductivity_list += [thermal_conductivity]*element_dict[item]\n boiling_point_list += [boiling_point]*element_dict[item]\n melting_point_list += [melting_point]*element_dict[item]\n average_ionic_radius_list += [average_ionic_radius]*element_dict[item]\n molar_volume_list += [molar_volume]*element_dict[item]\n atomic_orbitals_list += [atomic_orbitals]*element_dict[item]\n return [generalized_mean(np.array(atomic_number_list), 1, N)] + [generalized_mean(np.array(atomic_radius_list), 1, N)] + [generalized_mean(np.array(mendeleev_no_list), 1, N)] + [generalized_mean(np.array(common_oxidation_states_list), 1, N)] + [generalized_mean(np.array(Pauling_electronegativity_list), 1, N)] + [generalized_mean(np.array(thermal_conductivity_list), 1, N)] + [generalized_mean(np.array(average_ionic_radius_list), 1, N)] + [generalized_mean(np.array(atomic_orbitals_list), 1, N)]",
"def free_energy_function(self, x):\n \n wx_b = T.dot(x, self.W) + self.bhid\n \n return -T.sum(T.log(1 + T.exp(wx_b)), axis=1) -T.dot(x, self.b)",
"def ReadGaussian():\n # LICHEM calculates this as the optimization energy - self energy.\n # Self energy of the charges = {f} a.u.\n self_line = \" Self energy of the charges\"\n # SCF Done: E({s}) = {f} A.U. after {d} cycles\n SCF_line = \" SCF Done:\"\n with open('LICHM_GaussEnergy_0.log') as f:\n for line in f:\n if line.startswith(self_line):\n # print(line)\n ## Parses unmarked positive and - int and float values\n selfE = re.findall(r'\\-\\d+\\.*\\d*', line)\n selfE = float(selfE[0])\n elif line.startswith(SCF_line):\n # print(line)\n SCFE = re.findall(r'\\-\\d+\\.*\\d*', line)\n SCFE = float(SCFE[0])\n f.close()\n GaussE = SCFE - selfE\n GaussE *= har2ev\n return GaussE",
"def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')",
"def gbs_params(\n w: np.ndarray, wp: np.ndarray, Ud: np.ndarray, d: np.ndarray, T: float = 0\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n if T < 0:\n raise ValueError(\"Temperature must be zero or positive\")\n if T > 0:\n t = np.arctanh(np.exp(-0.5 * h * (w * c * 100) / (k * T)))\n else:\n t = np.zeros(len(w))\n\n U2, s, U1 = np.linalg.svd(np.diag(wp ** 0.5) @ Ud @ np.diag(w ** -0.5))\n alpha = d / np.sqrt(2)\n\n return t, U1, np.log(s), U2, alpha",
"def computeDataEnergy(self):\n _cgco.gcoComputeDataEnergy(self.handle, self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])",
"def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp",
"def calc_carbon_herb(height, diameter = 1, age = 1):\n \"\"\"This includes habits: perennial, annual, bulb, climber, biennial\\\n annual/biennial, perennial climber, annual/perennial, corm, annual climber\"\"\"\n \n #convert to imperial\n height /= 3.281 #feet\n diameter /= 2.54 #inches\n \n #print(height, diameter)\n \n #calculate green weight of herb: (above-ground weight) * 1.2\n green_weight = ( diameter**2 * height) * 1.2\n \n #dry weight: average tree is 72.5 dry matter \n dry_weight = 0.725 * green_weight\n \n #weight of carbon: 50% of tree dry weight\n c_weight = 0.5 * dry_weight\n \n #weight of CO2 sequestered\n co2_weight = 3.67 * c_weight\n \n return co2_weight/2.205/1 #convert from lbs to kg, divide by age",
"def _sum_g_i(self) -> float:\n elems = self.composition.get_el_amt_dict()\n\n if self.interpolated:\n sum_g_i = 0\n for elem, amt in elems.items():\n g_interp = interp1d(\n [float(t) for t in G_ELEMS.keys()],\n [g_dict[elem] for g_dict in G_ELEMS.values()],\n )\n sum_g_i += amt * g_interp(self.temp)\n else:\n sum_g_i = sum(amt * G_ELEMS[str(self.temp)][elem] for elem, amt in elems.items())\n\n return sum_g_i",
"def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)",
"def set_gsenergy(self, gsenergy=None):\n self.status()\n if not gsenergy:\n if self.__cod == 'vasp': \n #getData = VASP()\n getData = vasp.Energy()\n outfile = 'vasprun.xml'\n elif self.__cod == 'espresso':\n getData = espresso.Energy()\n outfile = 'espresso.out'\n elif self.__cod == 'wien':\n getData = wien.Energy()\n \n elif self.__cod == 'exciting':\n getData = exciting.Energy()\n outfile = 'INFO.OUT'\n elif self.__cod == 'emto':\n getData = emto.Energy(funct=self.__funct)\n outfile = '%s/prn/%s'%(self.__pname,self.__emtoout)\n gsenergy=[] \n for atoms in sorted(self.__structures.items()):\n \n if self.__cod == 'wien': \n outfile = atoms[1].path.split('/')[-1] + '.scf'\n \n if not atoms[1].status:\n #print atoms[1].status\n atoms[1].gsenergy = 0\n continue\n if atoms[1].exclude:\n atoms[1].gsenergy_ignored = getData.get_gsenergy()\n atoms[1].gsenergy = 0\n continue\n if os.path.exists(atoms[1].path+'/exclude'):\n atoms[1].gsenergy_ignored = getData.get_gsenergy()\n atoms[1].gsenergy = 0\n continue\n \n \n #getData.set_outfile('%s/%s/'%atoms[0] + outfile)\n #getData.set_gsEnergy()\n #print atoms[1].path, self.__workdir + '%s/%s'%(atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile\n #getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile)\n if 'eta' in atoms[1].path.split('/')[-1] and self.__thermodyn:getData.set_fname(self.__workdir + '%s/%s/%s'%(atoms[1].path.split('/')[-3],atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile)\n elif 'eta' in atoms[1].path.split('/')[-1] and not self.__thermodyn:getData.set_fname(self.__workdir + '%s/%s'%(atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile)\n else: getData.set_fname(self.__workdir + '%s'%(atoms[1].path.split('/')[-1])+'/' + outfile)\n print getData.get_fname()\n getData.set_gsenergy()\n if self.__thermodyn and self.__mod!='structures_phonons':\n outfile_ph = 'F_TV'\n #getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile_ph)\n #getData.T = self.__T\n \n getData.set_phenergy(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile_ph)\n atoms[1].phenergy = getData.get_phenergy()\n atoms[1].T = getData.T\n #atoms[1].gsenergy = getData.get_gsEnergy()\n atoms[1].gsenergy = getData.get_gsenergy()/125.\n else:\n atoms[1].gsenergy = getData.get_gsenergy()\n gsenergy.append(atoms[1].gsenergy)\n \n if self.delPoints:\n for atoms in sorted(self.__structures.items()):\n \n #print [atoms[1].eta for atoms in sorted(self.__structures.items())], gsenergy\n coeff = np.polyfit([atoms[1].eta for atoms in self.__structures.items()], gsenergy, 2)\n p = np.poly1d(coeff)\n k=0\n for (etas,energy) in zip(self.__structures.items(),gsenergy):\n #print (energy-p(etas[1].eta))**2.\n if (energy-p(etas[1].eta))**2. > 0.0004: \n gsenergy[k]=0.\n atoms[1].gsenergy = 0. \n k+=1\n self.__gsenergy = gsenergy",
"def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. / 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)",
"def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))",
"def get_energy(edr, annealing_times, energy_type = 'Potential', out_fig = 'energy_distribution.svg'): # Could be Total-Energy\n fig, ax = plt.subplots(figsize = (16,9))\n data = pd.DataFrame()\n xvg_tmp_file = tempfile.NamedTemporaryFile(suffix='.xvg')\n energy = []\n iterator = range(0, len(annealing_times)-1, 2)\n\n for state, index in tqdm.tqdm(enumerate(iterator), total=len(iterator)):#enumerate(iterator):# # the calculation is per pair of times, beetween the first to time the temperature was keep constant, then the system was heated and repeated again.\n run = tools.run(f\"export GMX_MAXBACKUP=-1; echo {energy_type} | gmx energy -f {edr} -b {annealing_times[index]} -e {annealing_times[index + 1]} -o {xvg_tmp_file.name} | grep \\'{energy_type.replace('-',' ')}\\'\")\n energy.append(float(run.stdout.split()[-5]))\n \"\"\"\n Energy Average Err.Est. RMSD Tot-Drift\n -------------------------------------------------------------------------------\n Potential -1.30028e+06 -- 1682.1 -2422.24 (kJ/mol)\n Total Energy -952595 -- 2606.81 -3688.3 (kJ/mol)\n \"\"\"\n # Getting the histograms and checking for the same len in all intervals\n if state == 0:\n data[state] = xvg.XVG(xvg_tmp_file.name).data[:,1]\n else:\n xvg_data = xvg.XVG(xvg_tmp_file.name).data[:,1]\n if xvg_data.shape[0] > data.shape[0]:\n data[state] = xvg_data[:data.shape[0]]\n else:\n data = data.iloc[:xvg_data.shape[0]]\n data[state] = xvg_data\n\n\n print(data)\n sns.histplot(data = data, element='poly', stat = 'probability', axes = ax)\n ax.set(\n xlabel = f'{energy_type} [kJ/mol]',\n ylabel = 'Probability',\n title = f'Distribution of {energy_type}')\n # plt.show()\n fig.savefig(out_fig)\n return energy",
"def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f"
] | [
"0.650521",
"0.64380336",
"0.6039046",
"0.59228855",
"0.5849885",
"0.5758292",
"0.5723967",
"0.5673121",
"0.5652979",
"0.5629196",
"0.5627507",
"0.5578335",
"0.55377835",
"0.5457064",
"0.5428888",
"0.54141587",
"0.53989697",
"0.5396005",
"0.5381766",
"0.53807193",
"0.53804505",
"0.5371923",
"0.5368069",
"0.5354997",
"0.53521645",
"0.53373206",
"0.53356296",
"0.5335123",
"0.53192234",
"0.53155565"
] | 0.6666221 | 0 |
Perform an action on one light identified by light_id. | def action_on_light_by_id(bridge, light_id, action):
if action == 'on':
bridge.set_light(light_id, 'on', True)
elif action == 'off':
bridge.set_light(light_id, 'on', False)
elif action == 'toggle':
current_state = bridge.get_light(light_id, 'on')
bridge.set_light(light_id, 'on', not current_state)
click.secho(
'Turning %s light %s!' % (bridge.get_light(light_id, 'name'),
get_state(not current_state)),
fg='green')
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def Turn_On_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": \"green\",\n }",
"def addLight(self, id):\r\n\t\t\r\n\t\tnewLight = Light(id)\r\n\t\tself.lights[id] = newLight",
"async def Turn_On_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id, color)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": color,\n }",
"async def Rainbow_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0)\n) -> Dict[str, Any]:\n\n busylightapi.manager.apply_effect_to_light(light_id, rainbow)\n return {\n \"action\": \"effect\",\n \"name\": \"rainbow\",\n \"light_id\": light_id,\n }",
"async def Flash_Light_Impressively(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0)\n) -> Dict[str, Any]:\n busylightapi.manager.apply_effect_to_light(light_id, flash_lights_impressively)\n return {\n \"action\": \"effect\",\n \"name\": \"fli\",\n \"light_id\": light_id,\n }",
"async def handle_set_light(self, match: Match[str], payload: str) -> None:\n uniqueid = match.group(1)\n\n # Find the light with that uniqueid\n for light_id in self._bridge.lights:\n light = self._bridge.lights[light_id]\n if light.uniqueid == uniqueid:\n try:\n state = LightSetState(**json.loads(payload))\n LOGGER.info(f\"Updating {light.name}\")\n await light.set_state(**state.dict())\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")\n return\n LOGGER.warning(f\"Unknown light uniqueid: {uniqueid}\")",
"def lights(id, all, connect, info, action, bri):\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n\n if connect:\n # If the app is not registered and the button is not pressed,\n # press the button and call connect()\n # (this only needs to be run a single time)\n try:\n bridge = phue.Bridge(BRIDGE_IP)\n except Exception:\n click.secho(\n \"Press the bridge buttom and call the connect again\", fg='red')\n else:\n click.secho(\"Already connected\", fg='green')\n\n return\n\n if info:\n # TODO: Print details of all lights\n click.secho('Light details', fg='green')\n for l in bridge.lights:\n\n click.secho(\n '\\t %d: %s is %s' % (l.light_id, l.name, get_state(l.on)),\n fg='green')\n\n if all:\n # TODO: Add api to Run action on all\n click.secho('TODO ADD: Run action on all', fg='green')\n for l in bridge.lights:\n action_on_light_by_id(bridge, l.light_id, action)\n\n else:\n if not valid_id(id):\n return\n action_on_light_by_id(bridge, int(id), action)",
"async def test_light_turn_on(\n hass: HomeAssistant,\n light: tuple[Light, str],\n):\n\n entity_id = light[1]\n light[0].__fields__[\"set_light\"] = Mock()\n light[0].set_light = AsyncMock()\n\n await hass.services.async_call(\n \"light\",\n \"turn_on\",\n {ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 128},\n blocking=True,\n )\n\n light[0].set_light.assert_called_once_with(True, 3)",
"async def Pulse_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0)\n) -> Dict[str, Any]:\n busylightapi.manager.apply_effect_to_light(light_id, pulse)\n return {\n \"action\": \"effect\",\n \"name\": \"pulse\",\n \"light_id\": light_id,\n \"color\": \"red\",\n }",
"def publish_light(self, light: LightInfo) -> None:\n self._mqtt.publish(f\"light/{light.uniqueid}\", light, retain=True)",
"def on(\n id: int = typer.Argument(1),\n ip: str = typer.Option(..., \"--ip\", \"-i\", envvar=\"HUE_BRIDGE_IP\"),\n user: str = typer.Option(..., \"--user\", \"-u\", envvar=\"HUE_BRIDGE_USER\"),\n):\n light = Light(id, ip=ip, user=user)\n resp = asyncio.run(light.power_on())\n console.print(f\"[{ip}] Light {id} On:\\n{json.dumps(resp, indent=2)}\")",
"async def Pulse_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.apply_effect_to_light(light_id, pulse, color=color)\n return {\n \"action\": \"effect\",\n \"name\": \"pulse\",\n \"light_id\": light_id,\n \"color\": color,\n }",
"def check_light(light: pykulersky.Light):\n light.connect()\n light.get_color()",
"async def Turn_Off_Light(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0)\n) -> Dict[str, Any]:\n busylightapi.manager.light_off(light_id)\n return {\n \"action\": \"off\",\n \"light_id\": light_id,\n }",
"def turnLightOn(ID):\n dislin.litmod(ID, 'ON')",
"async def Blink_Light_With_Color_and_Speed(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n speed: BlinkSpeed = Path(..., title=\"Speed: slow, medium, fast\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_blink(light_id, color, speed)\n return {\n \"action\": \"blink\",\n \"light_id\": light_id,\n \"color\": color,\n \"speed\": speed,\n }",
"def set_light(self, idx, light):\n\n # Don't set a light that doesn't need its thing set\n if self.application.settings[\"lights_state\"][idx] == light:\n return\n\n # synchronize our internal representation of the lights\n self.application.settings[\"lights_state\"][idx] = light\n\n packed_cmd = srsly.pack_light_data(idx, light)\n srsly.write_light_cmd(\n self.application.settings['serial_connection'],\n packed_cmd,\n sleep=self.application.settings[\"refresh_rate\"])",
"def setLight(self, id, position, diffuse, specular, ambient):\r\n\t\t\r\n\t\tself.lights[id].set(position, diffuse, specular, ambient)",
"def getLight(self):\n return self.light",
"def add_light(self, light):\n # convert from a vtk type if applicable\n if isinstance(light, _vtk.vtkLight) and not isinstance(light, pyvista.Light):\n light = pyvista.Light.from_vtk(light)\n\n if not isinstance(light, pyvista.Light):\n raise TypeError(f'Expected Light instance, got {type(light).__name__} instead.')\n self._lights.append(light)\n self.AddLight(light)\n self.Modified()\n\n # we add the renderer to add/remove the light actor if\n # positional or cone angle is modified\n light.add_renderer(self)",
"def build_light(self, item):\n\n # Validete NMS object.\n if \"ObjectID\" not in item:\n return\n\n # Get object id from item.\n object_id = item[\"ObjectID\"]\n # Find light data\n if object_id not in self.lights_dictionary:\n return\n\n # Build Lights\n light_information = self.lights_dictionary[object_id]\n for idx, light_values in enumerate(light_information.values()):\n # Get Light Properties.\n light_type = light_values[\"type\"]\n light_location = light_values[\"location\"]\n\n # Create light.\n light = bpy.ops.object.light_add(\n type=light_type.upper(),\n location=light_location\n )\n light = bpy.context.object\n light[\"NMS_LIGHT\"] = True\n light.name = \"{0}_light{1}\".format(item.name, idx)\n data_copy = deepcopy(light_values)\n\n # Remove invalid blender properties.\n data_copy.pop(\"type\")\n data_copy.pop(\"location\")\n\n # Apply all other properties to blender object.\n for key, value in data_copy.items():\n if isinstance(value, list):\n value = mathutils.Vector(tuple(value))\n setattr(light.data, key, value)\n\n # Parent to object.\n utils.parent(light, item)\n\n # Disable Selection.\n light.hide_viewport = True\n light.hide_select = True",
"def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")",
"async def light(self) -> None:\n self.lit = True\n await self.run_command(\"miner fault_light on\")\n print(\"light \" + self.ip)",
"async def lights(self, context):\n\n await random_image(context, 'lights')",
"def set_light_rgb(self, light, color):\n light_kwargs = { \"rgb_color\": color }\n if not self.use_current_brightness:\n light_kwargs[\"brightness\"] = 255\n self.turn_on(light, **light_kwargs)",
"def place_red_light():\n glMatrixMode(GL_MODELVIEW)\n lx = 4.0\n ly = light_height\n lz = 2.0\n light_position = [lx, ly, lz, 1.0]\n lightr_ambient = [1.0, 0, 0, 1] # red\n lightb_diffuse = [0.4, 0.4, 0.6, 1] # blue\n lightb_specular = [0.0, 0, 0.8, 1] # blue\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n\n\n # For Light 1 (red), set position, ambient, diffuse, and specular values\n glLightfv(GL_LIGHT1, GL_POSITION, light_position)\n glLightfv(GL_LIGHT1, GL_AMBIENT, lightr_ambient)\n glLightfv(GL_LIGHT1, GL_DIFFUSE, lightb_diffuse)\n glLightfv(GL_LIGHT1, GL_SPECULAR, lightb_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! Otherwise disabled\n glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if red_light:\n glLightf(GL_LIGHT1, GL_SPOT_CUTOFF, 45.0)\n glLightf(GL_LIGHT1, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT1, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT1, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT1, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT1)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, 0, 0)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()",
"def set_light(self, light, num=0):\r\n #TODO (pg) need MAXLIGHTS global variable, room for two now but shader\r\n # only uses 1.\r\n if num > 1 or num < 0:\r\n num = 0\r\n stn = 24 + num * 9\r\n self.unif[stn:(stn + 3)] = light.lightpos[0:3]\r\n self.unif[(stn + 3):(stn + 6)] = light.lightcol[0:3]\r\n self.unif[(stn + 6):(stn + 9)] = light.lightamb[0:3]",
"def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False",
"def add_light(self, name, light):\n if isinstance(light, AmbientLight):\n raise ValueError('Set ambient light with set_ambient_light(), not with add_light()')\n if len(self._lights) == MAX_N_LIGHTS:\n raise ValueError('The maximum number of lights in a scene is capped at {}'.format(MAX_N_LIGHTS))\n if not isinstance(light, PointLight) and not isinstance(light, DirectionalLight):\n raise ValueError('Scene only supports PointLight and DirectionalLight types')\n self._lights[name] = light"
] | [
"0.72733927",
"0.6812936",
"0.6758126",
"0.6477925",
"0.63752294",
"0.624486",
"0.62303555",
"0.61155444",
"0.6082006",
"0.5978591",
"0.59443074",
"0.58976763",
"0.58453494",
"0.5826358",
"0.58137035",
"0.5706582",
"0.56572086",
"0.5648138",
"0.56393903",
"0.56304437",
"0.56218606",
"0.55991673",
"0.55713767",
"0.55710113",
"0.5539848",
"0.5508792",
"0.54889804",
"0.5484767",
"0.5481671",
"0.54690796"
] | 0.7258166 | 1 |
Run the list of conf objects in a multiprocessing pool | def pooling(lconf, poolsize=10):
pool = Pool(poolsize)
pool.map(worker, lconf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forqs_parallel(configs):\n pool = Pool(21)\n pool.map(forqs_sim, configs)\n pool.close()\n pool.join()",
"def run(self):\n self.logger.info(\"Starting execution loop...\")\n with ThreadPoolExecutor(\n max_workers=len(self.config) + 10 - (len(self.config) % 10)\n ) as executor:\n for target in self.config:\n executor.submit(self.monitor, target)\n executor.shutdown(wait=True)",
"def run(config):\n for section in config.sections():\n if 'module' in [tuple[0] for tuple in config.items(section)]:\n read_metric = Process(target=worker.start_worker,\n args=(config[section]['module'], config[section]['worker'],\n float(config[section]['interval']), config),\n kwargs=(dict({item[0]: eval(item[1])\n for item in config.items(section+'_PARAMS')})))\n read_metric.start()\n consumer = Process(target=dbWriter.write,\n args=(section, config[section]['worker'], config))\n consumer.start()\n db_reader_template.start(config)",
"def conf_load_par_list(par_def):\n par_def = par_def[1:-1].split(',')\n par_list = list()\n for p in par_def:\n par_list.append(p.strip())\n return par_list",
"def setup_multi_processes(cfg):\n logger = get_root_logger()\n\n # set multi-process start method\n if platform.system() != 'Windows':\n mp_start_method = cfg.get('mp_start_method', None)\n current_method = mp.get_start_method(allow_none=False)\n if mp_start_method in ('fork', 'spawn', 'forkserver'):\n logger.info(\n f'Multi-processing start method is `{mp_start_method}`')\n mp.set_start_method(mp_start_method, force=True)\n else:\n logger.info(f'Multi-processing start method is `{current_method}`')\n\n # disable opencv multithreading to avoid system being overloaded\n opencv_num_threads = cfg.get('opencv_num_threads', None)\n if isinstance(opencv_num_threads, int):\n logger.info(f'OpenCV num_threads is `{opencv_num_threads}`')\n cv2.setNumThreads(opencv_num_threads)\n else:\n logger.info(f'OpenCV num_threads is `{cv2.getNumThreads()}')\n\n if cfg.data.train_dataloader.workers_per_gpu > 1:\n # setup OMP threads\n # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa\n omp_num_threads = cfg.get('omp_num_threads', None)\n if 'OMP_NUM_THREADS' not in os.environ:\n if isinstance(omp_num_threads, int):\n logger.info(f'OMP num threads is {omp_num_threads}')\n os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)\n else:\n logger.info(f'OMP num threads is {os.environ[\"OMP_NUM_THREADS\"] }')\n\n # setup MKL threads\n if 'MKL_NUM_THREADS' not in os.environ:\n mkl_num_threads = cfg.get('mkl_num_threads', None)\n if isinstance(mkl_num_threads, int):\n logger.info(f'MKL num threads is {mkl_num_threads}')\n os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)\n else:\n logger.info(f'MKL num threads is {os.environ[\"MKL_NUM_THREADS\"]}')",
"def batchLoopExec(serverList, cmdList):\n\n for server in serverList:\n #env.hosts = [ server['host'] ]\n env.host_string = server['host']\n env.port = server['port']\n env.user = server['user']\n env.password = server['password']\n for cmd in cmdList:\n exeCmd(cmd)",
"def postConf(conf):\n\n rootbconf = conf.bconfManager.root\n btypeDir = rootbconf.selectedBuildTypeDir\n rootdir = rootbconf.rootdir\n\n for taskParams in conf.allOrderedTasks:\n\n features = taskParams['features']\n cmdArgs = taskParams.get('run', None)\n\n if 'runcmd' not in features:\n if cmdArgs is not None:\n features.append('runcmd')\n else:\n continue\n\n if cmdArgs is None:\n cmdArgs = {}\n elif not isinstance(cmdArgs, maptype):\n cmdArgs = { 'cmd' : cmdArgs }\n\n cmdArgs.update({\n 'name' : taskParams['name'],\n 'timeout': cmdArgs.get('timeout', None),\n 'env' : cmdArgs.get('env', {}),\n 'repeat' : cmdArgs.get('repeat', 1),\n })\n\n taskParams['run'] = cmdArgs\n\n cwd = cmdArgs.get('cwd', None)\n if cwd:\n try:\n cwd = cwd.abspath()\n except AttributeError:\n startdir = cmdArgs.get('startdir', taskParams['$bconf'].startdir)\n cwd = PathsParam(cwd, startdir, rootdir).abspath()\n else:\n cwd = btypeDir\n cmdArgs['cwd'] = cwd\n\n cmdArgs['$type'] = ''\n cmd = cmdArgs.get('cmd', None)\n if cmd and callable(cmd):\n # it's needed because a function cannot be saved in a file as is\n cmdArgs['cmd'] = cmd.__name__\n cmdArgs['shell'] = False\n cmdArgs['$type'] = 'func'",
"def run_modules():\n pool = Pool()\n if module_inc_opts != ['']:\n runmods = gen_runlist(module_inc_opts, available_mods)\n if not multiprocessing:\n for module in runmods:\n modExec(module)\n else:\n runner = pool.map(modExec, runmods)\n\n elif module_exc_opts != ['']:\n runmods = [x for x in available_mods if x not in gen_runlist(module_exc_opts, available_mods)]\n\n if not multiprocessing:\n for module in runmods:\n modExec(module)\n else:\n runner = pool.map(modExec, runmods)\n\n pool.close()\n pool.join()",
"def run_async(self, examples, pool):\n return pool.imap(self, examples)",
"def tasks():",
"def _run_parallel(parameters):\n\n # make parallel context global\n global pc\n\n print parameters\n # create parallel context instance\n pc = h.ParallelContext()\n\n print 'i am', pc.id(), 'of', pc.nhost()\n # start workers, begins an infinitely loop where master workers posts jobs and workers pull jobs until all jobs are finished\n pc.runworker()\n \n # print len(parameters)\n # # # distribute experiment and parameters to workers\n for param in parameters:\n # print len(parameters)\n # print param\n pc.submit(_f_parallel, param)\n # print param\n\n # # continue runnning until all workers are finished\n while pc.working():\n print pc.id(), 'is working'\n\n # # close parallel context \n pc.done()",
"def launch(config_list):\n config = METplusConfig()\n logger = config.log()\n\n # set config variable for current time\n config.set('config', 'CLOCK_TIME',\n datetime.datetime.now().strftime('%Y%m%d%H%M%S'))\n\n config_format_list = []\n # Read in and parse all the conf files and overrides\n for config_item in config_list:\n if isinstance(config_item, str):\n logger.info(f\"Parsing config file: {config_item}\")\n config.read(config_item)\n config_format_list.append(config_item)\n else:\n # set explicit config override\n section, key, value = config_item\n if not config.has_section(section):\n config.add_section(section)\n\n logger.info(f\"Parsing override: [{section}] {key} = {value}\")\n config.set(section, key, value)\n config_format_list.append(f'{section}.{key}={value}')\n\n # move all config variables from old sections into the [config] section\n config._move_all_to_config_section()\n\n # save list of user configuration files in a variable\n config.set('config', 'CONFIG_INPUT', ','.join(config_format_list))\n\n # get OUTPUT_BASE to make sure it is set correctly so the first error\n # that is logged relates to OUTPUT_BASE, not LOG_DIR, which is likely\n # only set incorrectly because OUTPUT_BASE is set incorrectly\n # Initialize the output directories\n util.mkdir_p(config.getdir('OUTPUT_BASE'))\n\n # set and log variables to the config object\n get_logger(config)\n\n final_conf = config.getstr('config', 'METPLUS_CONF')\n\n # create final conf directory if it doesn't already exist\n final_conf_dir = os.path.dirname(final_conf)\n if not os.path.exists(final_conf_dir):\n os.makedirs(final_conf_dir)\n\n # set METPLUS_BASE/PARM_BASE conf so they can be referenced in other confs\n config.set('config', 'METPLUS_BASE', METPLUS_BASE)\n config.set('config', 'PARM_BASE', PARM_BASE)\n\n with open(final_conf, 'wt') as file_handle:\n config.write(file_handle)\n\n return config",
"def launch_processes(run_type, tests, run_module, config):\n test_summaries = {}\n with mp.Pool(livvkit.pool_size) as pool:\n results = [\n pool.apply_async(pool_worker, (run_type, run_module.run_suite, t, config[t])) for t in tests\n ]\n\n for t, r in zip(tests, results):\n test_summaries[t] = r.get()\n\n return test_summaries",
"async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download",
"def parallel(files):\n return list(map(join_process, list(map(start_process, files))))",
"def launch(config):\n \n launch_with_configs([config])",
"def build_dlosenv_multipro(catalog_name, n_mocks, Nthreads=8):\n \n if isinstance(n_mocks, list): \n n_mock_list = n_mocks\n else:\n n_mock_list = range(1, n_mocks + 1)\n\n n_NN_list = [1, 3, 5, 7, 10] \n\n pool = Pewl(processes=Nthreads)\n mapfn = pool.map\n \n arglist = [] \n for i_mock in n_mock_list: \n for n_NN in n_NN_list: \n arglist.append([\n {\n 'catalog': {'name': 'nseries', 'n_mock': i_mock}, \n 'correction': {'name': 'upweight'}\n }, \n n_NN\n ])\n \n mapfn( build_dlosenv_wrapper, [arg for arg in arglist])\n\n pool.close()\n pool.terminate()\n pool.join() \n\n return None",
"def haiku_multiprocessing(paths, num_processes=2):\n with Pool(num_processes) as pool:\n results = pool.map(single_process, paths)\n return results",
"def batchSyncExec(serverList, cmdList):\n for cmd in cmdList:\n for server in serverList:\n env.host_string = server['host']\n env.port = server['port']\n env.user = server['user']\n env.password = server['password']\n exeCmd(cmd)",
"def setConcurrentTasks(self, config):\n self.concurrentTasks = [{'func': self.gatherActiveDataStats, 'duration': config.activeDuration}, \n {'func': self.gatherArchivedDataStats, 'duration': config.archiveDuration}]",
"def config(c):\n for sp_ns in ns_foreach_task_subdir(c):\n try:\n sp_ns.tasks.config(c)\n except UnexpectedExit:\n pass",
"def get_list_of_configlets(configlets):\n\n futures_list = []\n results = []\n\n with ThreadPoolExecutor(max_workers=40) as executor:\n for configlet in configlets:\n futures = executor.submit(clnt.api.get_configlet_by_name, configlet)\n futures_list.append(futures)\n\n for future in futures_list:\n try:\n result = future.result(timeout=60)\n results.append(result)\n except Exception:\n results.append(None)\n return results",
"def _configure_all_tasks(self, config, job_exe, job_type):\n\n config.set_task_ids(job_exe.get_cluster_id())\n\n for task_type in config.get_task_types():\n # Configure env vars describing allocated task resources\n env_vars = {}\n nvidia_docker_label = None\n\n for resource in config.get_resources(task_type).resources:\n env_name = 'ALLOCATED_%s' % normalize_env_var_name(resource.name)\n env_vars[env_name] = '%.1f' % resource.value # Assumes scalar resources\n if resource.name == \"gpus\" and int(resource.value) > 0:\n gpu_list = GPUManager.get_nvidia_docker_label(job_exe.node_id, job_exe.job_id)\n nvidia_docker_label = DockerParameter('env','NVIDIA_VISIBLE_DEVICES={}'.format(gpu_list.strip(',')))\n\n # Configure env vars for Scale meta-data\n env_vars['SCALE_JOB_ID'] = unicode(job_exe.job_id)\n env_vars['SCALE_EXE_NUM'] = unicode(job_exe.exe_num)\n if job_exe.recipe_id:\n env_vars['SCALE_RECIPE_ID'] = unicode(job_exe.recipe_id)\n if job_exe.batch_id:\n env_vars['SCALE_BATCH_ID'] = unicode(job_exe.batch_id)\n\n # Configure workspace volumes\n workspace_volumes = {}\n for task_workspace in config.get_workspaces(task_type):\n logger.debug(self._workspaces)\n workspace_model = self._workspaces[task_workspace.name]\n # TODO: Should refactor workspace broker to return a Volume object and remove BrokerVolume\n if workspace_model.volume:\n vol_name = get_workspace_volume_name(job_exe, task_workspace.name)\n cont_path = get_workspace_volume_path(workspace_model.name)\n if workspace_model.volume.host:\n host_path = workspace_model.volume.remote_path\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=True, host_path=host_path)\n else:\n driver = workspace_model.volume.driver\n driver_opts = {}\n # TODO: Hack alert for nfs broker, as stated above, we should return Volume from broker\n if driver == 'nfs':\n driver_opts = {'share': workspace_model.volume.remote_path}\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=False, driver=driver,\n driver_opts=driver_opts)\n workspace_volumes[task_workspace.name] = volume\n\n config.add_to_task(task_type, env_vars=env_vars, wksp_volumes=workspace_volumes)\n\n # Labels for metric grouping\n job_id_label = DockerParameter('label', 'scale-job-id={}'.format(job_exe.job_id))\n job_execution_id_label = DockerParameter('label', 'scale-job-execution-id={}'.format(job_exe.exe_num))\n job_type_name_label = DockerParameter('label', 'scale-job-type-name={}'.format(job_type.name))\n job_type_version_label = DockerParameter('label', 'scale-job-type-version={}'.format(job_type.version))\n main_label = DockerParameter('label', 'scale-task-type=main')\n if nvidia_docker_label:\n nvidia_runtime_param = DockerParameter('runtime', 'nvidia')\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label, nvidia_docker_label, nvidia_runtime_param])\n else:\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label])\n\n if not job_type.is_system:\n pre_label = DockerParameter('label', 'scale-task-type=pre')\n post_label = DockerParameter('label', 'scale-task-type=post')\n config.add_to_task('pre', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, pre_label])\n config.add_to_task('post', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, post_label])\n\n # Configure tasks for logging\n if 
settings.LOGGING_ADDRESS is not None:\n log_driver = DockerParameter('log-driver', 'fluentd')\n fluent_precision = DockerParameter('log-opt', 'fluentd-sub-second-precision=true')\n log_address = DockerParameter('log-opt', 'fluentd-address=%s' % settings.LOGGING_ADDRESS)\n if not job_type.is_system:\n pre_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('pre'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('pre', docker_params=[log_driver, fluent_precision, log_address, pre_task_tag])\n post_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('post'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('post', docker_params=[log_driver, fluent_precision, log_address, post_task_tag])\n # TODO: remove es_urls parameter when Scale no longer supports old style job types\n\n # Post task needs ElasticSearch URL to grab logs for old artifact registration\n es_param = DockerParameter('env', 'ELASTICSEARCH_URL=%s' % settings.ELASTICSEARCH_URL)\n config.add_to_task('post', docker_params=[es_param])\n main_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('main'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('main', docker_params=[log_driver, fluent_precision, log_address, main_task_tag])",
"def simulate_many(configs, top_type, env_type=SimEnvironment, jobs=None):\n pool_size = min(len(configs), multiprocessing.cpu_count())\n if jobs is not None:\n pool_size = min(pool_size, jobs)\n pool = multiprocessing.Pool(pool_size)\n sim_args = [(config, top_type, env_type) for config in configs]\n promise = pool.map_async(_simulate_trampoline, sim_args)\n if configs[0].get('sim.progress.enable'):\n _consume_progress(configs, jobs)\n return promise.get()",
"def run(self, *args, **kwargs) -> None:\n loop = tqdm(self.configs, desc='Configurations')\n for cfg in loop:\n loop.set_postfix_str(cfg.experiment_cfg['name'])\n for i in range(cfg.num_models):\n filename = None\n run_id = None\n if cfg.filenames is not None:\n if isinstance(cfg.filenames, str):\n filename = cfg.filenames\n else:\n filename = cfg.filenames[i]\n elif cfg.run_ids is not None:\n run_id = cfg.run_ids[i]\n\n run_cfg = modelgen_cfg_to_runner_cfg(cfg, run_id=run_id, filename=filename)\n runner = Runner(run_cfg, persist_metadata=cfg.experiment_cfg)\n runner.run()\n\n # clear up memory between runs\n torch.cuda.empty_cache()",
"def apply_configs(task):\n\n if \"3750X\" in task.host[\"sw_model\"]:\n # run 3750X function\n aaa_3750x(task)\n\n # apply global config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_global.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x global configuration applied ***\")\n # apply snmp config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_snmp.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: SNMP configuration applied ***\")\n # apply interface config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_intf.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x interface configuration applied ***\")",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def _configure_and_run_multiqc(\n analysis_paths_list,project_igf_id,sample_igf_id,work_dir,\n genome_build,multiqc_template_file,singularity_mutiqc_image,\n tool_order_list,multiqc_params,multiqc_exe='muliqc',dry_run=False):\n try:\n ### final check\n if len(analysis_paths_list)== 0:\n raise ValueError('No analysis file found for multiqc report')\n ### write a multiqc input file\n multiqc_input_file = \\\n os.path.join(work_dir,'multiqc.txt')\n with open(multiqc_input_file,'w') as fp:\n for file_path in analysis_paths_list:\n check_file_path(file_path)\n fp.write('{}\\n'.format(file_path))\n date_stamp = get_date_stamp()\n #\n # write multiqc config file\n #\n check_file_path(multiqc_template_file)\n multiqc_conf_file = \\\n os.path.join(\n work_dir,os.path.basename(multiqc_template_file))\n template_env = \\\n Environment(\n loader=\\\n FileSystemLoader(\n searchpath=os.path.dirname(multiqc_template_file)),\n autoescape=select_autoescape(['html', 'xml']))\n multiqc_conf = \\\n template_env.\\\n get_template(\n os.path.basename(multiqc_template_file))\n multiqc_conf.\\\n stream(\n project_igf_id=project_igf_id,\n sample_igf_id=sample_igf_id,\n tag_name='Single cell gene expression - {0}'.format(genome_build),\n date_stamp=date_stamp,\n tool_order_list=tool_order_list).\\\n dump(multiqc_conf_file)\n #\n # configure multiqc run\n #\n multiqc_report_title = \\\n 'Project:{0},Sample:{1}'.\\\n format(project_igf_id,sample_igf_id)\n multiqc_cmd = [\n multiqc_exe,\n '--file-list',multiqc_input_file,\n '--outdir',work_dir,\n '--title',multiqc_report_title,\n '-c',multiqc_conf_file] # multiqc base parameter\n if not isinstance(multiqc_params,list):\n raise TypeError(\n 'Expecting a list of params for multiqc run, got: {0}'.\\\n format(type(multiqc_params)))\n multiqc_cmd.\\\n extend(multiqc_params)\n #\n # configure singularity run\n #\n bind_dir_list = \\\n [os.path.dirname(path)\n for path in analysis_paths_list]\n bind_dir_list.append(work_dir)\n bind_dir_list = list(set(bind_dir_list))\n cmd = \\\n execute_singuarity_cmd(\n image_path=singularity_mutiqc_image,\n command_string=' '.join(multiqc_cmd),\n bind_dir_list=bind_dir_list,\n dry_run=dry_run)\n if dry_run:\n return None,None,cmd\n else:\n multiqc_html = None\n multiqc_data = None\n for root, _,files in os.walk(top=work_dir):\n for file in files:\n if fnmatch.fnmatch(file, '*.html'):\n multiqc_html = os.path.join(root,file)\n if fnmatch.fnmatch(file, '*.zip'):\n multiqc_data = os.path.join(root,file)\n if multiqc_html is None or \\\n multiqc_data is None:\n raise IOError('Failed to get Multiqc output file')\n check_file_path(multiqc_html)\n check_file_path(multiqc_data)\n return multiqc_html,multiqc_data,cmd\n except Exception as e:\n raise ValueError(\n 'Failed to configure and run multiqc, error: {0}'.\\\n format(e))",
"def main(save_dir, img_dir, df, fname_col):\n\tpool = mp.Pool(mp.cpu_count())\n\tresult = pool.map(multi_run_wrapper,[(save_dir, img_dir, \n\t\t\t\t\t\tfname) for fname in df[fname_col].values[0:4]])",
"def run(self, config_file=None, partic_list=None):\n\n from time import strftime\n from qap_utils import raise_smart_exception, \\\n check_config_settings\n\n # in case we are overloading\n if config_file:\n from qap.script_utils import read_yml_file\n self._config = read_yml_file(config_file)\n self.validate_config_dict()\n self._config[\"pipeline_config_yaml\"] = config_file\n \n if not self._config:\n raise Exception(\"config not found!\")\n\n if partic_list:\n self._config[\"subject_list\"] = partic_list\n\n # Get configurations and settings\n check_config_settings(self._config, \"num_processors\")\n check_config_settings(self._config, \"num_sessions_at_once\")\n check_config_settings(self._config, \"available_memory\")\n check_config_settings(self._config, \"output_directory\")\n check_config_settings(self._config, \"working_directory\")\n\n self._num_bundles_at_once = 1\n write_report = self._config.get('write_report', False)\n\n if \"cluster_system\" in self._config.keys() and not self._bundle_idx:\n res_mngr = self._config[\"cluster_system\"]\n if (res_mngr == None) or (\"None\" in res_mngr) or \\\n (\"none\" in res_mngr):\n self._platform = None\n else:\n platforms = [\"SGE\", \"PBS\", \"SLURM\"]\n self._platform = str(res_mngr).upper()\n if self._platform not in platforms:\n msg = \"The resource manager %s provided in the pipeline \"\\\n \"configuration file is not one of the valid \" \\\n \"choices. It must be one of the following:\\n%s\" \\\n % (self._platform, str(platforms))\n raise_smart_exception(locals(), msg)\n else:\n self._platform = None\n\n # Create output directory\n try:\n os.makedirs(self._config[\"output_directory\"])\n except:\n if not op.isdir(self._config[\"output_directory\"]):\n err = \"[!] Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % self._config[\"output_directory\"]\n raise Exception(err)\n else:\n pass\n\n # Create working directory\n try:\n os.makedirs(self._config[\"working_directory\"])\n except:\n if not op.isdir(self._config[\"working_directory\"]):\n err = \"[!] Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % self._config[\"working_directory\"]\n raise Exception(err)\n else:\n pass\n\n results = []\n\n # set up callback logging\n import logging\n from nipype.pipeline.plugins.callback_log import log_nodes_cb\n\n cb_log_filename = os.path.join(self._config[\"output_directory\"],\n \"callback.log\")\n # Add handler to callback log file\n cb_logger = logging.getLogger('callback')\n cb_logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(cb_log_filename)\n cb_logger.addHandler(handler)\n\n # settle run arguments (plugins)\n self.runargs = {}\n self.runargs['plugin'] = 'MultiProc'\n self.runargs['plugin_args'] = \\\n {'memory_gb': int(self._config[\"available_memory\"]),\n 'status_callback': log_nodes_cb}\n n_procs = {'n_procs': self._config[\"num_processors\"]}\n self.runargs['plugin_args'].update(n_procs)\n\n # load the participant list file into dictionary\n subdict = self.load_sublist()\n\n # flatten the participant dictionary\n self._sub_dict = self.create_session_dict(subdict)\n\n # create the list of bundles\n self._bundles_list = self.create_bundles()\n num_bundles = len(self._bundles_list)\n\n if not self._bundle_idx:\n # want to initialize the run-level log directory (not the bundle-\n # level) only the first time we run the script, due to the\n # timestamp. 
if sub-nodes are being kicked off by a batch file on\n # a cluster, we don't want a new timestamp for every new node run\n self._run_log_dir = op.join(self._config['output_directory'],\n '_'.join([self._run_name, \"logs\"]),\n '_'.join([strftime(\"%Y%m%d_%H_%M_%S\"),\n \"%dbundles\" % num_bundles]))\n\n if self._run_log_dir:\n if not os.path.isdir(self._run_log_dir):\n try:\n os.makedirs(self._run_log_dir)\n except:\n if not op.isdir(self._run_log_dir):\n err = \"[!] Log directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % self._run_log_dir\n raise Exception(err)\n else:\n pass\n\n if num_bundles == 1:\n self._config[\"num_sessions_at_once\"] = \\\n len(self._bundles_list[0])\n\n # Start the magic\n if not self._platform and not self._bundle_idx:\n # not a cluster/grid run\n for idx in range(1, num_bundles+1):\n results.append(self.run_one_bundle(idx))\n\n elif not self._bundle_idx:\n # there is a self._bundle_idx only if the pipeline runner is run\n # with bundle_idx as a parameter - only happening either manually,\n # or when running on a cluster\n self.submit_cluster_batch_file(num_bundles)\n\n else:\n # if there is a bundle_idx supplied to the runner\n results = self.run_one_bundle(self._bundle_idx)"
] | [
"0.65946066",
"0.63634336",
"0.6124032",
"0.59440327",
"0.5874448",
"0.5866036",
"0.5825758",
"0.5822465",
"0.5777083",
"0.5734233",
"0.57098234",
"0.56977147",
"0.5667062",
"0.5592518",
"0.55613124",
"0.5558571",
"0.5541053",
"0.55395705",
"0.5511079",
"0.55040234",
"0.5491125",
"0.54881907",
"0.5481967",
"0.54742724",
"0.5472141",
"0.5443269",
"0.5423318",
"0.5404398",
"0.5403037",
"0.53934336"
] | 0.7368655 | 0 |
Walk the raw_metrics and pass them over to the agent as a counter or gauge type and with the correct units defined | def add_stats(self):
units = self.get_unit_map()
for metric in self.raw_metrics:
unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))
if metric_type == "counter":
# Unit/Second
unit = "/".join((unit, "Second"))
self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)
else:
self.add_gauge_value(metric, unit, self.raw_metrics[metric]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))",
"def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)",
"def set_metrics(self):",
"def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))",
"def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def metrics_group():",
"def compute_metrics(self):\n pass",
"def read_metrics(self):\n raise NotImplementedError()",
"def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])",
"def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter",
"def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def evaluate_raw(self):\n message = \"Use the meter.evaluate(data_collection) method for\" \\\n \"YamlDefinedMeters.\"\n raise NotImplementedError(message)",
"def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics",
"def raw_measure(self) -> List[int]:\n # name, command, signals, delay\n return self._run_profile((\"raw_measure\", [0x20, 0x50], 2, 0.025))",
"def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []",
"def _prepare_score_metrics(self, local_range=5, axis_infos=Axis3D_infos):\n if self.verbose:\n print(f\"- Calculate scoring metrics\")\n self.chr_2_metrics = {}\n if not hasattr(self, 'chr_2_cand_hzxys') or not hasattr(self, 'chr_2_cand_ids'):\n _chr_2_cand_hzxys = {}\n _chr_2_cand_ids = {}\n\n for _chr_name, _chr_centers in self.chr_2_homolog_centers.items():\n if hasattr(self, 'chr_2_cand_hzxys') and hasattr(self, 'chr_2_cand_ids') :\n _chr_hzxys = self.chr_2_cand_hzxys[_chr_name]\n _chr_ids = self.chr_2_cand_ids[_chr_name]\n else:\n # get coordinates\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n _chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n _chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate metrics\n if hasattr(self, 'chr_2_homolog_hzxys_list'):\n _ref_hzxys_list = self.chr_2_homolog_hzxys_list.get(_chr_name, None)\n else:\n _ref_hzxys_list = None\n self.chr_2_metrics[_chr_name] = prepare_score_metrics_by_chr(\n _chr_hzxys, _chr_ids, _chr_centers, \n prev_homolog_hzxys=_ref_hzxys_list, \n local_range=local_range)\n # add this attribute if not given previously\n if not hasattr(self, 'chr_2_cand_hzxys') or not hasattr(self, 'chr_2_cand_ids'):\n self.chr_2_cand_hzxys = _chr_2_cand_hzxys\n self.chr_2_cand_ids = _chr_2_cand_ids\n return",
"def get_run_metrics_handle(run_dir):\n #print(\"Examining: {}\".format(run_dir))\n\n valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)\n for v2l in (py_interop_run.Tile, py_interop_run.ExtendedTile):\n valid_to_load[v2l] = 1\n\n run_metrics = py_interop_run_metrics.run_metrics()\n run_metrics.read(run_dir, valid_to_load)\n\n return run_metrics",
"def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def consume_units(self, units):\n pass",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)",
"def get_measurements(self):\n metrics = {}\n for key in self.fields.keys():\n metrics[key] = []\n # What's in output:\n # proc_pid date virt res shrd cpu mem power gpus_power\n while not self.queue.empty():\n data = self.queue.get().strip().split()\n for field in self.fields:\n tp = self.fields[field]['type']\n idx = self.fields[field]['index']\n count = self.fields[field]['count']\n if count == -1:\n metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))\n elif count == 0:\n metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])\n else:\n metrics[field].append([\n ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)\n ])\n return metrics",
"def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]"
] | [
"0.6258945",
"0.61666566",
"0.60249275",
"0.6001037",
"0.5927554",
"0.592675",
"0.57163465",
"0.57082105",
"0.56740946",
"0.5633948",
"0.56112427",
"0.5591662",
"0.5533326",
"0.5493495",
"0.54703236",
"0.54512477",
"0.54392433",
"0.5438802",
"0.54218864",
"0.54157937",
"0.5402703",
"0.5397436",
"0.53524804",
"0.5351322",
"0.5349167",
"0.5348826",
"0.5336088",
"0.53167844",
"0.53078514",
"0.52910113"
] | 0.682586 | 0 |
Walk the META dict and build a category/metric => [unit, type] map | def get_unit_map(self):
units = dict()
for t in META:
for c in META[t]:
for i in META[t][c]:
unit = DEFAULT_UNIT
if (isinstance(i, (tuple, list))):
val, unit = i
else:
val = i
# category/metric
n = "/".join((c, val))
units[n] = (unit, t)
return units | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_metric(self, slug):\n results = OrderedDict()\n granularities = self._granularities()\n keys = self._build_keys(slug)\n for granularity, key in zip(granularities, keys):\n results[granularity] = self.r.get(key)\n return results",
"def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()",
"def calculate_metrics(self):\n \n for cv in self.cat_vals:\n cat_inds = np.where(self.category_values == cv)[0]\n weighted_difference = (self.z[cat_inds]-self.mz[cat_inds])/self.weight_values[cat_inds]\n resid = np.sqrt(np.sum(np.square(weighted_difference))/(cat_inds.size))\n self.metric[str(cv)] = resid\n \n return self.metric",
"def measure_dict():\n out = base_dict()\n out['mro']['current'] = ['Measure']\n out['name']['current'] = 'Measure'\n ao(out, 'nSamples', 'Integer', 1, readLevel=3)\n ao(out, 'id', 'String', 'Conversion source ID', readLevel=3)\n ao(out, 'uid', 'String', 'Unique ID', readLevel=5)\n ao(out, 'date', 'Date', '00:00:00 01/01/2000', name='Test date')\n ao(out, 'zerotime', 'Float', name='Acquisition starting time', readLevel=4)\n ao(out, 'elapsed', 'Float', name='Test duration', unit='second')\n ao(out, 'operator', 'String', name='Operator')\n return out",
"def get_metrics(self, slug_list):\n # meh. I should have been consistent here, but I'm lazy, so support these\n # value names instead of granularity names, but respect the min/max\n # granularity settings.\n keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']\n key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}\n keys = [key_mapping[gran] for gran in self._granularities()]\n\n results = []\n for slug in slug_list:\n metrics = self.r.mget(*self._build_keys(slug))\n if any(metrics): # Only if we have data.\n results.append((slug, dict(zip(keys, metrics))))\n return results",
"def static_metrics(self) -> dict[str, float | int]:\n return self.performance[\"meta\"]",
"def test_unit_of_measurement(self):\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n assert self.sensor_dict[name][\"units\"] == sensor.unit_of_measurement",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def get_unit_info(config_dict):\n try:\n group_dict = config_dict['StdReport']['StandardReport']['Units']['Groups']\n # Look for a strict superset of the group settings:\n if all(group_dict[group] == us_group[group] for group in us_group):\n return 'us'\n elif all(group_dict[group] == metric_group[group] for group in metric_group):\n return 'metric'\n elif all(group_dict[group] == metricwx_group[group] for group in metricwx_group):\n return 'metricwx'\n except KeyError:\n return None",
"def test_get_derived_metric_tags(self):\n pass",
"def get_sample_info(lines):\r\n mapping_data, header, comments = parse_mapping_file(lines)\r\n labels = [\"from\", \"to\", \"eweight\", \"consensus_lin\"]\r\n node_labels = [\"node_name\", \"node_disp_name\", \"ntype\", \"degree\",\r\n \"weighted_degree\", \"consensus_lin\"]\r\n cat_by_sample = {}\r\n sample_by_cat = defaultdict(list)\r\n meta_dict = {}\r\n category_labels = header[1:-1]\r\n labels.extend(category_labels)\r\n node_labels.extend(category_labels)\r\n label_list = [[] for c in category_labels]\r\n for r in mapping_data:\r\n categories = r[0:len(category_labels) + 1]\r\n sample = categories[0]\r\n meta_dict[sample] = ['\\t'.join(categories[1:]), 0]\r\n\r\n cat_by_sample[sample] = [(l.strip(), c.strip())\r\n for l, c in zip(category_labels, categories[1:])]\r\n\r\n cat_list = []\r\n for i, (l, c) in enumerate(zip(category_labels, categories[1:])):\r\n if c not in label_list[i]:\r\n label_list[i].append(c)\r\n l = l.strip()\r\n c = c.strip()\r\n cat_list.append((l, c))\r\n sample_by_cat[(l, c)].append(sample)\r\n\r\n cat_by_sample[sample] = cat_list\r\n\r\n return cat_by_sample, sample_by_cat, len(category_labels), meta_dict,\\\r\n labels, node_labels, label_list",
"def _get_metrics(one_hot: bool) -> Mapping[Text, Any]:\n if one_hot:\n return {\n # (name, metric_fn)\n 'acc': tf.keras.metrics.CategoricalAccuracy(),\n 'accuracy': tf.keras.metrics.CategoricalAccuracy(),\n 'top_1': tf.keras.metrics.CategoricalAccuracy(),\n 'top_5': tf.keras.metrics.TopKCategoricalAccuracy(\n k=5, name='top_5_accuracy'),\n }\n else:\n return {\n # (name, metric_fn)\n 'acc': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'top_1': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'top_5': tf.keras.metrics.SparseTopKCategoricalAccuracy(\n k=5, name='top_5_accuracy'),\n }",
"def init_data(self, obj):\n for col in self._category_aux:\n key_split = col.split(' ', 1)\n if len(key_split) > 1:\n key = key_split[1].replace(' ', '')\n minmax = key_split[0].lower()\n info_tuple = (key, minmax)\n if minmax != 'min' and minmax != 'max':\n info_tuple = (col.replace(' ', ''), '')\n else:\n info_tuple = (col.replace(' ', ''), '') \n self.__info_extract(obj, info_tuple[0], info_tuple[1], col)",
"def some_meta():\n res = {'Computation-somemeta-0': {'Experiment': 'somemeta',\n 'Parameters': {'w': 5, 'x': 1, 'z': 4, 'y':7},\n 'Results': {'f1': 15, 'f2': 51}},\n 'Computation-somemeta-1': {'Experiment': 'somemeta',\n 'Parameters': {'w': 6, 'x': 1, 'z': 4, 'y':7},\n 'Results': {'f1': 16, 'f2': 61}},\n 'Computation-somemeta-2': {'Experiment': 'somemeta',\n 'Parameters': {'w': 5, 'x': 2, 'z': 4, 'y':7},\n 'Results': {'f1': 25, 'f2': 52}},\n 'Computation-somemeta-3': {'Experiment': 'somemeta',\n 'Parameters': {'w': 6, 'x': 2, 'z': 4, 'y':7},\n 'Results': {'f1': 26, 'f2': 62}},\n 'Computation-somemeta-4': {'Experiment': 'somemeta',\n 'Parameters': {'w': 5, 'x': 3, 'z': 4, 'y':7},\n 'Results': {'f1': 35, 'f2': 53}},\n 'Computation-somemeta-5': {'Experiment': 'somemeta',\n 'Parameters': {'w': 6, 'x': 3, 'z': 4, 'y':7},\n 'Results': {'f1': 36, 'f2': 63}}}\n\n # Notice the ordering\n domain = {'x':[\"1\", \"2\", \"3\"], 'w':[\"5\", \"6\"]}\n metadata = {'z':\"4\", 'y':'7'}\n parameters = [\"x\", \"w\"]\n parameters.sort()\n metrics = [\"f1\", \"f2\"]\n metrics.sort()\n exp_name = \"somemeta\"\n return exp_name, metadata, parameters, domain, metrics, res",
"def configure_metrics(self):\n allowed = list(METRIC_LOOKUP.keys()) + [None]\n metrics = nn.ModuleDict()\n for k, m in self.branch_metrics.items():\n for metric_name in m:\n if metric_name not in allowed:\n raise ValueError(\n f\"Illegal metric given. Got: {metric_name}. Allowed: {allowed}.\"\n )\n\n if metric_name is not None:\n metric = METRIC_LOOKUP[metric_name]()\n else:\n metric = None\n\n metrics[f\"{k}_{metric_name}\"] = metric\n\n return metrics",
"def format_odometer(raw) -> dict:\r\n instruments: dict = {}\r\n for instrument in raw:\r\n instruments[instrument[\"type\"]] = instrument[\"value\"]\r\n if \"unit\" in instrument:\r\n instruments[instrument[\"type\"] + \"_unit\"] = instrument[\"unit\"]\r\n\r\n return instruments",
"def describe_detailed(self) -> str:\n one_to_one = []\n one_to_many = []\n many_to_one = []\n many_to_many = []\n cats_a: set[Category] = set()\n cats_b: set[Category] = set()\n for rule in self.rules:\n cats_a.update(rule.factors_categories_a.keys())\n cats_b.update(rule.factors_categories_b.keys())\n if rule.cardinality_a == \"one\" and rule.cardinality_b == \"one\":\n one_to_one.append(rule)\n elif rule.cardinality_a == \"one\":\n one_to_many.append(rule)\n elif rule.cardinality_b == \"one\":\n many_to_one.append(rule)\n else:\n many_to_many.append(rule)\n\n cat_a, cat_b = self.categorization_a.name, self.categorization_b.name\n\n r = f\"# Mapping between {cat_a} and {cat_b}\\n\\n\"\n r += \"## Simple direct mappings\\n\\n\"\n r += \"\\n\".join(\n rule.format_human_readable(categorization_separator=\"\")\n for rule in one_to_one\n )\n r += \"\\n\\n\"\n r += f\"## One-to-many mappings - one {cat_a} to many {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in one_to_many)\n r += \"\\n\\n\"\n r += f\"## Many-to-one mappings - many {cat_a} to one {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in many_to_one)\n r += \"\\n\\n\"\n r += f\"## Many-to-many mappings - many {cat_a} to many {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in many_to_many)\n r += \"\\n\\n\"\n\n r += \"## Unmapped categories\\n\\n\"\n cats_missing_a = set(self.categorization_a.values()) - cats_a\n cats_missing_b = set(self.categorization_b.values()) - cats_b\n r += f\"### {cat_a}\\n\"\n r += \"\\n\".join(sorted(str(x) for x in cats_missing_a)) + \"\\n\\n\"\n r += f\"### {cat_b}\\n\"\n r += \"\\n\".join(sorted(str(x) for x in cats_missing_b)) + \"\\n\\n\"\n\n return r",
"def metrics_group():",
"def _make_category_groups(data_struct):\n groups = {}\n for cat in set(data_struct[\"Objects\"]): \n \n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n for dn in data_names:\n for idx in indices:\n groups[cat] = data_struct[dn][idx]\n return(groups)",
"def test_metric_map_values(self):\n url = reverse(\"metrics\")\n client = APIClient()\n\n params = {\"source_type\": Provider.PROVIDER_OCP}\n url = url + \"?\" + urlencode(params, quote_via=quote_plus) + \"&limit=11\"\n response = client.get(url, **self.headers).data[\"data\"]\n self.assertEqual(len(COST_MODEL_METRIC_MAP), len(response))\n for metric in COST_MODEL_METRIC_MAP:\n self.assertIsNotNone(metric.get(\"source_type\"))\n self.assertIsNotNone(metric.get(\"metric\"))\n self.assertIsNotNone(metric.get(\"label_metric\"))\n self.assertIsNotNone(metric.get(\"label_measurement_unit\"))\n self.assertIsNotNone(metric.get(\"default_cost_type\"))",
"def test_create_metrics_dict(self):\n # binary tasks have 1 class at class definition.\n num_classes = 1\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' not in metrics_dict.keys()\n\n num_classes = 3\n metrics_dict = create_metrics_dict(num_classes)\n assert 'iou_1' in metrics_dict.keys()\n assert 'iou_2' in metrics_dict.keys()\n assert 'iou_3' not in metrics_dict.keys()\n del metrics_dict",
"def convert_units(data, units):\n # Build the dictionary of units conversions\n convert = {'m' : [1.0, 0., 'm'], \n 'meter' : [1.0, 0., 'm'], \n 'deg C' : [1.0, 273.15, 'K'], \n 'Celsius' : [1.0, 273.15, 'K'], \n 'K' : [1.0, 0., 'K'],\n 'db' : [1.e4, 101325., 'Pa'], \n 'Pa' : [1.0, 0., 'Pa'],\n 'mg/m^3': [1.e-6, 0., 'kg/m^3'], \n 'S/m': [1.0, 0., 'S/m'],\n 'mS/m' : [1.e-3, 0., 'S/m'],\n 'psu': [1.0, 0., 'psu'], \n 'salinity': [1.0, 0., 'psu'], \n 'kg/m^3': [1.0, 0., 'kg/m^3'], \n 'kilogram meter-3': [1.0, 0., 'kg/m^3'], \n 'm/s': [1.0, 0., 'm/s'], \n 'mg/l': [1.e-3, 0., 'kg/m^3'],\n 'meter second-1' : [1.0, 0., 'm/s'],\n 'm.s-1' : [1.0, 0., 'm/s'],\n 'pH units' : [1.0, 0., 'pH units'],\n 'MPa' : [1.e6, 0., 'Pa'],\n '--' : [1.0, 0., '--'],\n 'mD' : [9.869233e-16, 0., 'm^2'],\n 'um' : [1.e-6, 0., 'm'],\n 'm/s 1e-9' : [1.e-9, 0., 'm/s'],\n 'm/s 1e-7' : [1.e-7, 0., 'm/s'],\n 'wt.%' : [10., 0., 'psu'],\n '10^-15 m^2' : [1.e-15, 0., 'm^2'],\n 'm^2' : [1., 0., 'm^2'],\n 'kg/m^2/year' : [3.168808781402895e-08, 0., 'kg/m^2/s'] \n } \n \n # Make sure the data are a numpy array and the units are a list\n if isinstance(data, float) or isinstance(data, int):\n data = np.array([data])\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(units, str) or isinstance(units, unicode):\n units = [units]\n if units == None:\n units = ['']\n \n # Make sure you can slice through the columns: must be two-dimensional\n sh = data.shape\n data = np.atleast_2d(data)\n \n # Allow conversion of a row of data if all of the same unit\n if len(units) == 1 and data.shape[1] > 1:\n data = data.transpose()\n \n # Create an emtpy array to hold the output\n out_data = np.zeros(data.shape)\n out_units = []\n \n # Convert the units\n for i in range(len(units)):\n try:\n out_data[:,i] = data[:,i] * convert[units[i]][0] + \\\n convert[units[i]][1]\n out_units += [convert[units[i]][2]]\n except KeyError:\n print('Do not know how to convert %s to mks units' % units[i])\n print('Continuing without converting these units...')\n out_data[:,i] = data[:,i]\n out_units += units[i]\n \n # Return the converted data in the original shape\n out_data = np.reshape(out_data, sh, 'C')\n return (out_data, out_units)",
"def calc_metrics(data, sampled_data_list, dataset_type):\n result={}\n for sampled_data in sampled_data_list:\n c2st_roc_auc_metric = c2st_roc_auc(data, sampled_data)\n if \"c2st_roc_auc\" in result:\n result[\"c2st_roc_auc\"].append(c2st_roc_auc_metric)\n else:\n result[\"c2st_roc_auc\"] = [c2st_roc_auc_metric]\n mmd_p_val, mmd_stat = rbf_mmd_test(data.values, sampled_data.values)\n if \"mmd_p_val\" in result:\n result[\"mmd_p_val\"].append(mmd_p_val)\n result[\"mmd_stat\"].append(mmd_stat)\n else:\n result[\"mmd_p_val\"] = [mmd_p_val]\n result[\"mmd_stat\"] = [mmd_stat]\n ks_p_val, ks_stat, ks_n, ks_p_val_list, ks_stat_list = ks_test(data, sampled_data)\n if dataset_type != \"norm_dataset\":\n ks_p_val = ks_permutation(ks_stat_list, data, sampled_data)\n if \"ks_p_val\" in result:\n result[\"ks_p_val\"].append(ks_p_val)\n result[\"ks_stat\"].append(ks_stat)\n else:\n result[\"ks_p_val\"] = [ks_p_val]\n result[\"ks_stat\"] = [ks_stat]\n acc_r, acc_g = c2st_accuracy(data, sampled_data)\n if \"c2st_acc_r\" in result:\n result[\"c2st_acc_r\"].append(acc_r)\n result[\"c2st_acc_g\"].append(acc_g)\n else:\n result[\"c2st_acc_r\"] = [acc_r]\n result[\"c2st_acc_g\"] = [acc_g]\n return result",
"def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }",
"def testStepBuildStatsMap(self):\n self._StringToMapHelper(data_types.StepBuildStatsMap, data_types.BuildStats)",
"def test_set_derived_metric_tags(self):\n pass",
"def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g",
"def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]",
"def compute_metrics(meters):\n metrics = {m: vs.avg for m, vs in meters.items()}\n metrics = {m: v if isinstance(v, float) else v.item() for m, v in metrics.items()}\n return metrics",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge"
] | [
"0.5816675",
"0.57289094",
"0.5644376",
"0.56161416",
"0.5572126",
"0.5567753",
"0.5550242",
"0.5526054",
"0.53678817",
"0.5350343",
"0.5344897",
"0.53356266",
"0.53257257",
"0.5325444",
"0.52578825",
"0.52578384",
"0.5251907",
"0.5248301",
"0.52464676",
"0.5242211",
"0.52412844",
"0.52364653",
"0.52275234",
"0.51984775",
"0.51858854",
"0.51702523",
"0.51630723",
"0.5143102",
"0.51349473",
"0.5103489"
] | 0.78846425 | 0 |
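The two records above pair a META lookup (get_unit_map) with the counter/gauge dispatch in add_stats. The sketch below is only an illustration of how those two snippets might fit together end to end; the META contents, the DemoAgent class name, and the print-based add_gauge_value/add_derive_value stubs are assumptions for the example and are not taken from the dataset rows.

```python
# Minimal sketch, assuming a META layout compatible with get_unit_map above.
DEFAULT_UNIT = "Units"
DEFAULT_TYPE = "gauge"

META = {
    "gauge": {"Connections": ["Active", ("Memory", "Bytes")]},
    "counter": {"Requests": [("Handled", "Requests")]},
}


class DemoAgent:
    def __init__(self, raw_metrics):
        self.raw_metrics = raw_metrics

    def get_unit_map(self):
        # Build category/metric -> (unit, type), mirroring the record above.
        units = {}
        for metric_type, categories in META.items():
            for category, items in categories.items():
                for item in items:
                    if isinstance(item, (tuple, list)):
                        value, unit = item
                    else:
                        value, unit = item, DEFAULT_UNIT
                    units["/".join((category, value))] = (unit, metric_type)
        return units

    # Stand-ins for the real agent sinks; here they just print.
    def add_gauge_value(self, name, unit, value):
        print("gauge  ", name, unit, value)

    def add_derive_value(self, name, unit, value, rate=False):
        print("counter", name, unit, value, "rate" if rate else "")

    def add_stats(self):
        units = self.get_unit_map()
        for metric, value in self.raw_metrics.items():
            unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))
            if metric_type == "counter":
                self.add_derive_value(metric, "/".join((unit, "Second")), value, rate=True)
            else:
                self.add_gauge_value(metric, unit, value)


if __name__ == "__main__":
    DemoAgent({"Connections/Active": 12, "Requests/Handled": 3400}).add_stats()
```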
Given a list of names, return the values collected for those names as a list. If any are missing, then return None. | def get_values(self, names):
r = []
for n in names:
if n in self.raw_metrics:
r.append(self.raw_metrics[n])
else:
return None
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def values(names, location, field = 0):\n table = read_properties(location)\n result = []\n for name in names:\n result.append(table[name][field])\n return result",
"def get_name_value(self):\n name, value = self.get()\n if not isinstance(name, list):\n name = [name]\n if not isinstance(value, list):\n value = [value]\n return list(zip(name, value))",
"def queryMpValues( canonicalNames, nothrow=False ):\n from subarrayCommands import queryMonitorPoint\n def querySingleListOfMpValues( canonicalNameList, nothrow ):\n mps = queryMonitorPoint( canonicalNameList )\n mpValues = []\n for m in mps:\n if m.isValid():\n mpValues.append( m.value() )\n else:\n if nothrow: mpValues.append( None )\n else: raise Exception( m.name_ + \" is not valid!\" )\n\n return mpValues # Return as a list\n\n def recursiveMpListQuery( canonicalNameList, nothrow ):\n output = []\n for i in canonicalNameList:\n if list == type( i ):\n output.append(\n recursiveMpListQuery(i, nothrow) )\n else:\n output = querySingleListOfMpValues( canonicalNameList,\n nothrow)\n break\n return output\n\n return recursiveMpListQuery( canonicalNames, nothrow )",
"def get_list_of_present_entries(list_):\n\n _List = []\n for x in list_:\n if x not in _List:\n if not None:\n if not pd.isna(x):\n _List.append(x)\n return _List",
"def get_valid(self, names):\n\n ret = [None] * len(names)\n posdict = dict([(name, i) for i, name in enumerate(names)])\n\n for compname, varnames in partition_names_by_comp(names).items():\n if compname is None:\n vals = super(Assembly, self).get_valid(varnames)\n for i, val in enumerate(vals):\n ret[posdict[varnames[i]]] = val\n else:\n comp = getattr(self, compname)\n if isinstance(comp, Component):\n vals = comp.get_valid(varnames)\n else:\n vals = [self._valid_dict['.'.join([compname, vname])] for vname in varnames]\n for i, val in enumerate(vals):\n full = '.'.join([compname, varnames[i]])\n ret[posdict[full]] = val\n return ret",
"def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names",
"def get_values(self, *fields):\n result = []\n for field in fields:\n if isinstance(field, (str, basestring)):\n result.append(self.get_value(field))\n elif isinstance(field, (tuple, list)):\n result.append(self.get_values(*field))\n else:\n result.append(None)\n return result",
"def get_all_names(cls, exclude_values: Iterator['CommonBucksType'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list",
"def setValues(names):\r\n results = []\r\n for name in names:\r\n altName = \"_%s\" % name\r\n if name in kwargs.keys():\r\n results.append(kwargs[name])\r\n elif name in self.__dict__.keys():\r\n results.append(self.__dict__[name])\r\n elif altName in self.__dict__.keys(): \r\n results.append(self.__dict__[altName])\r\n else:\r\n raise RuntimeError(\"%s: not found\" % name)\r\n return results",
"def sum_of(self, names):\n vals = self.get_values(names)\n if vals is None:\n return None\n return sum(vals)",
"def get_all_names(cls, exclude_values: Iterator['CommonGameTagCategory'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list",
"def values(self, *keys: _K) -> t.List[t.Any]: # type: ignore[override]\n if keys:\n d: t.List[t.Any] = []\n for key in keys:\n try:\n i = self.index(key)\n except KeyError:\n d.append(None)\n else:\n d.append(self[i])\n return d\n return list(self)",
"def values(self):\n # TODO: Collect all values in each of the buckets\n all_values = [] # Will store all the key\n\n for bucket in self.buckets:\n for value in bucket:\n if value is not None:\n all_values.append(value[1])\n return all_values",
"def get_all_names(cls, exclude_values: Iterator['CommonBusinessAdvertisingType'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list",
"def get_attr_values_with_name(self, name):\n return [attr.value for attr in self.get_attrs_with_name(name)]",
"def get_results(self, case_names, params):\n results = []\n for name in case_names:\n results.append(self.get_result(name, params))\n return results",
"def values(self, items_list):\n return [self.resolve(value) for value in items_list]",
"def getProbeList(probeNames, probeMap):\n trace (\"getProbeList(%s)\" %probeNames)\n probeList = []\n probeNameList = probeNames.split(',')\n for probeName in probeNameList:\n probe = probeMap.get(probeName.strip().lower())\n if probe:\n probeList.append (probe)\n else:\n print \"Unable to find %s in %s\" %(probeName,str(probeMap))\n return probeList",
"def get_values(self):\n \n return []",
"def _maybe_match_names(self, other):\n if len(self.names) != len(other.names):\n return [None] * len(self.names)\n names = []\n for a_name, b_name in zip(self.names, other.names):\n if a_name == b_name:\n names.append(a_name)\n else:\n # TODO: what if they both have np.nan for their names?\n names.append(None)\n return names",
"def get_list_vars(my_vars):\n lists = []\n for var in my_vars:\n try:\n temp = my_vars[var].getValue()\n #print var + '=' + str(temp)\n except ValueError:\n lists.append(var)\n return lists",
"def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res",
"def GetListVariable(self, name):\n var = self._makefile.variables.get(name, expand=True)[2]\n if not var:\n return []\n return var.resolvesplit(self._makefile, self._makefile.variables)",
"def values(line):\n return [v.strip() or None for v in text(line).split(',')]",
"def extract_full_names(people):\n result = []\n \n for lst in names:\n x = ''\n for name in lst.values():\n x += ' ' + name \n x = x[1:] \n result.append(x)\n return result",
"def _get_attrs(self, names):\n assert isinstance(names, str)\n names = names.replace(\",\", \" \").split(\" \")\n res = []\n for n in names:\n if n == \"\":\n continue\n if n not in self.__dict__:\n raise KeyError(\"Unknown name for Container attribute: '{}'\".format(n))\n res.append(getattr(self, n))\n return res",
"def get_values(self, no_values=400):\r\n return [self.get_value() for i in range(no_values)]",
"def get_units(self, names):\n # Make sure names is a list\n if isinstance(names, str) or isinstance(names, unicode):\n names = [names]\n \n # Return the list of units\n ans = []\n for name in names:\n if name in self.interp_ds:\n ans.append(self.interp_ds[name].attrs['units'])\n else:\n ans.append('Not Available in Dataset')\n \n return ans",
"def get_values(self, dates):\n ret = []\n for d in dates:\n ret.append(self.data[d])\n return ret",
"def names(self):\n if isinstance(self.name, string_types):\n return [self.name]\n else:\n return list(self.name)"
] | [
"0.62341124",
"0.61295545",
"0.6060184",
"0.58670735",
"0.58584267",
"0.5791712",
"0.5745358",
"0.56358737",
"0.5626649",
"0.5603033",
"0.55910695",
"0.5588045",
"0.55808705",
"0.5564574",
"0.5495613",
"0.5486652",
"0.5477916",
"0.54763335",
"0.54682344",
"0.546777",
"0.5415812",
"0.53828233",
"0.5374573",
"0.5354404",
"0.534432",
"0.531956",
"0.5303475",
"0.5299547",
"0.5290446",
"0.5283988"
] | 0.75494033 | 0 |
Given a list of metric names, return the sum of their values if all of them exist in the raw metrics, otherwise return None. | def sum_of(self, names):
vals = self.get_values(names)
if vals is None:
return None
return sum(vals) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r",
"def merge_measurements(measurements_list: List[Measurements]) -> \\\n Tuple[Measurements, List[MetricName]]:\n summed_metrics: Measurements = {}\n\n all_metrics_names = set() # Sum of set of names.\n for measurements in measurements_list:\n all_metrics_names.update(measurements.keys())\n\n for metric_name in all_metrics_names:\n if metric_name in METRICS_METADATA:\n\n if METRICS_METADATA[metric_name].type == MetricType.GAUGE:\n operation = lambda values: sum(values) / len(values) # noqa\n else:\n assert METRICS_METADATA[metric_name].type == MetricType.COUNTER\n operation = sum\n\n else:\n log.debug('By default, unknown metric %r uses \"sum\" as merge operation.', metric_name)\n operation = sum\n\n summed_metrics[metric_name] = operation(\n [measurements[metric_name] for measurements in measurements_list\n if metric_name in measurements])\n\n return summed_metrics",
"def calc_total(records, names):\n total = 0\n for rec in records:\n if rec['name'] in names:\n total += rec['price']\n return total",
"def sum_values(values):\n return (sum(values))",
"def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass",
"def sum_activity_metrics(activityDict, metricNames, activityName=\"main_thread\"):\n assert isinstance(activityDict, dict)\n \n return [sum(x) for x in zip(\n *(get_activity_samples(activityDict, metricNames, activityName).values()))]",
"def aggregate_metrics(metrics):\n if len(metrics) == 1:\n return metrics[0]\n else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics",
"def compare_sum(values, weights):\n return np.sum(values.numpy())",
"def sum_tensors(xs: List[Optional[torch.Tensor]]) ->Optional[torch.Tensor]:\n idx = next((idx for idx, tensor in enumerate(xs) if tensor is not None), -1)\n if idx == -1:\n return None\n ret = xs[idx]\n for tensor in xs[idx + 1:]:\n if tensor is not None:\n ret = ret + tensor\n return ret",
"def sumAllValues(self,*toSkip):\n sum=0\n for counterKey in self.counters.keys():\n if not counterKey in toSkip: sum += self.counters[counterKey]\n # 026 #self.debug.mainLogger.debug(\"Sumation of all counters finished with result %i.\"%(sum))\n return sum",
"def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures",
"def sum(values):\n total = 0\n for i in values:\n total += i\n return total",
"def data_dict_add_total(data_dict, sum_args, feat_name):\n for key in data_dict:\n data_dict[key][feat_name] = 0\n for feat in sum_args:\n if data_dict[key][feat] != 'NaN':\n data_dict[key][feat_name] += data_dict[key][feat]",
"def action_store_sum(raw_val):\n\n if isinstance(raw_val, list):\n val_sum = None\n for val in raw_val:\n val = auto_type_convert(val)\n if isinstance(val, (int, float)):\n if val_sum is None:\n val_sum = val\n else:\n val_sum += val\n return val_sum\n else:\n return None",
"def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:\n return sum(mxd_lst)",
"def sum_accumulators(accs):\n valid = [acc for acc in accs if acc]\n if len(valid) == 0:\n return None\n\n ret = valid[0]\n for v in valid[1:]:\n ret += v\n return ret",
"def summarize_metrics(metrics):\n summarized = {}\n for k in metrics:\n if k.endswith('mse'):\n summarized[k[:-3] + 'rmse'] = np.sqrt(np.mean(metrics[k]))\n elif k.startswith('err'):\n summarized[k + '_mean'] = np.mean(metrics[k])\n summarized[k + '_rmse'] = np.sqrt(np.mean(metrics[k]**2))\n elif k.endswith('nomean'):\n summarized[k] = metrics[k]\n else:\n summarized[k] = np.mean(metrics[k])\n\n return summarized",
"def sum(self, values):\n return self.aggregate(values, \"sum\")",
"def get_union_metrics(metric_a, metric_b):\n if metric_a is None and metric_b is None:\n return None\n elif metric_a is None:\n return metric_b\n elif metric_b is None:\n return metric_a\n else:\n # The order of metric_list need to be consistent among all hosts in distributed training\n # So we have metric_list sorted here.\n metric_list = sorted(list(set(metric_a).union(metric_b)))\n return metric_list",
"def sum(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"sum\", skipna)\n return k, cast(pdarray, v)",
"def sum_dstats(self, stats, smetrics):\n avg = {}\n\n for disk, metrics in stats.iteritems():\n for mname, metric in metrics.iteritems():\n if mname not in smetrics:\n continue\n if mname in avg:\n avg[mname] += metric\n else:\n avg[mname] = metric\n\n return avg",
"def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N):\n outs = []\n if N >= 0:\n outputs = outputs[:N]\n for i in range(len(outputs[0])):\n scalar = np.array(map(lambda x: x[i], outputs))\n assert (scalar.ndim == 1)\n add_value_to_summary(metric_summary, names[i], np.mean(scalar),\n tag_str='{:>27s}: [{:s}]: %f'.format(names[i], ''))\n outs.append(np.mean(scalar))\n return outs",
"def calculate_metrics(jobs, metrics_names):\n metrics_def_dict = {mn: {'metric': mn.split('_')[0], 'agg': mn.split('_')[1], 'data': [], 'value': -1} for mn in metrics_names}\n\n for job in jobs:\n if job['category'] == 'run' and job['jobstatus'] == 'finished':\n for mn, mdata in metrics_def_dict.items():\n if 'per' in mdata['metric']:\n if mdata['metric'].split('per')[0] in job and mdata['metric'].split('per')[1] in job and job[mdata['metric'].split('per')[1]] > 0:\n mdata['data'].append(job[mdata['metric'].split('per')[0]]/(1.0*job[mdata['metric'].split('per')[1]]))\n elif mdata['metric'] in job and job[mdata['metric']]:\n mdata['data'].append(job[mdata['metric']])\n\n for mn, mdata in metrics_def_dict.items():\n if 'avg' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])/(1.0*len(mdata['data'])) if len(mdata['data']) > 0 else -1\n if 'sum' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])\n\n metrics = {}\n for mn, mdata in metrics_def_dict.items():\n if mdata['value'] > 0:\n if 'percent' in mdata['agg']:\n metrics[mn] = round(mdata['value'] * 100.0, 2)\n else:\n metrics[mn] = round(mdata['value'], 2)\n\n return metrics",
"def get_metrics(cm, list_metrics):\n dic_metrics = {}\n total = np.sum(cm)\n\n if 'accuracy' in list_metrics:\n out = np.sum(np.diag(cm))\n dic_metrics['accuracy'] = out/total\n\n if 'pres_0' in list_metrics:\n num = cm[0, 0]\n den = cm[:, 0].sum()\n dic_metrics['pres_0'] = num/den if den > 0 else 0\n\n if 'pres_1' in list_metrics:\n num = cm[1, 1]\n den = cm[:, 1].sum()\n dic_metrics['pres_1'] = num/den if den > 0 else 0\n\n if 'recall_0' in list_metrics:\n num = cm[0, 0]\n den = cm[0, :].sum()\n dic_metrics['recall_0'] = num/den if den > 0 else 0\n\n if 'recall_1' in list_metrics:\n num = cm[1, 1]\n den = cm[1, :].sum()\n dic_metrics['recall_1'] = num/den if den > 0 else 0\n\n return dic_metrics",
"def _generalised_sum(data, func):\n count = len(data)\n if func is None:\n total = math.fsum(data)\n else:\n total = math.fsum(func(x) for x in data)\n return count, total",
"def sum_node_list(node_list):\n node_list = [n for n in node_list if n is not None]\n if node_list == []:\n return None\n\n from operator import add\n from functools import reduce\n return reduce(add, node_list)",
"def test_accumulation(preds, targets, exact_match, f1):\n squad_metric = SQuAD()\n for pred, target in zip(preds, targets):\n squad_metric.update(preds=[pred], target=[target])\n metrics_score = squad_metric.compute()\n\n _assert_tensor(metrics_score[\"exact_match\"])\n _assert_tensor(metrics_score[\"f1\"])\n _assert_allclose(metrics_score[\"exact_match\"], torch.mean(torch.tensor(exact_match)))\n _assert_allclose(metrics_score[\"f1\"], torch.mean(torch.tensor(f1)))",
"def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()",
"def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)",
"def metric_checker(self, metrics):\n self.logger.debug(\"Checking metrics: '%s'\", metrics)\n metrics = metrics.split(\",\")\n reference = self.get_metrics()\n for metric in metrics:\n if metric in reference:\n pass\n else:\n raise InvalidMetricError(\"Invalid metrics: '{}'\".format(metric))"
] | [
"0.63208723",
"0.58659804",
"0.57584655",
"0.5594722",
"0.5587642",
"0.5583846",
"0.54215026",
"0.53978634",
"0.5388517",
"0.53796774",
"0.5361299",
"0.5329915",
"0.5302537",
"0.5300705",
"0.5294816",
"0.5272846",
"0.52468646",
"0.5246003",
"0.52124965",
"0.5141806",
"0.512744",
"0.51267344",
"0.5117094",
"0.5094158",
"0.505983",
"0.50584435",
"0.5052294",
"0.5040794",
"0.50263655",
"0.49951562"
] | 0.7123606 | 0 |
Given a list of metric names, return the result of the first subtracted by all others (a b c d ...). If any metric names do not exist, return None. | def diff_of(self, names):
vals = self.get_values(names)
if vals is None:
return None
return vals[0] - (sum(vals[1:])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subtract(*args):\n return args[0] - reduce(lambda x, y: x + y, args[1:])",
"def subtract(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n difference = str(ft.reduce(oper.sub,values))\n\n return difference",
"def subtract(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n difference = str(args[0] - args[1])\n return difference",
"def get_union_metrics(metric_a, metric_b):\n if metric_a is None and metric_b is None:\n return None\n elif metric_a is None:\n return metric_b\n elif metric_b is None:\n return metric_a\n else:\n # The order of metric_list need to be consistent among all hosts in distributed training\n # So we have metric_list sorted here.\n metric_list = sorted(list(set(metric_a).union(metric_b)))\n return metric_list",
"def get_cup_metric(name):\r\n for metric in cup_metrics:\r\n if metric.__name__.lower() == name.lower():\r\n return metric\r\n raise AttributeError",
"def calc_metric(output, metrics):\n score = []\n for metric in metrics:\n metric_mod = __import__(\"sklearn.metrics\", fromlist=[metric])\n metric_func = getattr(metric_mod, metric)\n score.append(metric_func(output[0], output[1]))\n return score, output",
"def _get_metric(name):\n try:\n return metrics.metric(name)\n except InvalidMetricError:\n return None",
"def diff(*args):\n return reduce(lambda x, y: x - y, args)",
"def get_unit_by_metric(metric):\n for item in PROMETHEUS_METRICS_LIST:\n if item['name'] == metric:\n return item['unit']\n return \"\"",
"def subtract(minuend, *values):\r\n result = minuend\r\n for value in values:\r\n result -= value\r\n return result",
"def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures",
"def subtract(first, second):\n return first - second",
"def get_metric(name):\n return metric_name_to_function_mapping[name.lower()]",
"def subtract(numbers):\n\n dif = numbers[0]\n for i in numbers[1:]:\n dif = dif - i\n return dif",
"def get_delta(name):\n\n # get metrics\n [curr_metrics, last_metrics] = get_metrics()\n\n # get delta\n name = name[len(NAME_PREFIX):] # remove prefix from name\n try:\n delta = (curr_metrics['data'][name] - last_metrics['data'][name])/(curr_metrics['time'] - last_metrics['time'])\n if delta < 0:\n delta = 0\n except StandardError:\n delta = 0\n\n return delta",
"def subtract(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = result - n\n return result",
"def kkSub(*args):\n if (None in args):\n return None\n return abs(args[0]-args[1])",
"def __call__(self, *args):\n result = self\n if len(args) == 1:\n if np.isscalar(args[0]) or args[0] is None:\n result -= args[0]\n else:\n for i in args[0]:\n result -= i\n return result\n if np.isscalar(args[0]) or args[0] is None:\n result += args[0]\n else:\n for i in args[0]:\n result += i\n for i in args[1:]:\n if np.isscalar(i) or i is None:\n result -= i\n else:\n for j in i:\n result -= j\n return result",
"def construct_metric_function(metric_name):\n if(metric_name == \"mse\"):\n def metric_function(result, expected):\n return np.mean((result - expected)**2)\n elif(metric_name == \"logmse\"):\n def metric_function(result, expected):\n return np.log10(np.mean((result - expected)**2))\n\n elif(metric_name == \"rmse\"):\n def metric_function(result, expected):\n return np.sqrt(np.mean((result - expected)**2))\n\n elif(metric_name == \"mae\"):\n def metric_function(result, expected):\n return np.mean(np.abs(result - expected))\n\n else:\n raise ValueError(\"Unrecognized metric name = %s\" % metric_name)\n\n return metric_function",
"def find_best_k_and_metric(data):\n metrics_and_scores = []\n possible_metrics = [euclidean_distance, manhattan_distance, hamming_distance, cosine_distance]\n for k in range(1, len(data)):\n for metric in possible_metrics:\n cross_validation_score = cross_validate(data, k, metric)\n metrics_and_scores.append([k, metric, cross_validation_score])\n sorted_metrics = sorted(metrics_and_scores, key = lambda item:item[2])\n return (sorted_metrics[-1][0], sorted_metrics[-1][1])",
"def calc_single_metric(trains, metric, tau):\n logger.info(\"Calculating metric %s for time_scale %s.\" % (metric, str(tau)))\n return metrics[metric](trains, tau)",
"def select_best_th(metrics_dict: Dict, metric: str):\n max_metric_ix = np.argmax(metrics_dict[metric])\n return metrics_dict['metrics_ths'][max_metric_ix]",
"def wrapper_subtract_med(args):\n\n return subtract_med(*args)",
"def get_metric_func(\n metric: str,\n) -> Callable[[Union[List[int], List[float]], List[float]], float]:\n # Note: If you want to add a new metric, please also update the parser argument --metric in parsing.py.\n if metric == \"auc\":\n return roc_auc_score\n\n if metric == \"prc-auc\":\n return prc_auc\n\n if metric == \"rmse\":\n return rmse\n\n if metric == \"mae\":\n return mean_absolute_error\n\n if metric == \"r2\":\n return r2_score\n\n if metric == \"accuracy\":\n return accuracy\n\n if metric == \"recall\":\n return recall\n\n if metric == \"sensitivity\":\n return sensitivity\n\n if metric == \"specificity\":\n return specificity\n\n raise ValueError(f'Metric \"{metric}\" not supported.')",
"def _metric_max_over_ground_truths(metric_fn: Callable[[str, str], Tensor], prediction: str, ground_truths: List[str]) ->Tensor:\n return max(metric_fn(prediction, truth) for truth in ground_truths)",
"def fn(i, m):\n if i + 2*m >= len(piles): return prefix[-1] - prefix[i]\n ans = -inf \n for ii in range(1, 2*m+1): \n if i+ii < len(prefix): \n ans = max(ans, prefix[i+ii] - prefix[i] - fn(i+ii, max(m, ii)))\n return ans",
"def subtract(*args):\n body = ['<h1>Subtraction Calculator</h1>']\n diff = reduce(lambda x,y: x - y, map(int,args))\n body.append(f'Total equals: {diff}')\n return '\\n'.join(body)",
"def subtract(*args):\n\n result = int(args[0]) - int(args[1])\n\n return str(result)",
"def subtract(self,*datas):\n\t\tdatas = list(datas)\n\t\tresult = datas.pop(0)\n\t\tfor data in datas:\n\t\t\tresult -= data\n\t\treturn result",
"def v3minus(a, b):\n return [a[i] - b[i] for i, j in enumerate(a)]"
] | [
"0.5903671",
"0.5564674",
"0.5434356",
"0.53246856",
"0.5131955",
"0.50864434",
"0.5047729",
"0.5025216",
"0.5021303",
"0.49535123",
"0.49406168",
"0.49197245",
"0.48909643",
"0.48661345",
"0.48607007",
"0.48527187",
"0.4794334",
"0.4729753",
"0.47213003",
"0.471478",
"0.47043067",
"0.46917048",
"0.46873707",
"0.46852773",
"0.46483755",
"0.46216536",
"0.46210274",
"0.46143296",
"0.4612236",
"0.45734608"
] | 0.5787716 | 1 |
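The diff_of record above pairs with a get_values helper that bails out when any metric name is missing. Below is a minimal, self-contained sketch of that pattern; the MetricStore class and its dict-backed storage are illustrative assumptions, not taken from the source plugin.

    # Illustrative sketch: dict-backed store with the assumed get_values/diff_of pair.
    class MetricStore:
        def __init__(self, metrics):
            self.metrics = dict(metrics)  # name -> numeric value

        def get_values(self, names):
            # Return values for all names, or None if any name is unknown.
            if any(n not in self.metrics for n in names):
                return None
            return [self.metrics[n] for n in names]

        def diff_of(self, names):
            # First value minus the sum of all the others (a - b - c - ...).
            vals = self.get_values(names)
            if vals is None:
                return None
            return vals[0] - sum(vals[1:])

    store = MetricStore({"a": 10, "b": 3, "c": 2})
    assert store.diff_of(["a", "b", "c"]) == 5
    assert store.diff_of(["a", "missing"]) is None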
Update the raw metrics for a particular metric name if the value is a number. | def update_metric(self, metric, value):
if self.is_number(value):
self.logger.debug("Collected raw metric: %s = %s" % (metric, value))
self.raw_metrics[metric] = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))",
"def apply_filter_metrics(self, pack_nr, filter_metrics):\n current_pack_metrics = ast.literal_eval(self.list_pack[pack_nr]['metrics'])\n\n for i in filter_metrics:\n if i in current_pack_metrics:\n filter_metrics[i] = current_pack_metrics[i]\n\n self.list_pack[pack_nr]['metrics'] = filter_metrics",
"def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()",
"def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")",
"def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)",
"def updateNumerosity(self, num):\n self.numerosity += num",
"async def update(self, key, value_increment):\n try:\n payload = PayloadBuilder()\\\n .WHERE([\"key\", \"=\", key])\\\n .EXPR([\"value\", \"+\", value_increment])\\\n .payload()\n self._storage.update_tbl(\"statistics\", payload)\n except:\n _logger.exception(\n 'Unable to update statistics value based on statistics_key %s and value_increment %s'\n , key, value_increment)\n raise",
"def update_metrics(self, metrics, predictions, labels):\n return",
"def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]",
"def update_scalar(self, name: str, value: float, epoch_idx: int = None) -> None:\n self._writer.add_scalar(name, value, global_step=(epoch_idx if epoch_idx is not None else self.epoch_num))",
"def watch_numbers(self, value: str) -> None:\n self.query_one(\"#numbers\", Digits).update(value)",
"def set_stat(stats: Dict[str, StatisticsType], name: str, value: str):\n value = value.strip('\"')\n tt = StdStatisticTypes.get(name, None)\n if tt is timedelta or (tt is None and (\"time\" in name or \"Time\" in name)):\n time_us = int(float(value) * 1000000)\n stats[name] = timedelta(microseconds=time_us)\n elif tt is not None:\n stats[name] = tt(value)\n else:\n try:\n stats[name] = int(value)\n return\n except ValueError:\n try:\n stats[name] = float(value)\n return\n except ValueError:\n stats[name] = value",
"def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value",
"def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self",
"def _update(self, handle, value):\n _LOGGER.debug(\n \"%s: %15s temperature = %-2d.%-2d, humidity = %3d\",\n handle,\n self.name,\n value[0],\n value[2],\n value[1],\n )\n self.data[\"temp\"] = float(\"%d.%d\" % (value[0], value[2]))\n self.data[\"humid\"] = value[1]",
"def update_numeric_width(self, eval_dict):\n # If width is already a number, do nothing\n if isinstance(self.width, int):\n self.width_numeric = self.width\n return\n self.width_numeric = eval(self.width.replace(\"`\", \"\"), eval_dict)\n if not isinstance(self.width_numeric, int):\n logger.error(\"Could not evaluate width {} of wire {}\".format(self.width_numeric, self.name))",
"def update(self,d:dict):\n for name,(value,n) in d.items():\n if n==0:\n continue\n self.meters[name].update(value,n)",
"def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key",
"def changeMetrics(self, metrics):\n if isinstance(metrics,list) == False:\n metrics = [metrics]\n self.metrics = metrics\n\n whatMetrics = []\n\n for i in metrics:\n if i == RMSE:\n whatMetrics.append(\"RMSE\")\n elif i == f1Score:\n whatMetrics.append(\"f1Score\")\n elif i == recall:\n whatMetrics.append(\"recall\")\n elif i == precision:\n whatMetrics.append(\"precision\")\n elif i == mean_squared_error:\n whatMetrics.append(\"mean_squared_error\")\n elif i == mean_absolute_error:\n whatMetrics.append(\"mean_absolute_error\")\n elif i == mean_absolute_percentage_error:\n whatMetrics.append(\"mean_absolute_percentage_error\")\n elif isinstance(i,str):\n whatMetrics.append(i)\n else:\n print(\"I don't know what to do with : \" + str(i))\n\n self.metricsAsString = whatMetrics",
"def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def update_magic(self, tally, value='mean', threshold=1.0, ratio=5.0):\n _dll.openmc_weight_windows_update_magic(self._index,\n tally._index,\n c_char_p(value.encode()),\n threshold,\n ratio)",
"def sum(self, key, value):\n self._metrics[key] += value",
"def fusion_api_update_metrics_configuration(self, body, api=None, headers=None):\n return self.metrics.update(body, api, headers)",
"def updateNumber(self, deviceName, propertyName):\n\n if self.device is None:\n return False\n if deviceName != self.name:\n return False\n\n for element, value in self.device.getNumber(propertyName).items():\n key = propertyName + '.' + element\n self.data[key] = value\n\n # only version 2 has 3 dew heaters\n if element == 'DEW_C':\n if self.versionUPB != 2:\n self.versionUPB = 2\n self.signals.version.emit(2)\n\n # print(propertyName, element, value)\n\n return True",
"def updateMeter(self, name1, name2, op):\r\n mini = 0\r\n maxi = 100\r\n pos = (self.var.get() - mini) / (maxi - mini)\r\n self.updateMeterLine(pos * 0.6 + 0.2)",
"def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})",
"def resetValue(self, attr_name, value, calc_level):\n if not getattr(self, attr_name) == value:\n setattr(self, attr_name, float(value))\n # Reset calculation level\n self.calc_level = min(self.calc_level, calc_level)",
"def handle_metrics(split: str, metrics: Dict[str, Union[int, float]], output_dir: str):\n\n logger.info(f\"***** {split} metrics *****\")\n for key in sorted(metrics.keys()):\n value = metrics[key]\n if isinstance(value, float):\n value = round(value, 4)\n logger.info(f\" {key} = {value}\")\n save_json(metrics, os.path.join(output_dir, f\"{split}_results.json\"))",
"def Update(self, value):\n self.SetValue(self.GetValue() + tf.cast(value, self.dtype))"
] | [
"0.5767613",
"0.56614435",
"0.56110084",
"0.5580715",
"0.55691767",
"0.544993",
"0.53968453",
"0.5386266",
"0.5385655",
"0.53586227",
"0.5325535",
"0.5323981",
"0.52706116",
"0.5256082",
"0.525114",
"0.5226717",
"0.5223895",
"0.5173679",
"0.51539004",
"0.51441383",
"0.5132418",
"0.50851536",
"0.50724393",
"0.50660366",
"0.5052708",
"0.5036889",
"0.5028768",
"0.500585",
"0.5001698",
"0.49975103"
] | 0.8006188 | 0 |
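A minimal sketch of the update_metric record above, assuming is_number is a float()-based check and raw_metrics is a plain dict; both helpers are assumptions made for illustration rather than the source implementation.

    # Illustrative sketch: only values that parse as numbers are recorded.
    class Collector:
        def __init__(self):
            self.raw_metrics = {}

        @staticmethod
        def is_number(value):
            try:
                float(value)
                return True
            except (TypeError, ValueError):
                return False

        def update_metric(self, metric, value):
            # Keep the raw value only when it is numeric.
            if self.is_number(value):
                self.raw_metrics[metric] = value

    c = Collector()
    c.update_metric("status/com_select", "42")   # stored
    c.update_metric("status/version", "5.7.x")   # ignored, not numeric
    assert "status/version" not in c.raw_metrics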
Derive all of the custom newrelic metric data from what we've collected. | def derive_newrelic_stats(self):
self.logger.debug("Collecting stats for newrelic")
self.derive_newrelic_volume()
self.derive_newrelic_throughput()
self.derive_newrelic_innodb()
self.derive_newrelic_qcache()
self.derive_newrelic_slaves() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)",
"def _calculate_custom_data(self):\n if self.limit is not None:\n self.data['pct'] = self.usage * 100.0 / self.limit\n if self.units == 'hours':\n self.time = timedelta(hours=self.usage)\n self.data['name'] = self.id",
"def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]",
"def calculate_dataset_metrics(self):\n pass",
"def derive_newrelic_qcache(self):\n # Query Cache\n vals = self.get_values([\"status/qcache_hits\", \"status/com_select\", \"status/qcache_free_blocks\",\n \"status/qcache_total_blocks\", \"status/qcache_inserts\", \"status/qcache_not_cached\"])\n if vals:\n qc_hits, reads, free, total, inserts, not_cached = vals\n\n self.update_metric(\"newrelic/query_cache_hits\", qc_hits)\n self.update_metric(\"newrelic/query_cache_misses\", inserts)\n self.update_metric(\"newrelic/query_cache_not_cached\", not_cached)\n\n pct_query_cache_hit_utilization = 0.0\n if (qc_hits + reads) > 0:\n pct_query_cache_hit_utilization = (qc_hits / (qc_hits + reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_query_cache_hit_utilization\", pct_query_cache_hit_utilization)\n\n pct_query_cache_memory_in_use = 0.0\n if total > 0:\n pct_query_cache_memory_in_use = 100.0 - ((free / total) * 100.0)\n\n self.update_metric(\"newrelic/pct_query_cache_memory_in_use\", pct_query_cache_memory_in_use)\n\n # Temp Table\n vals = self.get_values([\"status/created_tmp_tables\", \"status/created_tmp_disk_tables\"])\n if vals:\n tmp_tables, tmp_tables_disk = vals\n\n pct_tmp_tables_written_to_disk = 0.0\n if tmp_tables > 0:\n pct_tmp_tables_written_to_disk = (tmp_tables_disk / tmp_tables) * 100.0\n\n self.update_metric(\"newrelic/pct_tmp_tables_written_to_disk\", pct_tmp_tables_written_to_disk)",
"def customAttributes(self):\n print(\"setting custom attributes\")\n data = self.getData(\"daily_usage\")\n date = data[-1][\"readTime\"]\n\n attributes = {}\n attributes[\"date\"] = date\n last_reset = date - timedelta(days=1)\n # attributes[\"last_reset\"] = last_reset\n return attributes",
"def _add_cat_fields(self, odata, copy=True):\n # these are required fileds from get_meds_output_dtype\n # that we have put into the input catalog\n always_copy=[\n 'id',\n 'ra',\n 'dec',\n ]\n cat = self.cat_orig\n\n add_dt = []\n for d in cat.dtype.descr:\n n = d[0]\n if n not in odata.dtype.names:\n add_dt.append(d)\n\n obj_data = eu.numpy_util.add_fields(\n odata,\n add_dt,\n )\n\n if copy:\n for n in always_copy:\n obj_data[n] = cat[n]\n\n for d in add_dt:\n n = d[0]\n if n in always_copy:\n continue\n\n # don't clobber things that should be left at\n # their default values\n if n not in odata.dtype.names:\n obj_data[n] = cat[n]\n\n\n return obj_data",
"def _make_meta(self):\n available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n charts_after_dataset = 
list()\n \n \n # dataset table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)",
"def test_get_derived_metric(self):\n pass",
"def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect",
"def test_get_all_derived_metrics(self):\n pass",
"def test_create_derived_metric(self):\n pass",
"def __init__(self, replication_num, metric_name_array, metric_collection_types = None, detailed_metric_assembly = False):\n self.replication_num = replication_num\n self.metrics = metric_name_array\n self.metric_collection_types = metric_collection_types # can be a string array elements of which can be one of ('STRING_LIST', 'COUNT_MAX', 'MEAN_STD','MIN','MAX', 'MIN_MAX') \n self.detailed_metric_assembly = detailed_metric_assembly\n self.replication_counter = 0\n self.metric_final_results = {}\n # initialize results array for each metric\n for metric in metric_name_array:\n self.metric_final_results[metric] = []",
"def compute_metrics(self):\n pass",
"def metrics_group():",
"def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)",
"def derive_newrelic_throughput(self):\n # read and write throughput\n self.update_metric(\"newrelic/bytes_reads\", self.sum_of([\"status/bytes_sent\"]))\n self.update_metric(\"newrelic/bytes_writes\", self.sum_of([\"status/bytes_received\"]))\n\n # Connection management\n vals = self.get_values([\"status/threads_connected\", \"status/threads_running\", \"status/threads_cached\"])\n if vals:\n connected, running, cached = vals\n self.update_metric(\"newrelic/connections_connected\", connected)\n self.update_metric(\"newrelic/connections_running\", running)\n self.update_metric(\"newrelic/connections_cached\", cached)\n pct_connection_utilization = 0.0\n if vals[0] > 0:\n pct_connection_utilization = (running / connected) * 100.0\n self.update_metric(\"newrelic/pct_connection_utilization\", pct_connection_utilization)",
"def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data",
"def _build_collection_attribute_data_for_cellranger(\n metrics_file, collection_name, collection_type, attribute_name='attribute_name',\n attribute_value='attribute_value', attribute_prefix=None, load_new_metrix_file=True):\n try:\n check_file_path(metrics_file)\n if load_new_metrix_file:\n attribute_data = \\\n pd.read_csv(metrics_file)\n attribute_data = \\\n attribute_data[attribute_data['Library or Sample']=='Sample']\n attribute_data = attribute_data[['Metric Name', 'Metric Value']]\n attribute_data.columns = [\n attribute_name,\n attribute_value]\n else:\n attribute_data = \\\n pd.read_csv(metrics_file).T.\\\n reset_index()\n attribute_data.columns = [\n attribute_name,\n attribute_value]\n if attribute_prefix is None:\n attribute_data[attribute_name] = \\\n attribute_data[attribute_name].\\\n map(lambda x: x.replace(' ','_'))\n else:\n attribute_data[attribute_name] = \\\n attribute_data[attribute_name].\\\n map(lambda x: \\\n '{0}_{1}'.format(\\\n attribute_prefix,\n x.replace(' ','_')))\n attribute_data['name'] = collection_name\n attribute_data['type'] = collection_type\n attribute_data[attribute_value] = \\\n attribute_data[attribute_value].astype(str)\n attribute_data[attribute_value] = \\\n attribute_data[attribute_value].\\\n map(lambda x: \\\n x.replace(',',''))\n attribute_data = \\\n attribute_data.\\\n to_dict(orient='records')\n return attribute_data\n except Exception as e:\n raise ValueError(\n 'Failed to build collection attribute data for collection {0}:{1}, error: {2}'.\\\n format(collection_name,collection_type,e))",
"def apply_metrics(x):\n d = {}\n d[\"custom_metric\"] = custom_metric(\n x[\"actuals\"], x[\"forecast\"], x[\"avg_vol\"].values[0]\n )\n d[\"uncertainty_metric\"] = uncertainty_metric(\n x[\"actuals\"], x[\"upper_bound\"], x[\"lower_bound\"], x[\"avg_vol\"].values[0]\n )\n\n return pd.Series(d, index=[\"custom_metric\", \"uncertainty_metric\"])",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def set_metrics(self):",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def _build_eval_metrics(self, results, features, labels):\n metrics = {}\n for metric in self.metrics:\n metrics[metric.IDENTIFIER] = getters.get_metric(\n metric.IDENTIFIER, results, labels, **metric.to_dict())\n return metrics",
"def test_get_derived_metric_history(self):\n pass",
"def _generate(self, custom_data: typing.Dict) -> typing.Dict:\n info = {}\n for field in self.fields:\n if field.name in custom_data:\n info[field.name] = custom_data[field.name]\n else:\n info[field.name] = field.generate(info)\n\n return info",
"def compute_metrics(self, x, extra=None):\n if self.__metrics is None and extra is None:\n return None\n\n ret = {}\n if self.__metrics is not None:\n for m in self.__metrics:\n ret[m.name] = self._mdmetric(x, m)\n\n if extra is not None and extra.name not in ret:\n ret[extra.name] = self._mdmetric(x, extra)\n\n return ret",
"def getMeasures():",
"def _calculate_custom_data(self):\n self.data['vms'] = Vms(self.vms, self.url)",
"def get_data_extra(self, initial):\n extra = {\n 'distance':'10',\n 'latitude':'0',\n 'longitude':'1'\n }\n return dict(initial.items() + extra.items())"
] | [
"0.6022074",
"0.59213793",
"0.5731898",
"0.56916195",
"0.56569976",
"0.56055313",
"0.5598628",
"0.5564767",
"0.55601907",
"0.55311126",
"0.5521709",
"0.5505745",
"0.5444377",
"0.5430692",
"0.54174036",
"0.5376228",
"0.5355599",
"0.53036517",
"0.5295715",
"0.5289679",
"0.5281834",
"0.52087873",
"0.5194723",
"0.51905227",
"0.5183394",
"0.51817125",
"0.51771504",
"0.5158137",
"0.51560324",
"0.51543903"
] | 0.7011071 | 0 |
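The derive_newrelic_stats record above is an orchestrator: each derive_* step reads raw counters and writes derived newrelic/... values back into the collector. A minimal sketch of that pattern, using hypothetical raw/derived dicts and a single query-cache step purely for illustration:

    # Illustrative sketch: chain several derive_* steps over shared metric dicts.
    raw = {"status/qcache_hits": 75.0, "status/com_select": 25.0}
    derived = {}

    def derive_qcache(raw, derived):
        hits = raw["status/qcache_hits"]
        reads = raw["status/com_select"]
        total = hits + reads
        derived["newrelic/pct_query_cache_hit_utilization"] = (hits / total) * 100.0 if total else 0.0

    def derive_all(raw, derived):
        # The real plugin chains volume, throughput, innodb, qcache and slave steps.
        for step in (derive_qcache,):
            step(raw, derived)

    derive_all(raw, derived)
    assert derived["newrelic/pct_query_cache_hit_utilization"] == 75.0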
Derive the newrelic read/write volume metrics | def derive_newrelic_volume(self):
# read and write volume
self.update_metric("newrelic/volume_reads", self.sum_of(["status/com_select", "status/qcache_hits"]))
self.update_metric("newrelic/volume_writes", self.sum_of(["status/com_insert", "status/com_insert_select",
"status/com_update", "status/com_update_multi",
"status/com_delete", "status/com_delete_multi",
"status/com_replace", "status/com_replace_select"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()",
"def derive_newrelic_throughput(self):\n # read and write throughput\n self.update_metric(\"newrelic/bytes_reads\", self.sum_of([\"status/bytes_sent\"]))\n self.update_metric(\"newrelic/bytes_writes\", self.sum_of([\"status/bytes_received\"]))\n\n # Connection management\n vals = self.get_values([\"status/threads_connected\", \"status/threads_running\", \"status/threads_cached\"])\n if vals:\n connected, running, cached = vals\n self.update_metric(\"newrelic/connections_connected\", connected)\n self.update_metric(\"newrelic/connections_running\", running)\n self.update_metric(\"newrelic/connections_cached\", cached)\n pct_connection_utilization = 0.0\n if vals[0] > 0:\n pct_connection_utilization = (running / connected) * 100.0\n self.update_metric(\"newrelic/pct_connection_utilization\", pct_connection_utilization)",
"def total_volume(self):",
"def get_disk_rw(sampling_duration):\n \n #get te list of devices\n with open('/proc/partitions') as f:\n devices = [re.search('\\s([^\\s]+)$', line).group(1).strip() for line in re.findall('^\\s*[0-9]+\\s+[1-9]+.*$', f.read(), flags = re.MULTILINE)]\n \n with open('/proc/diskstats') as f1:\n with open('/proc/diskstats') as f2:\n content1 = f1.read() #first collection\n yield {} #yield so that caller can put delay before sampling again\n content2 = f2.read() #second collection\n \n #initialize the dict with interfaces and values\n data = dict(zip(devices, [dict(zip(['reads', 'writes'], [0, 0])) for device in devices]))\n\n for line in content1.splitlines(): #read through first collection\n for device in [device_x for device_x in devices if '%s ' % device_x in line]:\n fields = line.strip().split('%s ' % device)[1].split()\n data[device]['reads'] = int(fields[0])\n data[device]['writes'] = int(fields[4])\n break\n \n for line in content2.splitlines(): #read through second collection\n for device in [device_x for device_x in devices if '%s ' % device_x in line]:\n fields = line.strip().split('%s ' % device)[1].split()\n data[device]['reads'] = (int(fields[0]) - data[device]['reads']) / float(sampling_duration)\n data[device]['writes'] = (int(fields[4]) - data[device]['writes']) / float(sampling_duration)\n break \n \n yield data",
"def __init__(self):\n super().__init__()\n self.metric = 'VOL'",
"def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)",
"def __init__(self):\n super().__init__()\n self.metric = 'SEGVOL'",
"def __init__(self):\n super().__init__()\n self.metric = 'GTVOL'",
"def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data",
"def volumes(self):",
"def read_metrics(self):\n raise NotImplementedError()",
"def main(self):\n debug(\"Using %s\" % (self.PROC_DISKSTATS))\n\n initial = self.get_status()\n time.sleep(self.interval)\n final = self.get_status()\n\n # Get bytes/sec\n for d in self.partitions:\n r_diff = ((final[d].r_sectors - initial[d].r_sectors) * self.sector_size) / self.interval\n w_diff = ((final[d].w_sectors - initial[d].w_sectors) * self.sector_size) / self.interval\n final[d].r_rate = r_diff\n final[d].w_rate = w_diff\n \n # Status string\n msg = \" \".join([ \"%s (r: %d KB/s, w: %d KB/s)\" % (i.dev, i.r_rate / 1024, i.w_rate / 1024) for i in sorted(final.values(), key=lambda x:x.dev) ])\n performance = \" \".join([ \"'%s read'=%d '%s write'=%d\" % (i.dev, i.r_rate, i.dev, i.w_rate) for i in sorted(final.values(), key=lambda x:x.dev) ])\n\n return (EX_OK, msg, performance)",
"def compute(self, inputs, outputs):\n #super().compute(inputs, outputs)\n outputs['stuff'] = inputs['widths'] * 2\n outputs['areas'] = inputs['lengths'] * 2\n\n outputs['total_volume'] = np.sum(outputs['areas']) + np.sum(outputs['stuff'])",
"def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result",
"def derive_newrelic_qcache(self):\n # Query Cache\n vals = self.get_values([\"status/qcache_hits\", \"status/com_select\", \"status/qcache_free_blocks\",\n \"status/qcache_total_blocks\", \"status/qcache_inserts\", \"status/qcache_not_cached\"])\n if vals:\n qc_hits, reads, free, total, inserts, not_cached = vals\n\n self.update_metric(\"newrelic/query_cache_hits\", qc_hits)\n self.update_metric(\"newrelic/query_cache_misses\", inserts)\n self.update_metric(\"newrelic/query_cache_not_cached\", not_cached)\n\n pct_query_cache_hit_utilization = 0.0\n if (qc_hits + reads) > 0:\n pct_query_cache_hit_utilization = (qc_hits / (qc_hits + reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_query_cache_hit_utilization\", pct_query_cache_hit_utilization)\n\n pct_query_cache_memory_in_use = 0.0\n if total > 0:\n pct_query_cache_memory_in_use = 100.0 - ((free / total) * 100.0)\n\n self.update_metric(\"newrelic/pct_query_cache_memory_in_use\", pct_query_cache_memory_in_use)\n\n # Temp Table\n vals = self.get_values([\"status/created_tmp_tables\", \"status/created_tmp_disk_tables\"])\n if vals:\n tmp_tables, tmp_tables_disk = vals\n\n pct_tmp_tables_written_to_disk = 0.0\n if tmp_tables > 0:\n pct_tmp_tables_written_to_disk = (tmp_tables_disk / tmp_tables) * 100.0\n\n self.update_metric(\"newrelic/pct_tmp_tables_written_to_disk\", pct_tmp_tables_written_to_disk)",
"def _update_volume_stats(self):\n self._ensure_shares_mounted()\n data = {}\n lcfg = self.configuration\n backend_name = self.configuration.safe_get('volume_backend_name')\n data['volume_backend_name'] = backend_name or self.__class__.__name__\n data['vendor_name'] = 'Oracle'\n data['driver_version'] = self.VERSION\n data['storage_protocol'] = self.protocol\n\n asn = self.zfssa.get_asn()\n data['location_info'] = '%s:%s' % (asn, lcfg.zfssa_nfs_share)\n\n free, used = self._get_share_capacity_info()\n capacity = float(free) + float(used)\n ratio_used = used / capacity\n\n data['QoS_support'] = False\n data['reserved_percentage'] = 0\n\n used_percentage_limit = 100 - self.configuration.reserved_percentage\n used_ratio_limit = used_percentage_limit / 100.0\n if (ratio_used > used_ratio_limit or\n ratio_used >= self.configuration.max_over_subscription_ratio):\n data['reserved_percentage'] = 100\n\n data['total_capacity_gb'] = float(capacity) / units.Gi\n data['free_capacity_gb'] = float(free) / units.Gi\n\n share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,\n lcfg.zfssa_nfs_project,\n lcfg.zfssa_nfs_share)\n pool_details = self.zfssa.get_pool_details(lcfg.zfssa_nfs_pool)\n\n data['zfssa_compression'] = share_details['compression']\n data['zfssa_encryption'] = share_details['encryption']\n data['zfssa_logbias'] = share_details['logbias']\n data['zfssa_poolprofile'] = pool_details['profile']\n data['zfssa_sparse'] = six.text_type(lcfg.nfs_sparsed_volumes)\n\n self._stats = data",
"def __init__(self):\n super().__init__()\n self.metric = 'VOLSMTY'",
"def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))",
"def subbandwidth(self):",
"def compute_metrics(self):\n pass",
"def collect():\n\n command = \"cat /proc/meminfo |grep MemTotal|awk -F' ' '{print $2}'\"\n memTotal_f = round(float(os.popen(command).read())/1024/1000,0)\n memTotal = int(memTotal_f)\n cmd = 'df -h |grep \"/dev/s\"'\n metric_disk = os.popen(cmd).readlines()\n hardNum=[]\n for i in metric_disk:\n hard_space = float((i.strip().split()[1])[:-1])\n hardNum.append(hard_space)\n\n disk_info = sum(hardNum)\n disk_use = {}\n metric_disks=os.popen('df -x tmpfs -x devtmpfs | grep -Eo \" /\\S*$\" ').readlines()\n for disk in metric_disks:\n cmd = 'df|grep -E \"%s$\"' % disk.strip()\n disks = os.popen(cmd).readlines()[0]\n disk_list = disks.split()\n disk_use[disk_list[5]]=disk_list[4]\n hard = {\n \"disk_used\" : disk_use,\n \"disk_total\":disk_info,\n \"mem_total\":memTotal\n }\n\n return hard",
"def test_update_volume_stats(self):\n actual = self.driver.get_volume_stats(True)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual(90, actual['total_capacity_gb'])\n self.assertEqual(87, actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])",
"def load_snapshot(base_path, snap_num, subvolumes, group, fields, matches):\n n_init = []\n\n snap_key = 'N{}_ThisFile_Redshift'.format('groups' if group == 'Haloprop' else 'subgroups')\n for subvolume in subvolumes: \n n_init.append(load_header(base_path, subvolume)[snap_key][snap_num])\n \n # initialize objects structure\n result = {}\n \n with h5py.File(file_path(base_path, subvolumes[0], 'subvolume'), 'r') as f:\n # galprop and haloprop both have a redshift quantity so we can use that to query for the snapshot we want\n filter_field = '{}Redshift'.format(group)\n \n if not fields:\n fields = list(f[group].keys())\n\n # make sure the redshift field is included in fields\n if filter_field not in fields:\n fields.append(filter_field) \n \n for field in fields:\n if field not in f[group].keys():\n raise Exception(\"Catalog does not have requested field [{}]!\".format(field))\n\n shape = list(f[group][field].shape)\n shape[0] = np.sum(n_init)\n\n # allocate within return dict\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n if matches:\n with h5py.File(file_path(base_path, subvolumes[0], 'matches'), 'r') as f:\n for field in f[group].keys():\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n header = load_header(base_path, subvolumes[0])\n filter_condition = header['Redshifts'][snap_num]\n\n offset = 0\n\n for subvolume in subvolumes:\n subvol_result = load_subvolume(base_path, subvolume, group, fields, matches, False)\n\n idx = subvol_result[filter_field][:] == filter_condition\n\n for field in subvol_result.keys():\n if len(subvol_result[field].shape) != 1:\n result[field][offset:offset+n_init[0], :] = subvol_result[field][idx]\n else:\n result[field][offset:offset+n_init[0]] = subvol_result[field][idx]\n\n offset += n_init[0]\n del n_init[0]\n \n return result",
"def volume(self):\n return sum([x[\"counter_volume\"] for x in self.usage])",
"def _update_volume_stats(self):\n LOG.debug('Updating volume stats')\n total_space = 0\n free_space = 0\n share = None\n for _share in self._mounted_shares:\n if self.shares_with_capacities[_share]['free'] > free_space:\n free_space = self.shares_with_capacities[_share]['free']\n total_space = self.shares_with_capacities[_share]['total']\n share = _share\n\n location_info = '%(driver)s:%(share)s' % {\n 'driver': self.__class__.__name__,\n 'share': share\n }\n nms_url = self.share2nms[share].url\n self._stats = {\n 'vendor_name': 'Nexenta',\n 'dedup': self.volume_deduplication,\n 'compression': self.volume_compression,\n 'description': self.volume_description,\n 'nms_url': nms_url,\n 'ns_shares': self.shares_with_capacities,\n 'driver_version': self.VERSION,\n 'storage_protocol': constants.NFS,\n 'total_capacity_gb': total_space,\n 'free_capacity_gb': free_space,\n 'reserved_percentage': self.configuration.reserved_percentage,\n 'QoS_support': False,\n 'location_info': location_info,\n 'volume_backend_name': self.backend_name,\n 'nfs_mount_point_base': self.nfs_mount_point_base\n }",
"def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))",
"def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data",
"def compute_statistics(self):",
"def get_net_rw(sampling_duration):\n \n interfaces = [file for file in os.listdir('/sys/class/net/')] #network interfaces\n \n with open('/proc/net/dev') as f1:\n with open('/proc/net/dev') as f2:\n content1 = f1.read() #first collection\n yield {} #yield so that caller can put delay before sampling again\n content2 = f2.read() #second collection\n \n #initialize the dict with interfaces and values\n data = dict(zip(interfaces, [dict(zip(['reads', 'writes'], [0, 0])) for interface in interfaces]))\n \n for line in content1.splitlines(): #read through first collection\n for interface in [interface_x for interface_x in interfaces if '%s:' % interface_x in line]:\n fields = line.split('%s:' % interface)[1].split()\n data[interface]['reads'] = int(fields[0])\n data[interface]['writes'] = int(fields[8])\n break\n \n for line in content2.splitlines(): #read through second collection\n for interface in [interface_x for interface_x in interfaces if '%s:' % interface_x in line]:\n fields = line.split('%s:' % interface)[1].split()\n data[interface]['reads'] = (int(fields[0]) - data[interface]['reads']) / float(sampling_duration)\n data[interface]['writes'] = (int(fields[8]) - data[interface]['writes']) / float(sampling_duration)\n break\n \n yield data",
"def test_get_derived_metric(self):\n pass"
] | [
"0.6909179",
"0.6324424",
"0.587309",
"0.58164966",
"0.57896197",
"0.5775137",
"0.5725627",
"0.56265193",
"0.5589847",
"0.55038005",
"0.5489829",
"0.54812914",
"0.5480851",
"0.54732245",
"0.5446111",
"0.54351324",
"0.5425124",
"0.53942096",
"0.53008056",
"0.5265735",
"0.5252054",
"0.52440643",
"0.52329785",
"0.52118796",
"0.5195342",
"0.5154504",
"0.5129259",
"0.51289886",
"0.51146495",
"0.51142734"
] | 0.8065374 | 0 |
Derive the newrelic throughput metrics | def derive_newrelic_throughput(self):
# read and write throughput
self.update_metric("newrelic/bytes_reads", self.sum_of(["status/bytes_sent"]))
self.update_metric("newrelic/bytes_writes", self.sum_of(["status/bytes_received"]))
# Connection management
vals = self.get_values(["status/threads_connected", "status/threads_running", "status/threads_cached"])
if vals:
connected, running, cached = vals
self.update_metric("newrelic/connections_connected", connected)
self.update_metric("newrelic/connections_running", running)
self.update_metric("newrelic/connections_cached", cached)
pct_connection_utilization = 0.0
if vals[0] > 0:
pct_connection_utilization = (running / connected) * 100.0
self.update_metric("newrelic/pct_connection_utilization", pct_connection_utilization) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()",
"def compute_metrics(self):\n pass",
"def _report_metrics(self, total_bytes, time_delta, num_files):\n # This recreates the gsutil throughput calculation so that metrics are 1:1.\n avg_speed = round(float(total_bytes) / float(time_delta))\n report(\n source_scheme=self._source_scheme,\n destination_scheme=self._destination_scheme,\n num_files=num_files,\n size=total_bytes,\n avg_speed=avg_speed,\n disk_io_time=self._calculate_disk_io())",
"def __init__(self, metrics, gt, pred):\n self.dict_metrics = self.compute_metrics(metrics, gt, pred)",
"def calculate_batch_metrics(self):\n pass",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def compute_statistics(self):",
"def throughput(conn):\n c = conn.cursor()\n last_time = maxtime(conn)\n total_tasks = totaltasks(conn)\n\n return {\n \"throughput_tasks_per_second\": (total_tasks / (last_time / (10**6)))\n }",
"def metrics_group():",
"def set_metrics(self):",
"def subbandwidth(self):",
"def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result",
"def calculate_dataset_metrics(self):\n pass",
"def derive_newrelic_qcache(self):\n # Query Cache\n vals = self.get_values([\"status/qcache_hits\", \"status/com_select\", \"status/qcache_free_blocks\",\n \"status/qcache_total_blocks\", \"status/qcache_inserts\", \"status/qcache_not_cached\"])\n if vals:\n qc_hits, reads, free, total, inserts, not_cached = vals\n\n self.update_metric(\"newrelic/query_cache_hits\", qc_hits)\n self.update_metric(\"newrelic/query_cache_misses\", inserts)\n self.update_metric(\"newrelic/query_cache_not_cached\", not_cached)\n\n pct_query_cache_hit_utilization = 0.0\n if (qc_hits + reads) > 0:\n pct_query_cache_hit_utilization = (qc_hits / (qc_hits + reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_query_cache_hit_utilization\", pct_query_cache_hit_utilization)\n\n pct_query_cache_memory_in_use = 0.0\n if total > 0:\n pct_query_cache_memory_in_use = 100.0 - ((free / total) * 100.0)\n\n self.update_metric(\"newrelic/pct_query_cache_memory_in_use\", pct_query_cache_memory_in_use)\n\n # Temp Table\n vals = self.get_values([\"status/created_tmp_tables\", \"status/created_tmp_disk_tables\"])\n if vals:\n tmp_tables, tmp_tables_disk = vals\n\n pct_tmp_tables_written_to_disk = 0.0\n if tmp_tables > 0:\n pct_tmp_tables_written_to_disk = (tmp_tables_disk / tmp_tables) * 100.0\n\n self.update_metric(\"newrelic/pct_tmp_tables_written_to_disk\", pct_tmp_tables_written_to_disk)",
"def get_load_factor(self):\n # Your code here\n return self.count/len(self.data)",
"def advancedStats():",
"def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)",
"def test_get_derived_metric(self):\n pass",
"def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) ->Dict[str, Tensor]:\n precision = hits_or_lcs / pred_len\n recall = hits_or_lcs / target_len\n if precision == recall == 0.0:\n return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))\n fmeasure = 2 * precision * recall / (precision + recall)\n return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure))",
"def test_get_all_derived_metrics(self):\n pass",
"def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity",
"def calculate_global_throughput(samples, bucket_interval_secs=1):\n samples_per_task = {}\n # first we group all warmup / measurement samples by operation.\n for sample in samples:\n k = sample.task\n if k not in samples_per_task:\n samples_per_task[k] = []\n samples_per_task[k].append(sample)\n\n global_throughput = {}\n # with open(\"raw_samples.csv\", \"w\") as sample_log:\n # print(\"client_id,absolute_time,relative_time,operation,sample_type,total_ops,time_period\", file=sample_log)\n for k, v in samples_per_task.items():\n task = k\n if task not in global_throughput:\n global_throughput[task] = []\n # sort all samples by time\n current_samples = sorted(v, key=lambda s: s.absolute_time)\n\n total_count = 0\n interval = 0\n current_bucket = 0\n current_sample_type = current_samples[0].sample_type\n sample_count_for_current_sample_type = 0\n start_time = current_samples[0].absolute_time - current_samples[0].time_period\n for sample in current_samples:\n # print(\"%d,%f,%f,%s,%s,%d,%f\" %\n # (sample.client_id, sample.absolute_time, sample.relative_time, sample.operation, sample.sample_type,\n # sample.total_ops, sample.time_period), file=sample_log)\n\n # once we have seen a new sample type, we stick to it.\n if current_sample_type < sample.sample_type:\n current_sample_type = sample.sample_type\n sample_count_for_current_sample_type = 0\n\n total_count += sample.total_ops\n interval = max(sample.absolute_time - start_time, interval)\n\n # avoid division by zero\n if interval > 0 and interval >= current_bucket:\n sample_count_for_current_sample_type += 1\n current_bucket = int(interval) + bucket_interval_secs\n throughput = (total_count / interval)\n # we calculate throughput per second\n global_throughput[task].append(\n (sample.absolute_time, sample.relative_time, current_sample_type, throughput, \"%s/s\" % sample.total_ops_unit))\n # also include the last sample if we don't have one for the current sample type, even if it is below the bucket interval\n # (mainly needed to ensure we show throughput data in test mode)\n if interval > 0 and sample_count_for_current_sample_type == 0:\n throughput = (total_count / interval)\n global_throughput[task].append(\n (sample.absolute_time, sample.relative_time, current_sample_type, throughput, \"%s/s\" % sample.total_ops_unit))\n\n return global_throughput",
"def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)",
"def __init__(self):\n super().__init__()\n self.metric = 'TP'",
"def throughputbin(conn):\n c = conn.cursor()\n bc = bincount(conn)\n total_tasks = totaltasks(conn)\n return {\n \"throughput_tasks_per_bin\": total_tasks / bc['bins']['count']\n }",
"def test_create_derived_metric(self):\n pass",
"def stats(self):",
"def compute_metrics(self, results: list) -> dict:",
"def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit",
"def __init__(self):\n super().__init__()\n self.metric = 'AVGDIST'"
] | [
"0.69189394",
"0.61661357",
"0.5897504",
"0.58614343",
"0.5855015",
"0.58358943",
"0.58298737",
"0.5791972",
"0.5702153",
"0.56862736",
"0.56187415",
"0.5615706",
"0.56001467",
"0.55977714",
"0.5567835",
"0.5560966",
"0.5535466",
"0.5501375",
"0.54764473",
"0.54760295",
"0.5455046",
"0.5445555",
"0.5415534",
"0.53755814",
"0.5358267",
"0.53575844",
"0.5337179",
"0.53313035",
"0.5304255",
"0.528015"
] | 0.7748093 | 0 |
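Illustrative aside from the editor, not part of the dataset row above or below: a minimal sketch of the connection-utilization arithmetic that derive_newrelic_throughput performs. The helper name and the plain-dict input are assumptions for illustration only; the plugin itself reads these counters through its get_values helper.

def pct_connection_utilization(status):
    # Threads_running as a percentage of Threads_connected, guarding against division by zero.
    connected = float(status.get("threads_connected", 0))
    running = float(status.get("threads_running", 0))
    if connected <= 0:
        return 0.0
    return (running / connected) * 100.0

# Example: 6 of 40 connected threads are actively running -> 15.0
print(pct_connection_utilization({"threads_connected": 40, "threads_running": 6}))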
Derive the newrelic innodb metrics | def derive_newrelic_innodb(self):
# InnoDB Metrics
vals = self.get_values(["status/innodb_pages_created", "status/innodb_pages_read",
"status/innodb_pages_written", "status/innodb_buffer_pool_read_requests",
"status/innodb_buffer_pool_reads", "status/innodb_data_fsyncs",
"status/innodb_os_log_fsyncs"])
if vals:
created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals
self.update_metric("newrelic/innodb_bp_pages_created", created)
self.update_metric("newrelic/innodb_bp_pages_read", read)
self.update_metric("newrelic/innodb_bp_pages_written", written)
hit_ratio = 0.0
if (bp_read_requests + bp_reads) > 0:
hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0
self.update_metric("newrelic/pct_innodb_buffer_pool_hit_ratio", hit_ratio)
self.update_metric("newrelic/innodb_fsyncs_data", data_fsync)
self.update_metric("newrelic/innodb_fsyncs_os_log", log_fsync)
# InnoDB Buffer Metrics
vals = self.get_values(["status/innodb_buffer_pool_pages_total", "status/innodb_buffer_pool_pages_data",
"status/innodb_buffer_pool_pages_misc", "status/innodb_buffer_pool_pages_dirty",
"status/innodb_buffer_pool_pages_free"])
if vals:
pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals
unassigned = pages_total - pages_data - pages_free - pages_misc
self.update_metric("newrelic/innodb_buffer_pool_pages_clean", pages_data - pages_dirty)
self.update_metric("newrelic/innodb_buffer_pool_pages_dirty", pages_dirty)
self.update_metric("newrelic/innodb_buffer_pool_pages_misc", pages_misc)
self.update_metric("newrelic/innodb_buffer_pool_pages_free", pages_free)
self.update_metric("newrelic/innodb_buffer_pool_pages_unassigned", unassigned) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()",
"def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def test_get_all_derived_metrics(self):\n pass",
"def compute_metrics(self):\n pass",
"def __init__(self, metrics, schema, table, nid):\n\n self.id = nid\n self.metrics = metrics\n self.schema = schema\n self.table = table\n self.batch_size = 20\n self.__init_metrics()",
"def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))",
"def calculate_dataset_metrics(self):\n pass",
"def test_create_derived_metric(self):\n pass",
"def fetch_metrics(self):\n\n self.explain_all_indices()",
"def test_get_derived_metric(self):\n pass",
"def derive_newrelic_throughput(self):\n # read and write throughput\n self.update_metric(\"newrelic/bytes_reads\", self.sum_of([\"status/bytes_sent\"]))\n self.update_metric(\"newrelic/bytes_writes\", self.sum_of([\"status/bytes_received\"]))\n\n # Connection management\n vals = self.get_values([\"status/threads_connected\", \"status/threads_running\", \"status/threads_cached\"])\n if vals:\n connected, running, cached = vals\n self.update_metric(\"newrelic/connections_connected\", connected)\n self.update_metric(\"newrelic/connections_running\", running)\n self.update_metric(\"newrelic/connections_cached\", cached)\n pct_connection_utilization = 0.0\n if vals[0] > 0:\n pct_connection_utilization = (running / connected) * 100.0\n self.update_metric(\"newrelic/pct_connection_utilization\", pct_connection_utilization)",
"def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def test_get_derived_metric_history(self):\n pass",
"def set_metrics(self):",
"def metrics_group():",
"def calculate_batch_metrics(self):\n pass",
"def compute_statistics(self):",
"def insert_metrics(params):\n\n outgoing_metrics_path = os.path.join(\"s3://\" + params['learner']['bucket'],\n params['learner']['prefix'], params['learner']['metrics'])\n outgoing_metrics = pd.read_csv(outgoing_metrics_path)\n # Connect\n con = psycopg2.connect(host=params[\"labeller\"][\"db_host\"], database=params[\"labeller\"][\"db_production_name\"],\n user=params[\"labeller\"][\"db_username\"], password=params[\"labeller\"][\"db_password\"])\n curs = con.cursor()\n print('cursor made')\n\n # Update the iteration_metrics table\n try:\n insert_query = \"insert into iteration_metrics \" \\\n \"(run, iteration, tss, accuracy, aoi, iteration_time, precision, \" \\\n \"recall, fpr, tpr, auc) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s); \"\n outgoing_metrics = outgoing_metrics[outgoing_metrics['iteration'] == get_current_iteration(params)]\n outgoing_metrics = outgoing_metrics[outgoing_metrics['run'] == params['learner']['runid']]\n # this is needed for multiple runs for multiple aois. incoming_names.csv will need an aoi column and its\n # corresponding table will need to have a aoi column that is a key like run and iteration\n # or we have a different incoming_names.csv for each aoi\n # outgoing_metrics = outgoing_metrics[outgoing_metrics['run']==params['learner']['aoiid']]\n outgoing_metrics = outgoing_metrics.reindex(\n columns=[\"run\", \"iteration\", \"tss\", \"accuracy\", \"aoi\", \"iteration_time\", \"precision\", \"recall\", \"fpr\",\n \"tpr\", \"AUC\"])\n outgoing_list = list(outgoing_metrics.iloc[0])\n # converts numpy types to basic python types for DB\n for i, n in enumerate(outgoing_list):\n if type(n) is not str:\n outgoing_list[i] = n.item()\n curs.execute(insert_query, outgoing_list)\n con.commit()\n print('Finished saving out the iteration metrics')\n except psycopg2.DatabaseError as err:\n print(\"Error updating database\")\n print(err)\n finally:\n if con:\n con.close()",
"def test_update_derived_metric(self):\n pass",
"def db_stats(self):\n return { \"search_and_get\": self.db_search_and_get }",
"def get_cnstat(self):\n def get_counters(table_id):\n \"\"\"\n Get the counters from specific table.\n \"\"\"\n fields = [\"0\"]*BUCKET_NUM\n\n for pos, cntr_list in counter_bucket_dict.items():\n for counter_name in cntr_list:\n full_table_id = COUNTER_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = str(int(fields[pos]) + int(counter_data))\n\n cntr = NStats._make(fields)\n return cntr\n\n def get_rates(table_id):\n \"\"\"\n Get the rates from specific table.\n \"\"\"\n fields = [\"0\",\"0\",\"0\",\"0\",\"0\",\"0\"]\n for pos, name in enumerate(rates_key_list):\n full_table_id = RATES_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = float(counter_data)\n cntr = RateStats._make(fields)\n return cntr\n\n # Get the info from database\n counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP);\n # Build a dictionary of the stats\n cnstat_dict = OrderedDict()\n cnstat_dict['time'] = datetime.datetime.now()\n ratestat_dict = OrderedDict()\n if counter_port_name_map is None:\n return cnstat_dict, ratestat_dict\n for port in natsorted(counter_port_name_map):\n port_name = port.split(\":\")[0]\n if self.multi_asic.skip_display(constants.PORT_OBJ, port_name):\n continue\n cnstat_dict[port] = get_counters(counter_port_name_map[port])\n ratestat_dict[port] = get_rates(counter_port_name_map[port])\n return cnstat_dict, ratestat_dict",
"def metrics(self, adapt=[\"mean\"], ):\n def func(record):\n df = recorddf(record)\n des = df.describe().loc[adapt, :]\n metric_dict = dict()\n\n epoch_now = list(df.epoch)[-1]\n des = des.drop(\"epoch\", axis=1)\n des = des.drop(\"iter\", axis=1)\n for col in des.columns:\n des.apply(lambda x: metric_dict.update({\"%s_%s\" % (x.name, col): x[col]}), axis=1)\n if self.verbose:\n print(metric_dict, flush=True)\n self.save_metrics(metrics=metric_dict, epoch = epoch_now)\n return metric_dict\n\n return func",
"def test_get_derived_metric_by_version(self):\n pass",
"def test_get_virtual_machine_count_metrics(self):\n pass",
"def derive_newrelic_qcache(self):\n # Query Cache\n vals = self.get_values([\"status/qcache_hits\", \"status/com_select\", \"status/qcache_free_blocks\",\n \"status/qcache_total_blocks\", \"status/qcache_inserts\", \"status/qcache_not_cached\"])\n if vals:\n qc_hits, reads, free, total, inserts, not_cached = vals\n\n self.update_metric(\"newrelic/query_cache_hits\", qc_hits)\n self.update_metric(\"newrelic/query_cache_misses\", inserts)\n self.update_metric(\"newrelic/query_cache_not_cached\", not_cached)\n\n pct_query_cache_hit_utilization = 0.0\n if (qc_hits + reads) > 0:\n pct_query_cache_hit_utilization = (qc_hits / (qc_hits + reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_query_cache_hit_utilization\", pct_query_cache_hit_utilization)\n\n pct_query_cache_memory_in_use = 0.0\n if total > 0:\n pct_query_cache_memory_in_use = 100.0 - ((free / total) * 100.0)\n\n self.update_metric(\"newrelic/pct_query_cache_memory_in_use\", pct_query_cache_memory_in_use)\n\n # Temp Table\n vals = self.get_values([\"status/created_tmp_tables\", \"status/created_tmp_disk_tables\"])\n if vals:\n tmp_tables, tmp_tables_disk = vals\n\n pct_tmp_tables_written_to_disk = 0.0\n if tmp_tables > 0:\n pct_tmp_tables_written_to_disk = (tmp_tables_disk / tmp_tables) * 100.0\n\n self.update_metric(\"newrelic/pct_tmp_tables_written_to_disk\", pct_tmp_tables_written_to_disk)",
"def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()",
"def test_get_virtual_machine_count_metrics1(self):\n pass",
"def test_get_derived_metric_tags(self):\n pass"
] | [
"0.622359",
"0.6214452",
"0.6004368",
"0.5830676",
"0.58252716",
"0.5782086",
"0.57635754",
"0.56960446",
"0.5643359",
"0.56161225",
"0.5613598",
"0.55897206",
"0.55739963",
"0.5533932",
"0.5531093",
"0.5449568",
"0.54442644",
"0.5439991",
"0.54288715",
"0.5375634",
"0.5368589",
"0.5295893",
"0.52741873",
"0.5250416",
"0.5227268",
"0.522298",
"0.5216291",
"0.5210553",
"0.5193409",
"0.51768255"
] | 0.72587854 | 0 |
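Illustrative aside, not part of the dataset row: a self-contained sketch of the buffer-pool page bookkeeping done in derive_newrelic_innodb. The function name and the plain-dict input are hypothetical; the keys mirror the SHOW GLOBAL STATUS counter names used above.

def innodb_buffer_pool_breakdown(status):
    # Split total pages into clean/dirty/misc/free plus whatever remains unassigned.
    total = status["innodb_buffer_pool_pages_total"]
    data = status["innodb_buffer_pool_pages_data"]
    misc = status["innodb_buffer_pool_pages_misc"]
    dirty = status["innodb_buffer_pool_pages_dirty"]
    free = status["innodb_buffer_pool_pages_free"]
    return {
        "clean": data - dirty,
        "dirty": dirty,
        "misc": misc,
        "free": free,
        "unassigned": total - data - free - misc,
    }

# Example: 8192 total pages -> {'clean': 5650, 'dirty': 350, 'misc': 200, 'free': 1800, 'unassigned': 192}
print(innodb_buffer_pool_breakdown({
    "innodb_buffer_pool_pages_total": 8192,
    "innodb_buffer_pool_pages_data": 6000,
    "innodb_buffer_pool_pages_misc": 200,
    "innodb_buffer_pool_pages_dirty": 350,
    "innodb_buffer_pool_pages_free": 1800,
}))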
Derive the newrelic qcache metrics | def derive_newrelic_qcache(self):
# Query Cache
vals = self.get_values(["status/qcache_hits", "status/com_select", "status/qcache_free_blocks",
"status/qcache_total_blocks", "status/qcache_inserts", "status/qcache_not_cached"])
if vals:
qc_hits, reads, free, total, inserts, not_cached = vals
self.update_metric("newrelic/query_cache_hits", qc_hits)
self.update_metric("newrelic/query_cache_misses", inserts)
self.update_metric("newrelic/query_cache_not_cached", not_cached)
pct_query_cache_hit_utilization = 0.0
if (qc_hits + reads) > 0:
pct_query_cache_hit_utilization = (qc_hits / (qc_hits + reads)) * 100.0
self.update_metric("newrelic/pct_query_cache_hit_utilization", pct_query_cache_hit_utilization)
pct_query_cache_memory_in_use = 0.0
if total > 0:
pct_query_cache_memory_in_use = 100.0 - ((free / total) * 100.0)
self.update_metric("newrelic/pct_query_cache_memory_in_use", pct_query_cache_memory_in_use)
# Temp Table
vals = self.get_values(["status/created_tmp_tables", "status/created_tmp_disk_tables"])
if vals:
tmp_tables, tmp_tables_disk = vals
pct_tmp_tables_written_to_disk = 0.0
if tmp_tables > 0:
pct_tmp_tables_written_to_disk = (tmp_tables_disk / tmp_tables) * 100.0
self.update_metric("newrelic/pct_tmp_tables_written_to_disk", pct_tmp_tables_written_to_disk) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()",
"def test_get_derived_metric_history(self):\n pass",
"def derive_newrelic_throughput(self):\n # read and write throughput\n self.update_metric(\"newrelic/bytes_reads\", self.sum_of([\"status/bytes_sent\"]))\n self.update_metric(\"newrelic/bytes_writes\", self.sum_of([\"status/bytes_received\"]))\n\n # Connection management\n vals = self.get_values([\"status/threads_connected\", \"status/threads_running\", \"status/threads_cached\"])\n if vals:\n connected, running, cached = vals\n self.update_metric(\"newrelic/connections_connected\", connected)\n self.update_metric(\"newrelic/connections_running\", running)\n self.update_metric(\"newrelic/connections_cached\", cached)\n pct_connection_utilization = 0.0\n if vals[0] > 0:\n pct_connection_utilization = (running / connected) * 100.0\n self.update_metric(\"newrelic/pct_connection_utilization\", pct_connection_utilization)",
"def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)",
"def __init__(self, metrics_cache):\n self._metrics_cache = metrics_cache",
"def compute_metrics(self):\n pass",
"def test_get_derived_metric(self):\n pass",
"def metrics_group():",
"def generate_metrics_data(metricsquery: List, resultsquery: Dict, deltaminutes: int = 5, Region_name: str = None) -> Dict:\r\n cloudwatch=client('cloudwatch', region_name=Region_name) \r\n paginator = cloudwatch.get_paginator('get_metric_data')\r\n metricsgroup=grouper(metricsquery)\r\n resultsquery['ApiCalls']=0 \r\n for mqs in metricsgroup:\r\n for response in paginator.paginate(MetricDataQueries=mqs, StartTime=datetime.now()-timedelta(minutes=deltaminutes),EndTime=datetime.now()):\r\n for results in response['MetricDataResults']:\r\n resultsquery[results['Id']].append({'results':results})\r\n resultsquery['ApiCalls']+=1\r\n return resultsquery",
"def set_metrics(self):",
"def test_get_all_derived_metrics(self):\n pass",
"def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]",
"def _cache_get(self, metric_name):\n pass",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def __init__(self, quasar, name, start_date, end_date):\n self.quasar = quasar\n self.name = name\n self.start = start_date\n self.end = end_date\n\n self.sampling_freq = 120 # Hz\n\n self.cache = [[None, None] for x in range(CACHE_ENTRIES)]",
"def __init__(self, metrics, gt, pred):\n self.dict_metrics = self.compute_metrics(metrics, gt, pred)",
"def calculate_dataset_metrics(self):\n pass",
"def stats(self):\n return super(NoneCache, self).stats()",
"def test_get_derived_metric_by_version(self):\n pass",
"def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()",
"def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()",
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def get_metrics(self):\n return None",
"def __trace(self, cmd):\n # type: (str, bool) -> Span\n # create a new span\n s = self._datadog_tracer.trace(\n schematize_cache_operation(cmd, cache_provider=\"flask_cache\"),\n span_type=SpanTypes.CACHE,\n service=self._datadog_service,\n )\n\n s.set_tag_str(COMPONENT, config.flask_cache.integration_name)\n\n s.set_tag(SPAN_MEASURED_KEY)\n # set span tags\n s.set_tag_str(CACHE_BACKEND, self.config.get(\"CACHE_TYPE\"))\n s.set_tags(self._datadog_meta)\n # set analytics sample rate\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.flask_cache.get_analytics_sample_rate())\n # add connection meta if there is one\n client = _extract_client(self.cache)\n if client is not None:\n try:\n s.set_tags(_extract_conn_tags(client))\n except Exception:\n log.debug(\"error parsing connection tags\", exc_info=True)\n\n return s",
"def test_create_derived_metric(self):\n pass",
"def __init__(self):\n super().__init__()\n self.metric = 'JACRD'",
"def compute_metrics(self, results: list) -> dict:",
"def _retrieveCachedData(self):",
"def getMeasures():",
"def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)"
] | [
"0.66529113",
"0.6139272",
"0.6087995",
"0.60597914",
"0.5858092",
"0.58465123",
"0.58407485",
"0.5827561",
"0.5758349",
"0.574827",
"0.5718123",
"0.57102144",
"0.56753933",
"0.56337994",
"0.5624577",
"0.56002194",
"0.5561739",
"0.5540792",
"0.55359334",
"0.54992205",
"0.5476429",
"0.54754955",
"0.54595345",
"0.5433798",
"0.5413736",
"0.5406065",
"0.5400875",
"0.5396424",
"0.5385797",
"0.5382655"
] | 0.809507 | 0 |
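Illustrative aside, not part of the dataset row: the two percentage formulas from derive_newrelic_qcache reduced to standalone functions, assuming the raw counter values are passed in directly (the plugin obtains them via get_values).

def pct_query_cache_hit_utilization(qcache_hits, com_select):
    # Cache hits as a percentage of all read attempts (hits + Com_select).
    reads = qcache_hits + com_select
    if reads <= 0:
        return 0.0
    return (qcache_hits / reads) * 100.0

def pct_tmp_tables_written_to_disk(tmp_tables, tmp_disk_tables):
    # Share of temporary tables that spilled to disk.
    if tmp_tables <= 0:
        return 0.0
    return (tmp_disk_tables / tmp_tables) * 100.0

print(pct_query_cache_hit_utilization(750, 250))  # 75.0
print(pct_tmp_tables_written_to_disk(400, 100))   # 25.0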
Derive the newrelic status metrics about replication slaves | def derive_newrelic_slaves(self):
if self.has_slave_data is True:
self.update_metric("newrelic/replication_lag", self.sum_of(["slave/seconds_behind_master"]))
# both need to be YES, which is 1
running = self.sum_of(["slave/slave_io_running", "slave/slave_sql_running"])
if running is not None:
replication_status = 1.0
if running == 2:
replication_status = 0.0
self.update_metric("newrelic/replication_status", replication_status)
self.update_metric("newrelic/slave_relay_log_bytes", self.sum_of(["slave/relay_log_pos"]))
self.update_metric("newrelic/master_log_lag_bytes", self.diff_of(["slave/read_master_log_pos",
"slave/exec_master_log_pos"]))
else: # This is a hack because the NR UI can't handle it missing for graphs
self.update_metric("newrelic/replication_lag", 0.0)
self.update_metric("newrelic/replication_status", 0.0)
self.update_metric("newrelic/slave_relay_log_bytes", 0.0)
self.update_metric("newrelic/master_log_lag_bytes", 0.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"def __init__(self, is_master, track_processes, write_profile,\n verbose_cluster_stats):\n my_ip = appscale_info.get_private_ip()\n lb_ips = appscale_info.get_load_balancer_ips()\n\n self._is_lb = my_ip in lb_ips\n if is_master is not None:\n self._is_master = is_master\n else:\n self._is_master = my_ip == appscale_info.get_headnode_ip()\n self._track_processes = track_processes\n self._write_profile = write_profile\n\n # There are 3 kinds of local stats (node/processes/proxies)\n self._local_node_stats = LocalStats(\n cache_size=NODE_STATS_CACHE_SIZE,\n update_interval=UPDATE_NODE_STATS_INTERVAL)\n self._local_processes_stats = LocalStats(\n cache_size=PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROCESSES_STATS_INTERVAL)\n self._local_proxies_stats = LocalStats(\n cache_size=PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROXIES_STATS_INTERVAL)\n\n if self._is_master:\n # And 3 same kinds of cluster stats\n self._cluster_nodes_stats = ClusterStats(\n cache_size=CLUSTER_NODES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_NODES_STATS_INTERVAL)\n self._cluster_processes_stats = ClusterStats(\n cache_size=CLUSTER_PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROCESSES_STATS_INTERVAL)\n self._cluster_proxies_stats = ClusterStats(\n cache_size=CLUSTER_PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROXIES_STATS_INTERVAL)\n\n if not verbose_cluster_stats:\n # To reduce slave-to-master traffic and verbosity of cluster stats\n # you can select which fields of stats to collect on master\n self._cluster_nodes_stats.included_field_lists = {\n 'node': ['cpu', 'memory', 'partitions_dict', 'loadavg'],\n 'node.cpu': ['percent', 'count'],\n 'node.memory': ['available'],\n 'node.partition': ['free', 'used'],\n 'node.loadavg': ['last_5min'],\n }\n self._cluster_processes_stats.included_field_lists = {\n 'process': ['monit_name', 'unified_service_name', 'application_id',\n 'port', 'cpu', 'memory', 'children_stats_sum'],\n 'process.cpu': ['user', 'system', 'percent'],\n 'process.memory': ['resident', 'virtual', 'unique'],\n 'process.children_stats_sum': ['cpu', 'memory'],\n }\n self._cluster_proxies_stats.included_field_lists = {\n 'proxy': ['name', 'unified_service_name', 'application_id',\n 'frontend', 'backend'],\n 'proxy.frontend': ['scur', 'smax', 'rate', 'req_rate', 'req_tot'],\n 'proxy.backend': ['qcur', 'scur', 'hrsp_5xx', 'qtime', 'rtime'],\n }\n\n # All routes (handlers will be assigned during configuration)\n self._routes = {\n '/stats/local/node/cache': None,\n '/stats/local/node/current': None,\n '/stats/local/processes/cache': None,\n '/stats/local/processes/current': None,\n '/stats/local/proxies/cache': None,\n '/stats/local/proxies/current': None,\n '/stats/cluster/nodes': None,\n '/stats/cluster/processes': None,\n '/stats/cluster/proxies': None,\n }\n self._publishers = []",
"def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary",
"def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()",
"def updateRcloneJobStatus():\n global jobIds, jobStatusGauge\n\n # Check if the jobs are running, update the variables\n for jobName, jobId in jobIds.items():\n jobIsRunning = getRcloneJobRunning(jobId)\n jobIds[jobName] = jobId if jobIsRunning else None\n jobStatusGauge.labels(rclone_job=jobName).set(1 if jobIsRunning else 0)",
"def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers",
"def getSlaveNames():",
"def slaves(self):\n return sorted(self.get_ns_name(ns) for ns in self.profile.slaves.all())",
"def mmo_cluster_serverStatus(self, mmo_connection, inc_mongos, poll=False):\n serverStatus = self.mmo_execute_on_cluster(mmo_connection, \"serverStatus\", inc_mongos)\n if os.path.exists(\"/tmp/server_status.p\"):\n os.rename(\"/tmp/server_status.p\", \"/tmp/server_status.previous\")\n pickle.dump(serverStatus, open(\"/tmp/server_status.p\", \"wb\"))\n return serverStatus",
"def test_get_node_status_batterystatus(self):\n pass",
"def replication_status(self):\n psql = postgresql_svc.PSQL()\n try:\n query_out = psql.execute(self.replication_status_query)\n except PopenError, e:\n if 'function pg_last_xact_replay_timestamp() does not exist' in str(e):\n raise BaseException('This version of PostgreSQL server does not support replication status')\n else:\n raise e\n query_result = self._parse_query_out(query_out)\n\n is_master = int(__postgresql__[OPT_REPLICATION_MASTER])\n\n if query_result['xlog_delay'] is None:\n if is_master:\n return {'master': {'status': 'up'}}\n return {'slave': {'status': 'down',\n 'error': query_result['error']}}\n return {'slave': {'status': 'up',\n 'xlog_delay': query_result['xlog_delay']}}",
"def _cluster_status_action(self):\n yaml_load_err = \"Status of '{}' could not be loaded as yaml:\\n{}\"\n status_raw = zaza.model.run_action_on_leader(\"ovn-central\",\n \"cluster-status\")\n status_data = status_raw.data[\"results\"]\n # Verify expected items in the action result\n self.assertIn(\"ovnnb\", status_data)\n self.assertIn(\"ovnsb\", status_data)\n\n try:\n nb_status = yaml.safe_load(status_data[\"ovnnb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"northbound-cluster\",\n status_data[\"ovnnb\"]))\n try:\n sb_status = yaml.safe_load(status_data[\"ovnsb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"southbound-cluster\",\n status_data[\"ovnsb\"]))\n\n return sb_status, nb_status",
"def get_job_status(parent_pid, heart_pid):\n status_container = {}\n if parent_pid != -1:\n status_container[\"memory\"] = get_memory_usage(parent_pid, heart_pid)\n status_container[\"cpu_load\"] = get_cpu_load(parent_pid, heart_pid)\n return status_container",
"def test_get_virtual_machine_count_metrics(self):\n pass",
"def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))",
"def getMasterDiff(new, old):\n diff = ethercat_monitor.msg.EtherCATMasterStatus()\n diff.sent = new.sent - old.sent\n diff.dropped = new.dropped - old.dropped\n diff.late = new.late - old.late\n diff.unassigned_drops = new.unassigned_drops - old.unassigned_drops\n return diff",
"async def _status():\n # TODO(Deepankar): should we add versions of executors?\n return {\n 'status_code': status.HTTP_200_OK,\n 'jina_version': jina_version\n }",
"def test_cluster_status(self):\n application = zaza.model.get_application(\"ovn-central\")\n sb_status, nb_status = self._cluster_status_action()\n\n # Verify that cluster status includes \"unit_map\" field with correct\n # type\n for status in (nb_status, sb_status):\n self.assertIn(\"unit_map\", status)\n self.assertIsInstance(status[\"unit_map\"], dict)\n\n # Verify that units and their Server IDs are properly paired\n expected_mapping = {}\n for unit in application.units:\n unit_name = unit.entity_id\n nb_status_cmd = self.NB_CMD.format(\"cluster/status OVN_Northbound\")\n sb_status_cmd = self.SB_CMD.format(\"cluster/status OVN_Southbound\")\n nb_cluster_status = zaza.model.run_on_unit(unit_name,\n nb_status_cmd)\n sb_cluster_status = zaza.model.run_on_unit(unit_name,\n sb_status_cmd)\n nb_id = nb_cluster_status[\"Stdout\"].splitlines()[0]\n sb_id = sb_cluster_status[\"Stdout\"].splitlines()[0]\n expected_mapping[unit_name] = {\"sb_id\": sb_id, \"nb_id\": nb_id}\n\n for unit_name, unit_data in expected_mapping.items():\n sb_id = unit_data[\"sb_id\"]\n nb_id = unit_data[\"nb_id\"]\n self.assertEqual(sb_status[\"unit_map\"][unit_name], sb_id)\n self.assertEqual(nb_status[\"unit_map\"][unit_name], nb_id)",
"def derive_newrelic_innodb(self):\n # InnoDB Metrics\n vals = self.get_values([\"status/innodb_pages_created\", \"status/innodb_pages_read\",\n \"status/innodb_pages_written\", \"status/innodb_buffer_pool_read_requests\",\n \"status/innodb_buffer_pool_reads\", \"status/innodb_data_fsyncs\",\n \"status/innodb_os_log_fsyncs\"])\n if vals:\n created, read, written, bp_read_requests, bp_reads, data_fsync, log_fsync = vals\n self.update_metric(\"newrelic/innodb_bp_pages_created\", created)\n self.update_metric(\"newrelic/innodb_bp_pages_read\", read)\n self.update_metric(\"newrelic/innodb_bp_pages_written\", written)\n\n hit_ratio = 0.0\n if (bp_read_requests + bp_reads) > 0:\n hit_ratio = (bp_read_requests / (bp_read_requests + bp_reads)) * 100.0\n\n self.update_metric(\"newrelic/pct_innodb_buffer_pool_hit_ratio\", hit_ratio)\n self.update_metric(\"newrelic/innodb_fsyncs_data\", data_fsync)\n self.update_metric(\"newrelic/innodb_fsyncs_os_log\", log_fsync)\n\n # InnoDB Buffer Metrics\n vals = self.get_values([\"status/innodb_buffer_pool_pages_total\", \"status/innodb_buffer_pool_pages_data\",\n \"status/innodb_buffer_pool_pages_misc\", \"status/innodb_buffer_pool_pages_dirty\",\n \"status/innodb_buffer_pool_pages_free\"])\n if vals:\n pages_total, pages_data, pages_misc, pages_dirty, pages_free = vals\n unassigned = pages_total - pages_data - pages_free - pages_misc\n\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_clean\", pages_data - pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_dirty\", pages_dirty)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_misc\", pages_misc)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_free\", pages_free)\n self.update_metric(\"newrelic/innodb_buffer_pool_pages_unassigned\", unassigned)",
"def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()",
"def test_tableau_server_parse_status_metrics(self):\n xml_response = self.init_default_check()\n got = TableauServerStatusParser.tableau_server_parse_status_metrics(xml_response=xml_response[0])\n expected = 'tableau_server_process_status'\n self.assertEqual(expected, got.name)",
"def test_get_refresh_job_status(self):\n pass",
"def upscale_cluster_info(VMname, master=False):\n with open('TemporaryInfo.json', mode='r') as jsonfile:\n TemporaryInfo = json.load(jsonfile)\n privateIP = TemporaryInfo.get(\"privateIpAddress\")\n publicIP = TemporaryInfo.get(\"publicIpAddress\")\n jsonfile.close()\n\n with open('ClusterInfo.json', mode='r') as jsonfile:\n if len(jsonfile.readline()) == 0:\n sys.exit('Error: ClusterInfo.json file appears to be empty.')\n else:\n jsonfile.seek(0,0) # Return the pointer to the beginning of the file\n ClusterInfo = json.load(jsonfile)\n nrSlaves = ClusterInfo[0].get(\"NumberSlaves\")\n jsonfile.close()\n\n with open('ClusterInfoUpdated.json', mode='w') as jsonfile:\n if master:\n if ClusterInfo[0][\"ExistMaster\"]:\n sys.exit('Error: Trying to add a master while according to ClusterInfo there already is one.')\n else:\n newmaster = {}\n newmaster['privateIP'] = privateIP\n newmaster['publicIP'] = publicIP\n newmaster['role'] = 'Master_and_Slave'\n newmaster['VMname'] = VMname\n nrSlaves += 1 # Adding a new slave to the count\n ClusterInfo[0][\"ExistMaster\"] = True\n ClusterInfo.append(newmaster)\n\n if not ClusterInfo[0][\"ExistMaster\"]:\n sys.exit('Error: Trying to add a slave while according to ClusterInfo there is no master.')\n if not master:\n nrSlaves += 1 # Adding a new slave to the count\n newslave = {}\n newslave['privateIP'] = privateIP\n newslave['publicIP'] = publicIP\n newslave['VMname'] = VMname\n newslave['SlaveID'] = str(nrSlaves)\n newslave['role'] = 'Slave'\n ClusterInfo.append(newslave)\n\n ClusterInfo[0][\"NumberSlaves\"] = nrSlaves\n json.dump(ClusterInfo, jsonfile)\n jsonfile.close()\n\n return",
"def test_get_virtual_machine_count_metrics1(self):\n pass",
"def remote_status():",
"def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")",
"def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data",
"def getClientJobsInformation(client):\n # getSlaveForDispatch()\n #jobs = mongo.db.jobs.find({'owner': client, 'is_active': True})\n jobs = mongo.db.jobs.find({'is_active': True})\n\n # result = i.title()\n # if any([s.get('status')=='on progress' for s in tasks]):\n # result = 'On Progress'\n # return result\n\n result = [{\n 'name': j.get('name'),\n 'datetime': j.get('datetime'),\n 'status': getJobStatus(j),\n 'priority': j.get('priority'),\n 'progress': sum([t.get('progress') for t in mongo.db.tasks.find({'job': j.get('_id')})]) /\n (mongo.db.tasks.find({'job': j.get('_id')}).count() or -1),\n 'id': str(j.get('_id')),\n 'tasks_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True}).count(),\n 'failed_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True, 'status': 'failed'}).count(),\n 'completed_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True, 'status': 'completed'}).count(),\n 'active_task': 'Frame 43',\n } for j in jobs]\n return result or {}",
"def _remote_worker_ids_for_metrics(self) -> List[int]:\n return self.workers.healthy_worker_ids()",
"def derive_newrelic_throughput(self):\n # read and write throughput\n self.update_metric(\"newrelic/bytes_reads\", self.sum_of([\"status/bytes_sent\"]))\n self.update_metric(\"newrelic/bytes_writes\", self.sum_of([\"status/bytes_received\"]))\n\n # Connection management\n vals = self.get_values([\"status/threads_connected\", \"status/threads_running\", \"status/threads_cached\"])\n if vals:\n connected, running, cached = vals\n self.update_metric(\"newrelic/connections_connected\", connected)\n self.update_metric(\"newrelic/connections_running\", running)\n self.update_metric(\"newrelic/connections_cached\", cached)\n pct_connection_utilization = 0.0\n if vals[0] > 0:\n pct_connection_utilization = (running / connected) * 100.0\n self.update_metric(\"newrelic/pct_connection_utilization\", pct_connection_utilization)"
] | [
"0.7028809",
"0.5855735",
"0.5841473",
"0.5718452",
"0.57006025",
"0.560662",
"0.55952454",
"0.5494366",
"0.548229",
"0.543067",
"0.5367218",
"0.5296817",
"0.5293506",
"0.52714247",
"0.527107",
"0.52170867",
"0.5173371",
"0.51723516",
"0.5159897",
"0.5144632",
"0.5134972",
"0.5133903",
"0.51303995",
"0.51268387",
"0.51185685",
"0.5092153",
"0.5084869",
"0.5081146",
"0.5069976",
"0.5066066"
] | 0.71810716 | 0 |
Parse the values from mysql, converting them to floats when necessary on|yes|true => 1 off|no|false => 0 null => -1 | def parse_metric_value(self, value):
if isinstance(value, str):
if value == "":
return None
# yes|true|on
if self.is_true.match(value):
return 1
# no|false|off
if self.is_false.match(value):
return 0
if self.is_null.match(value):
return -1
# anything else, try to convert it to a float
try:
r = float(value)
return r
except:
pass
return None
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convertToFloat(boolean: bool) -> float:\n ...",
"def parse_value(self, value):\n\t\t\n\t\tif goodies.is_float(value):\n\t\t\treturn float(value)\n\t\telif goodies.is_int(value):\n\t\t\treturn int(value)\n\t\telif goodies.is_bool(value):\n\t\t\treturn bool(value.capitalize())\n\t\telse:\n\t\t\treturn value",
"def coerce(s1):\n if s1==\"true\":\n return True\n elif s1==\"false\":\n return False\n elif s1.isnumeric():\n return int(s1)\n elif '.' in s1 and s1.replace('.','').isnumeric():\n return float(s1)\n else:\n return s1",
"def cast(val):\n if val.lower() == str(True).lower():\n return True\n elif val.lower() == str(False).lower():\n return False\n\n try:\n return int(val)\n except ValueError:\n pass\n\n try:\n return float(val)\n except ValueError:\n pass\n\n return val",
"def _parse_env_value(val):\n if val.lower() == \"false\":\n return False\n elif val.lower() == \"true\":\n return True\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n return val",
"def convertToFloat(vote):\n if vote == 'y':\n return 1\n if vote == 'n':\n return -1\n if vote == '?':\n return 0",
"def convert_val(val_str, val):\n if val is bool:\n if 'true' in val_str.lower(): val_str = 'true' \n else: val_str = '' # otherwise set to false\n val_type = val\n try:\n return val_type(val_str)\n except ValueError:\n # Can it be a float ?\n return val_type(float(val_str))",
"def cast(value):\n try:\n value = int(value)\n except ValueError:\n if value.lower().strip() in [\"true\", \"t\", \"1\", \"yes\"]:\n value = True\n elif value.lower().strip() in [\"false\", \"f\", \"no\", \"0\"]:\n value = False\n return value",
"def complete_opt_lossy_json_float(self, text, *_):\n return [t for t in (\"true\", \"false\", \"yes\", \"no\") if t.startswith(text.lower())]",
"def _convert_value_to_correct_datatype(value: str):\n if value == 'true':\n return True\n if value == 'false':\n return False\n try:\n result = ast.literal_eval(value)\n return result\n except Exception: # if it is not possible to evaluate the value then consider it as a string\n return value",
"def values(self, command, separator=',', cast=float, preprocess_reply=None, maxsplit=-1,\n **kwargs):\n results = self.ask(command, **kwargs).strip()\n if callable(preprocess_reply):\n results = preprocess_reply(results)\n elif callable(self.preprocess_reply):\n results = self.preprocess_reply(results)\n results = results.split(separator, maxsplit=maxsplit)\n for i, result in enumerate(results):\n try:\n if cast == bool:\n # Need to cast to float first since results are usually\n # strings and bool of a non-empty string is always True\n results[i] = bool(float(result))\n else:\n results[i] = cast(result)\n except Exception:\n pass # Keep as string\n return results",
"def parse_temp(value):\n\n if isinstance(value, str):\n value = \"\".join(value.split())\n if value.lower() == OFF:\n return OFF\n\n try:\n return float(value)\n except (ValueError, TypeError):\n return None",
"def parse_bool(value):\n if value in (\"true\", \"True\", \"yes\", \"1\", \"on\"):\n return True\n if value in (\"false\", \"False\", \"None\", \"no\", \"0\", \"off\"):\n return False\n return bool(int(value))",
"def _parse(self, val):\n if self.type == \"integer\":\n return int(val)\n elif self.type == \"number\":\n return float(val)\n elif self.type == \"boolean\":\n lower_val = str(val).lower()\n if lower_val not in {\"true\", \"false\"}:\n msg = \"Boolean parameter '{}' only accept True/False, got {}.\"\n raise ValidationException(\n message=msg.format(self.name, val),\n no_personal_data_message=msg.format(\"[self.name]\", \"[val]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return True if lower_val == \"true\" else False\n return val",
"def _convert(schema, section, option, value):\n t = schema[section][option][0]\n \n if t == \"str\":\n result = value\n elif t == \"int\":\n result = int(value)\n elif t == \"float\":\n result = float(value)\n elif t == \"bool\":\n if str(value).upper() in (\"1\", \"TRUE\", \"YES\", \"Y\"):\n result = True\n elif str(value).upper() in (\"0\", \"FALSE\", \"NO\", \"N\"):\n result = False\n else:\n raise ValueError(\"Not a proper boolean value\")\n else:\n raise ValueError(\"option can only be of type \"\n \"int, float, str, or bool\")\n return section, option, result",
"def convertToDouble(boolean: bool) -> float:\n ...",
"def _convert_bool(self) -> pd.Series:\n\n if self.requires_nan:\n dtype = \"float\"\n else:\n dtype = \"bool\"\n\n return self._convert(dtype=dtype)",
"def coerce_value(value):\n if not isinstance(value, string_types):\n return value\n\n if re.match(r'^[0-9]+$', value):\n return int(value)\n\n if re.match(r'^[.0-9]+$', value):\n try:\n return float(value)\n except ValueError:\n return value\n\n if re.match(r'^(true|false|yes|no)$', value, flags=re.IGNORECASE):\n return bool(util.strtobool(value))\n\n return value",
"def convert_str(strr):\n if strr.lower() == 'true' or strr.lower() == 't':\n return True\n if strr.lower() == 'false' or strr.lower() == 'f':\n return False\n\n try:\n float_value = float(strr)\n return float_value\n except ValueError as ex:\n print(repr(ex))\n\n return strr",
"def test_wiki_toc_isfloat_true(self):\n from .wiki_toc import isfloat\n value = isfloat(value='40.22222')\n self.assertTrue(value is True)",
"def _parse_params(params):\n for key, value in params.items():\n if value.lower() in ('none', 'null', ''):\n params[key] = None\n elif value.lower() == 'true':\n params[key] = True\n elif value.lower() == 'false':\n params[key] = False\n elif value.isdigit() or (value[0] == '-' and value[1:].isdigit()):\n params[key] = int(value)\n elif ',' in value:\n params[key] = list(map(lambda x: x.strip(), value.split(',')))\n else:\n try:\n params[key] = float(value)\n except:\n pass\n return params",
"def _get_value_type(self, value):\n\n value = value.strip()\n\n if value == 'True':\n return True\n elif value == 'False':\n return False\n else:\n try:\n return_value = int(value)\n except ValueError:\n try:\n return_value = float(value)\n except ValueError:\n return value\n\n return return_value",
"def test_boolean_custom_values(self):\n true_values = ['YES', 'yes', 'Yes']\n false_values = ['NO', 'no', 'No']\n wrong_values = ['true', 'false', 'True', 'False', 'y', 'n', 'Y', 'N', 't', '1', 1, '0', 0]\n descriptor = self.base_field_descriptor\n descriptor['type'] = 'boolean'\n # only 'default' format\n descriptor['format'] = 'default'\n descriptor['trueValues'] = true_values\n descriptor['falseValues'] = false_values\n\n f = SchemaField(descriptor)\n for v in true_values:\n self.assertTrue(f.cast(v))\n for v in false_values:\n self.assertFalse(f.cast(v))\n for v in wrong_values:\n with self.assertRaises(Exception):\n f.cast(v)",
"def parse_input(userstring):\n xsplit = userstring.split()\n stringtovalues = [float(x) for x in xsplit]\n\n return stringtovalues",
"def detect_truth_value(astring):\n value = False\n if astring.lower() in [\"true\", \"t\"]:\n value = True\n else:\n try:\n value = float(valuenode.raw)\n except:\n pass\n return value",
"def value_type_check(table_rows):\n types = table_rows[0].types\n rst = True\n lst = []\n row_num = 0\n for row in table_rows:\n for i in range(0, len(row.values)):\n data_type = types[i].strip().upper()\n value = row.values[i].strip()\n if(data_type == \"INT\"):\n if(value != \"\" and _is_int(value) == False):\n rst = False\n lst.append(\"(col:{0},row:{1},value:{2})\".format(\n i, row_num, row.values[i]\n ))\n\n elif(data_type == \"FLOAT\"):\n if(value != \"\" and _is_float(value) == False):\n rst = False\n lst.append(\"(col:{0},row:{1},value:{2})\".format(\n i, row_num, row.values[i]\n ))\n row_num += 1\n return rst,\",\".join(lst)",
"def check_for_float(check):",
"def translate(value):\n if re.match(r\"true\",value,re.IGNORECASE) or re.match(r\"t\",value,re.IGNORECASE):\n return True\n if re.match(r\"false\",value,re.IGNORECASE) or re.match(r\"f\",value,re.IGNORECASE):\n return False\n if re.match(r\"none\",value,re.IGNORECASE):\n return None\n try:\n return int(value)\n except:\n pass\n try:\n return float(value)\n except:\n pass\n return value",
"def convert_bool(string):\n if string == 'True':\n return True, True\n elif string == 'False':\n return True, False\n else:\n return False, False",
"def _check_value(value, field):\n if not value:\n return False\n\n if field.get('date', False):\n # Get date format\n date_format = field.get('date_format', False) or json_pattern_part.get('date_format', False) or self.master_json_pattern.get('date_format', False)\n if date_format:\n value = datetime.strptime(value, date_format)\n\n if field.get('name'):\n field_name = field.get('name')\n # Get the type of the column and cast if necessary\n field_type = model_obj._columns[field_name]._type\n if field_type == 'integer':\n try:\n value = int(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n elif field_type == 'float':\n try:\n value = float(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n return value"
] | [
"0.5576609",
"0.5521588",
"0.5519995",
"0.53601056",
"0.5292483",
"0.52703524",
"0.52464485",
"0.5240127",
"0.5214614",
"0.5169053",
"0.51640624",
"0.51472664",
"0.51410556",
"0.5130782",
"0.5097779",
"0.50775486",
"0.506641",
"0.5049185",
"0.5041503",
"0.5037315",
"0.5005407",
"0.49843347",
"0.4971924",
"0.49715507",
"0.4961228",
"0.4958113",
"0.49509373",
"0.4947762",
"0.49282593",
"0.48687232"
] | 0.5565192 | 1 |
Parse the innodb status results and pull interesting metrics from it. | def parse_innodb_status_stats(self, cursor):
rows = list(cursor)
metrics = {
"history_list_length": "^History list length\s+(\d+)",
"log_sequence_number": "^Log sequence number\s+(\d+)",
"last_checkpoint": "^Last checkpoint at\s+(\d+)",
"queries_inside_innodb": "^(\d+)\s+queries inside InnoDB",
"queries_in_queue": "queries inside InnoDB,\s+(\d+)\s+queries in queue",
}
result = {
'log_sequence_number': 0.0,
'last_checkpoint': 0.0
}
if len(rows) > 0:
text = rows[0][-1]
for m in metrics:
match = re.search(metrics[m], text, re.MULTILINE)
if match is not None:
result[m] = match.group(1)
result['checkpoint_age_metric'] = (float(result.get('log_sequence_number', 0.0)) -
float(result.get('last_checkpoint', 0.0)))
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()",
"def test_tableau_server_parse_status_metrics(self):\n xml_response = self.init_default_check()\n got = TableauServerStatusParser.tableau_server_parse_status_metrics(xml_response=xml_response[0])\n expected = 'tableau_server_process_status'\n self.assertEqual(expected, got.name)",
"def analyze_results(self, results):\n ok_c = 0\n ko_c = 0\n for row in results:\n if \"opentsdb.health\" not in row[2] and \".health\" in row[2]:\n if row[4] == \"ERROR\":\n ko_c += 1\n else:\n ok_c += 1\n return ok_c, ko_c",
"def process_resp(self, msg, operation, status, index):\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, operation)\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, status))\n if status == \"0\":\n self.cause.extend(msg)\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, \"health\")\n analyse_status = MonitorStatus[\"red\"]\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, analyse_status))",
"def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break",
"def _read_status(self, cls=MySQLStatus):",
"def ParseNodeStats(self):\n mc = subprocess.Popen([MOCACTL, 'show', '--nodestats', str(self.NodeID)],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n rx_err = 0\n for line in out.splitlines():\n tx = TX_RE.search(line)\n if tx is not None:\n type(self).TxPackets.Set(self, IntOrZero(tx.group(1)))\n rx = RX_RE.search(line)\n if rx is not None:\n type(self).RxPackets.Set(self, IntOrZero(rx.group(1)))\n e1 = E1_RE.search(line)\n if e1 is not None:\n rx_err += IntOrZero(e1.group(1))\n e2 = E2_RE.search(line)\n if e2 is not None:\n rx_err += IntOrZero(e2.group(1))\n type(self).RxErroredAndMissedPackets.Set(self, rx_err)",
"def status():\n schedule_log(\"Starting Elasticsearch Monitor\")\n\n command_text = 'curl http://127.0.0.1:9200/_stats'\n\n schedule_log('Running: %s' % command_text)\n\n output, error = safe_run(command_text)\n\n try:\n data = json.loads(output)\n\n schedule_log('Loaded json, saving.')\n\n save(True, output, mongo_database(), mongo_collection(), output)\n except Exception as ex:\n schedule_log('Reporting as failed.')\n schedule_log('%s' % ex)\n schedule_log(output)\n error = '%s'\n\n if error:\n save(False, {}, mongo_database(), mongo_collection(), error)\n\n schedule_log('Finished')",
"def parse_megacli_status(status):\n\n try:\n command = ['/usr/sbin/megacli', '-LdPdInfo', '-aAll']\n proc = subprocess.Popen(command, stdout=subprocess.PIPE)\n except:\n print('Unable to run: {}'.format(command))\n\n for line in proc.stdout:\n line = line.strip(' \\t\\r\\n')\n\n if len(line) == 0:\n continue\n\n _process_line(line, status)\n\n proc.wait()",
"def server_status(profile=\"default\"):\n ret = {\n \"Scoreboard\": {\n \"_\": 0,\n \"S\": 0,\n \"R\": 0,\n \"W\": 0,\n \"K\": 0,\n \"D\": 0,\n \"C\": 0,\n \"L\": 0,\n \"G\": 0,\n \"I\": 0,\n \".\": 0,\n },\n }\n\n # Get configuration from pillar\n url = __salt__[\"config.get\"](\n \"apache.server-status:{}:url\".format(profile), \"http://localhost/server-status\"\n )\n user = __salt__[\"config.get\"](\"apache.server-status:{}:user\".format(profile), \"\")\n passwd = __salt__[\"config.get\"](\"apache.server-status:{}:pass\".format(profile), \"\")\n realm = __salt__[\"config.get\"](\"apache.server-status:{}:realm\".format(profile), \"\")\n timeout = __salt__[\"config.get\"](\n \"apache.server-status:{}:timeout\".format(profile), 5\n )\n\n # create authentication handler if configuration exists\n if user and passwd:\n basic = urllib.request.HTTPBasicAuthHandler()\n basic.add_password(realm=realm, uri=url, user=user, passwd=passwd)\n digest = urllib.request.HTTPDigestAuthHandler()\n digest.add_password(realm=realm, uri=url, user=user, passwd=passwd)\n urllib.request.install_opener(urllib.request.build_opener(basic, digest))\n\n # get http data\n url += \"?auto\"\n try:\n response = urllib.request.urlopen(url, timeout=timeout).read().splitlines()\n except urllib.error.URLError:\n return \"error\"\n\n # parse the data\n for line in response:\n splt = line.split(\":\", 1)\n splt[0] = splt[0].strip()\n splt[1] = splt[1].strip()\n\n if splt[0] == \"Scoreboard\":\n for c in splt[1]:\n ret[\"Scoreboard\"][c] += 1\n else:\n if splt[1].isdigit():\n ret[splt[0]] = int(splt[1])\n else:\n ret[splt[0]] = float(splt[1])\n\n # return the good stuff\n return ret",
"def status(self, database_name=False):\n if database_name:\n raw = self._send_command(database_name, \"status\")\n return parse_statusline(raw)\n else:\n raw = self._send_command(\"#all\", \"status\")\n return [parse_statusline(line) for line in raw.split(\"\\n\")]",
"def parse_collectd_result(self, metrics):\n result = {\n \"cpu\": {},\n \"memory\": {},\n \"hugepages\": {},\n \"dpdkstat\": {},\n \"virt\": {},\n \"ovs_stats\": {},\n \"intel_pmu\": {},\n }\n testcase = \"\"\n\n # unicode decode\n decoded = ((safe_decode(k, 'utf-8'), safe_decode(v, 'utf-8')) for k, v in metrics.items())\n for key, value in decoded:\n key_split = key.split(\"/\")\n res_key_iter = (key for key in key_split if \"nsb_stats\" not in key)\n res_key0 = next(res_key_iter)\n res_key1 = next(res_key_iter)\n\n if \"cpu\" in res_key0 or \"intel_rdt\" in res_key0 or \"intel_pmu\" in res_key0:\n cpu_key, name, metric, testcase = \\\n self.get_cpu_data(res_key0, res_key1, value)\n result[\"cpu\"].setdefault(cpu_key, {}).update({name: metric})\n\n elif \"memory\" in res_key0:\n result[\"memory\"].update({res_key1: value.split(\":\")[0]})\n\n elif \"hugepages\" in res_key0:\n result[\"hugepages\"].update(self.parse_hugepages(key_split, value))\n\n elif \"dpdkstat\" in res_key0:\n result[\"dpdkstat\"].update(self.parse_dpdkstat(key_split, value))\n\n elif \"virt\" in res_key1:\n result[\"virt\"].update(self.parse_virt(key_split, value))\n\n elif \"ovs_stats\" in res_key0:\n result[\"ovs_stats\"].update(self.parse_ovs_stats(key_split, value))\n\n result[\"timestamp\"] = testcase\n\n return result",
"def update_status_metrics(status: EnodebStatus) -> None:\n # Call every second\n metrics_by_stat_key = {\n 'enodeb_connected': metrics.STAT_ENODEB_CONNECTED,\n 'enodeb_configured': metrics.STAT_ENODEB_CONFIGURED,\n 'opstate_enabled': metrics.STAT_OPSTATE_ENABLED,\n 'rf_tx_on': metrics.STAT_RF_TX_ENABLED,\n 'gps_connected': metrics.STAT_GPS_CONNECTED,\n 'ptp_connected': metrics.STAT_PTP_CONNECTED,\n 'mme_connected': metrics.STAT_MME_CONNECTED,\n }\n\n def get_metric_value(enodeb_status, key):\n # Metrics are \"sticky\" when synced to the cloud - if we don't\n # receive a status update from enodeb, set the metric to 0\n # to explicitly indicate that it was not received, otherwise the\n # metrics collector will continue to report the last value\n if key not in enodeb_status:\n return 0\n\n try:\n return int(enodeb_status[key])\n except ValueError:\n logging.error('Could not cast metric value %s to int',\n enodeb_status[key])\n return 0\n\n for stat_key, metric in metrics_by_stat_key.items():\n metric.set(get_metric_value(status, stat_key))",
"def fetch_status():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((GEARMAND_HOST, GEARMAND_PORT))\n log_verbose('Connected to Gearmand at %s:%s' % (GEARMAND_HOST, GEARMAND_PORT))\n except socket.error, e:\n collectd.error('gearmand_info plugin: Error connecting to %s:%d - %r'\n % (GEARMAND_HOST, GEARMAND_PORT, e))\n return None\n fp = s.makefile('r')\n log_verbose('Sending info command')\n s.sendall('status\\r\\n')\n\n status = {}\n while True:\n data = fp.readline().strip()\n log_verbose('Received data: %r' % data)\n if not data or data == '.':\n break\n function, total, running, available_workers = data.split('\\t')\n status[function] = {\n 'total': total,\n 'running': running,\n 'available_workers': available_workers}\n\n s.close()\n return status",
"def parse_statusline(line):\n if line.startswith(\"=\"):\n line = line[1:]\n if not line.startswith(\"sabdb:\"):\n raise OperationalError(\"wrong result received\")\n\n code, prot_version, rest = line.split(\":\", 2)\n\n if prot_version not in [\"1\", \"2\"]:\n raise InterfaceError(\"unsupported sabdb protocol\")\n else:\n prot_version = int(prot_version)\n\n subparts = rest.split(\",\")\n sub_iter = iter(subparts)\n\n info = {}\n\n info[\"name\"] = next(sub_iter)\n info[\"path\"] = next(sub_iter)\n info[\"locked\"] = next(sub_iter) == \"1\"\n info[\"state\"] = int(next(sub_iter))\n info[\"scenarios\"] = next(sub_iter).split(\"'\")\n if prot_version == 1:\n next(sub_iter)\n info[\"start_counter\"] = int(next(sub_iter))\n info[\"stop_counter\"] = int(next(sub_iter))\n info[\"crash_counter\"] = int(next(sub_iter))\n info[\"avg_uptime\"] = int(next(sub_iter))\n info[\"max_uptime\"] = int(next(sub_iter))\n info[\"min_uptime\"] = int(next(sub_iter))\n info[\"last_crash\"] = int(next(sub_iter))\n info[\"last_start\"] = int(next(sub_iter))\n if prot_version > 1:\n info[\"last_stop\"] = int(next(sub_iter))\n info[\"crash_avg1\"] = next(sub_iter) == \"1\"\n info[\"crash_avg10\"] = float(next(sub_iter))\n info[\"crash_avg30\"] = float(next(sub_iter))\n\n return info",
"def get_status_messages(self):\n\n try:\n subContext = conf.EHST_MESSAGES\n connHandler = self._tap._TapPlus__getconnhandler()\n response = connHandler.execute_tapget(subContext, verbose=False)\n if response.status == 200:\n for line in response:\n string_message = line.decode(\"utf-8\")\n print(string_message[string_message.index('=') + 1:])\n except OSError:\n print(\"Status messages could not be retrieved\")",
"def parse_uptime():\n\tr = subprocess.check_output([\"uptime\"])\n\tparsed_dict = {}\n\n\t#load average over past minute\n\n\t# code for linux\n\tuptime_values = re.split(\", \", r)\n\tload_averages = re.split(\"load average: \", uptime_values[3])\n\tparsed_dict[\"load\"] = re.split(\", \",load_averages[1])[0]\n\t\n\t# code for Unix (Mac)\n\t# uptime_values = re.split(\", \", r)\n\t# load_averages = re.split(\"load averages: \", uptime_values[3])\n\t# parsed_dict[\"load\"] = re.split(\" \",load_averages[1])[0].replace(',', '.')\n\n\tparsed_dict[\"users\"] = uptime_values[2]\n\tparsed_dict[\"uptime\"] = re.split(\"up \", uptime_values[0])[1]\n\t# US formated datetime to be displayed in top right corner\n\tparsed_dict[\"date\"] = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\t# Server IP Adress\n\tparsed_dict[\"ip\"] = socket.gethostbyname(socket.gethostname())\n\t# Time to be displayed in alert container\n\n\treturn parsed_dict",
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"async def get_status(self) -> dict[str, Any]:\n\n def check_int(s):\n if s[0] in (\"-\", \"+\"):\n return s[1:].isdigit()\n return s.isdigit()\n\n cmd = await self.send_command(\"STATUS\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n status = {\n key.lower(): int(value) if check_int(value) else float(value)\n for (key, value) in map(lambda k: k.split(\"=\"), keywords)\n }\n\n return status",
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def parse_stats(output):\n lines = [line for line in output if \"[Stats]\" in line]\n stats = {\n 'totals': {'time': 0, 'tasks': 0, 'avg': 0}\n }\n for line in lines:\n m = re.search(r'\\((\\d+) ms\\).+\\((\\d+)\\).+\\((\\d+) us.+\\)', line)\n if not m:\n continue\n dt, tasks, avg = map(int, m.groups())\n if 'totals' in line:\n stats['totals'] = {'time': dt, 'tasks': tasks, 'avg': avg}\n return stats",
"def _get_error_info(self, result, log):\n _ = '/opt/l2deploy/logs/OverallStatusReport'\n f = self._remote_cmd(\"grep '{}' {}\".format(_, log))\n f = f.get('output').split('[')[-1][:-1]\n\n for n in [result] if self.nodes == 1 else result['nodes']:\n if 'failed' == n.get('status').lower():\n # 10th line in the detail report contains the required info\n c = \"grep -A 10 {} {}\".format(n.get('server'), f)\n c += \" | grep OS_Install_Status_Detail\"\n e = self._remote_cmd(c).get('output').split(':', 1)[1]\n LOG.info(\"{} failed due to {}\".format(n['server'], e))",
"def _parse_result(self, responses, verbose=False):\n\n # loading the columns config\n colConfig = None\n if self._current_service:\n colConfig = self._column_configs.get(self._current_service)\n self._current_service = None # clearing current service\n\n resultList = []\n\n for resp in responses:\n result = resp.json()\n\n # check for error message\n if result['status'] == \"ERROR\":\n raise RemoteServiceError(result.get('msg', \"There was an error with your request.\"))\n\n resTable = _mashup_json_to_table(result, colConfig)\n resultList.append(resTable)\n\n allResults = vstack(resultList)\n\n # Check for no results\n if not allResults:\n warnings.warn(\"Query returned no results.\", NoResultsWarning)\n return allResults",
"def ParseNodeStatus(self):\n mc = subprocess.Popen([MOCACTL, 'show', '--nodestatus', str(self.NodeID)],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n bitloading = [[], []]\n bitloadidx = 0\n for line in out.splitlines():\n mac = MAC_RE.search(line)\n if mac is not None:\n type(self).MACAddress.Set(self, mac.group(1))\n pnc = PNC_RE.search(line)\n if pnc is not None:\n preferred = False if pnc.group(1) is '0' else True\n type(self).PreferredNC.Set(self, preferred)\n ptx = PTX_RE.search(line)\n if ptx is not None:\n type(self).PHYTxRate.Set(self, (IntOrZero(ptx.group(2)) / 1000000))\n txpowercontrol = int(FloatOrZero(ptx.group(1)))\n type(self).TxPowerControlReduction.Set(self, txpowercontrol)\n prx = PRX_RE.search(line)\n if prx is not None:\n type(self).PHYRxRate.Set(self, (IntOrZero(prx.group(2)) / 1000000))\n rxpower = FloatOrZero(prx.group(1))\n type(self).RxPowerLevel.Set(self, abs(int(rxpower)))\n type(self).X_CATAWAMPUS_ORG_RxPowerLevel_dBm.Set(self, rxpower)\n rxsnr = FloatOrZero(prx.group(3))\n type(self).RxSNR.Set(self, abs(int(rxsnr)))\n type(self).X_CATAWAMPUS_ORG_RxSNR_dB.Set(self, rxsnr)\n rxb = RXB_RE.search(line)\n if rxb is not None:\n type(self).TxBcastRate.Set(self, (IntOrZero(rxb.group(2)) / 1000000))\n rxbpower = FloatOrZero(rxb.group(1))\n type(self).RxBcastPowerLevel.Set(self, abs(int(rxbpower)))\n type(self).X_CATAWAMPUS_ORG_RxBcastPowerLevel_dBm.Set(self, rxbpower)\n qam = QAM_RE.search(line)\n if qam is not None:\n qam256 = False if qam.group(1) is '0' else True\n type(self).QAM256Capable.Set(self, qam256)\n agg = AGG_RE.search(line)\n if agg is not None:\n aggcapable = IntOrZero(agg.group(1))\n type(self).PacketAggregationCapability.Set(self, aggcapable)\n if 'Unicast Bit Loading Info' in line:\n bitloadidx = 0\n if 'Broadcast Bit Loading Info' in line:\n bitloadidx = 1\n btl = BTL_RE.search(line)\n if btl is not None:\n bitloading[bitloadidx].append(line)\n (txbitl, rxbitl) = _CombineBitloading(bitloading[0])\n type(self).X_CATAWAMPUS_ORG_RxBitloading.Set(self, '$BRCM1$' + rxbitl)\n type(self).X_CATAWAMPUS_ORG_TxBitloading.Set(self, '$BRCM1$' + txbitl)",
"def get_status_data(conn_info, like_string):\n\trows = []\n\tconn = connect(conn_info)\n\tcur = conn.cursor()\n\tcur.execute(\"show status like '{}';\".format(like_string))\n\n\tfor row in cur:\n\t\trows.append(row)\n\n\tcur.close()\n\tconn.close()\n\n\treturn rows",
"def _read_status(self):\n results = self.status_table.query_items({'api_version': self.api_version})\n if not results:\n return None\n else:\n return results[0]",
"def thread_status(self,status): # general function to get datas/infos from all threads back to the main\n if status[0]==\"Update_Status\":\n if len(status)>2:\n self.update_status(status[1],wait_time=self.wait_time,log_type=status[2])\n else:\n self.update_status(status[1],wait_time=self.wait_time)\n\n elif status[0]==\"Update_scan_index\":\n #status[1] = [ind_scan,ind_average]\n self.ind_scan=status[1][0]\n self.ui.indice_scan_sb.setValue(status[1][0])\n self.ind_average = status[1][1]\n self.ui.indice_average_sb.setValue(status[1][1])\n\n elif status[0]==\"Scan_done\":\n self.ui.scan_done_LED.set_as_true()\n self.save_scan()\n if not self.overshoot:\n self.set_ini_positions()\n self.ui.set_scan_pb.setEnabled(True)\n self.ui.set_ini_positions_pb.setEnabled(True)\n self.ui.start_scan_pb.setEnabled(True)\n elif status[0]==\"Timeout\":\n self.ui.log_message.setText('Timeout occurred')",
"def _parse_results(self, handle):\n result_reader = ResultsReader(handle)\n for result in result_reader:\n\n # Diagnostic messages may be returned in the results\n if isinstance(result, Message):\n logger.debug('[{}] {}'.format(result.type, result.message))\n\n # Normal events are returned as dicts\n elif isinstance(result, dict):\n result = dict(result)\n if '_time' in result:\n result['_time'] = SplunkAbstraction._to_datetime(result['_time'])\n yield {\n 'time': result['_time'] if '_time' in result else '',\n 'metadata': {k: v for k, v in result.items() if k.startswith('_')},\n 'state': {k: v for k, v in result.items() if not k.startswith('_')}\n }\n\n else:\n logger.warning('Unknown result type in _parse_results: {}'.format(result))\n\n assert result_reader.is_preview is False",
"def _process_spark_status_log(self, itr):\n # Consume the iterator\n for line in itr:\n line = line.strip()\n\n # Check if the log line is about the driver status and extract the status.\n if \"driverState\" in line:\n self._driver_status = line.split(' : ')[1] \\\n .replace(',', '').replace('\\\"', '').strip()\n\n self.log.debug(\"spark driver status log: {}\".format(line))",
"def parse_qstat_all_output(output_lines):\n\n if len(output_lines) < 1:\n return []\n\n if len(output_lines) < 3:\n raise PBSUtilQStatError('Bad qstat output:\\n\"%s\"' % '\\n'.join(output_lines))\n\n job_statuses = []\n\n for output_line in output_lines[5:]:\n job_record = output_line.split()\n record_job_id = parse_qsub_output(job_record[0])[0]\n record_job_state = job_record[9]\n name = job_record[3]\n queue = job_record[2]\n nodes = job_record[6]\n elapsed_time = job_record[10]\n username = job_record[1]\n job_statuses.append(JobStatus(record_job_id, record_job_state, name=name, elapsed_time=elapsed_time,\n username=username, queue=queue, nodes=nodes))\n\n return job_statuses"
] | [
"0.6954353",
"0.62820905",
"0.6002656",
"0.58837825",
"0.5661363",
"0.56151706",
"0.555977",
"0.5538942",
"0.55202043",
"0.5509902",
"0.548991",
"0.54785293",
"0.5459194",
"0.5427737",
"0.5423125",
"0.5409682",
"0.5383005",
"0.53386635",
"0.5331502",
"0.5330284",
"0.53202844",
"0.5319907",
"0.53102326",
"0.53041804",
"0.52998656",
"0.52831966",
"0.52822906",
"0.5274999",
"0.52535987",
"0.5251481"
] | 0.778952 | 0 |
Opens compressed file .bz2 | def bz2_file_reader(path):
return bz2.open(path, 'rt') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compress_bz2(filename):\n bz2_filename = filename + '.bz2'\n\n with open(filename, 'rb') as og_file, bz2.BZ2File(bz2_filename, 'wb') as bz2_file:\n for data in iter(lambda : og_file.read(100 * 1024), b''):\n bz2_file.write(data)\n\n return bz2_filename",
"def compress_bz2(filename):\n bz2_filename = filename + '.bz2'\n\n with open(filename, 'rb') as og_file, bz2.BZ2File(bz2_filename, 'wb') as bz2_file:\n for data in iter(lambda : og_file.read(100 * 1024), b''):\n bz2_file.write(data)\n\n return bz2_filename",
"def decompress_bz2(filename):\n basename = os.path.basename(filename)\n newfilepath = os.path.dirname(filename) + '/' + '.'.join(basename.split('.')[0:-1]) # all but bz2\n\n with open(newfilepath, 'wb') as new_file, bz2.BZ2File(filename, 'rb') as bz2_file:\n for data in iter(lambda : bz2_file.read(100 * 1024), b''):\n new_file.write(data)\n\n return newfilepath",
"def decompress_bz2(filename):\n basename = os.path.basename(filename)\n newfilepath = os.path.dirname(filename) + '/' + '.'.join(basename.split('.')[0:-1]) # all but bz2\n\n with open(newfilepath, 'wb') as new_file, bz2.BZ2File(filename, 'rb') as bz2_file:\n for data in iter(lambda : bz2_file.read(100 * 1024), b''):\n new_file.write(data)\n\n return newfilepath",
"def bz2_file_bytes_reader(path):\n return bz2.open(path, 'rb')",
"def open_compressed(filename, open_flag='r', compression_type='bz2'):\n # create temporary HDF5 file name\n hdf5_file_name = tempfile.mkstemp('.hdf5', 'bob_')[1]\n\n if open_flag == 'r':\n # extract the HDF5 file from the given file name into a temporary file name\n tar = tarfile.open(filename, mode=\"r:\" + compression_type)\n memory_file = tar.extractfile(tar.next())\n real_file = open(hdf5_file_name, 'wb')\n real_file.write(memory_file.read())\n del memory_file\n real_file.close()\n tar.close()\n\n return bob.io.base.HDF5File(hdf5_file_name, open_flag)",
"def open_gzip(fn):\n magic = b'\\x1f\\x8b\\x08'\n l = len(magic)\n with open(fn, 'rb') as f:\n file_start = f.read(l)\n f.seek(0)\n # check if the file is compressed\n if file_start.startswith(magic):\n return gzip.open(fn, 'rt')\n # not compressed\n return open(fn, 'rt')",
"def open_gz(filename, mode):\n return gzip.open(filename, mode)",
"def gzopen(f):\n return gzip.open(f, 'rb') if f.endswith('.gz') else open(f, 'r')",
"def smart_open(filename, mode):\n if filename.endswith(\".bz2\"):\n opener = bz2.BZ2File\n elif filename.endswith(\".gz\"):\n opener = gzip.open\n else:\n opener = open\n return opener(filename, mode)",
"def open_(filename, mode=None, compresslevel=9):\n if filename[-3:] == '.gz':\n if mode is None: mode = 'rt'\n return closing(gzip.open(filename, mode, compresslevel))\n else:\n if mode is None: mode = 'r'\n return open(filename, mode)",
"def openFile(file):\n file = file.lower()\n if file.endswith('.bz2'):\n return bz2.BZ2File(file)\n elif file.endswith('.gz'):\n return gzip.open(file)\n return open(file)",
"def open_gzipped(infile, mode='rt'):\n import gzip\n import bz2\n if mode.startswith('r'):\n tmode = 'rt'\n bmode = 'r'\n elif mode.startswith('w'):\n tmode = 'wt'\n bmode = 'w'\n elif mode.startswith('a'):\n tmode = 'at'\n bmode = 'a'\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return gzip.open(infile, tmode)\n if infile.endswith('.bz2'):\n if hasattr(bz2, 'open'):\n return bz2.open(infile, tmode)\n else:\n return bz2.BZ2File(infile, bmode)\n return open(infile, tmode)",
"def bgzip_tabix(bedbz2):\n bed = bedbz2.replace(\".bz2\", \"\")\n bedgz = bed + \".gz\"\n tbi = bedgz + \".tbi\"\n if os.path.exists(bedgz) and os.path.exists(tbi):\n print bedgz, tbi, \"has beed generated.\"\n return\n c1 = \"bzip2 -d %s\" % bedbz2\n c2 = \"bgzip %s\" % bed\n c3 = \"tabix -s 1 -b 2 -e 3 %s\" % bedgz\n call_sys([c1, c2, c3])",
"def open_file(file_path, mode=None, buffer_size=FILE_BUFFER_SIZE, gzip_exts=('.gz','.gzip'), partial=False):\n import io\n \n if os.path.splitext(file_path)[1].lower() in gzip_exts:\n if mode and 'w' in mode:\n file_obj = BufferedWriter(gzip.open(file_path, mode), buffer_size)\n \n else:\n if partial:\n file_obj = BufferedReader(gzip.open(file_path, mode or 'rb'), buffer_size)\n \n else:\n try:\n file_obj = subprocess.Popen(['zcat', file_path], stdout=subprocess.PIPE).stdout\n except OSError:\n file_obj = BufferedReader(gzip.open(file_path, mode or 'rb'), buffer_size)\n \n if sys.version_info.major > 2:\n file_obj = io.TextIOWrapper(file_obj, encoding=\"utf-8\")\n \n else:\n if sys.version_info.major > 2:\n file_obj = open(file_path, mode or 'rU', buffer_size, encoding='utf-8')\n \n else:\n file_obj = open(file_path, mode or 'rU', buffer_size)\n \n return file_obj",
"def _open_zip(self):\n self.buffer = io.BytesIO()\n self.zf = zipfile.ZipFile(self.buffer, \"w\", zipfile.ZIP_DEFLATED)",
"def uncompress_bzip2(location, target_dir):\n return uncompress(location, target_dir, decompressor=bz2.BZ2File)",
"def zip_open_bin(zip, filename):\n if isinstance(zip, FakeZip):\n return zip.open(filename, 'rb')\n else:\n return zip.open(filename, 'r')",
"def genOpen(filename, mode):\n (name, ext) = os.path.splitext(filename)\n if ext == \".gz\":\n return gzip.open(filename, mode)\n else:\n return open(filename, mode)",
"def genOpen(filename, mode):\n (name, ext) = os.path.splitext(filename)\n if ext == \".gz\":\n return gzip.open(filename, mode)\n else:\n return open(filename, mode)",
"def checkBZ2(path):\n devnull = open('/dev/null', 'w+')\n proc = subprocess.Popen(['/usr/bin/bzip2', '-t', path], shell=False,\n stdin=devnull, stdout=devnull, stderr=devnull)\n proc.communicate()\n return proc.returncode == 0",
"def open_(filename, *args):\n\n if (filename[-3:] == '.gz'):\n return gzip.open(filename, *args)\n try:\n return open(filename, *args)\n except OSError:\n return gzip.open(filename + \".gz\", *args)",
"def __init__(self, path: str):\n self._path = path\n self._fp = gzip.open(self._path, mode=\"r\")",
"def _open_zipped(infile, mode='r', encoding='utf-8'):\n mode = mode[0] + 't'\n p2mode = mode\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return _zopen(infile, mode)\n if infile.endswith('.bz2'):\n return _bopen(infile, mode)\n return open(infile, p2mode, encoding=encoding)",
"def debz(oldfn, newfn):\n if os.path.isfile(newfn):\n print(\"Error: refusing to overwrite existing file '%s'\" % (newfn, ))\n return\n output = open(newfn, 'wb')\n fobj = open(oldfn, 'rb')\n\n output.write(fobj.read(24))\n while True:\n sz = struct.unpack('>L', fobj.read(4))[0]\n chunk = fobj.read(sz)\n if not chunk:\n break\n output.write(bz2.decompress(chunk))\n # unsure of this\n if sz != len(chunk):\n break\n\n output.close()",
"def open_zipped(infile, mode='r'):\n mode = mode[0] + 't'\n p2mode = mode\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return gzip.open(infile, mode)\n if infile.endswith('.bz2'):\n if hasattr(bz2, 'open'):\n return bz2.open(infile, mode)\n else:\n return bz2.BZ2File(infile, p2mode)\n return open(infile, p2mode)",
"def _gzip_file(filename):\n gzip_filename = filename + '.gz'\n with open(filename, 'rb') as f_in, gzip.open(gzip_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)",
"def smart_open(file, mode='rt', encoding='utf-8'):\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline=\"\\n\")\n return open(file, mode=mode, encoding=encoding, newline=\"\\n\")",
"def decompress_pickle(file):\n data = bz2.BZ2File(file, 'rb')\n data = cPickle.load(data)\n return data",
"def open(self):\n self.file = open(self.filename, \"rb\", buffering=self.bufferSize)"
] | [
"0.69785935",
"0.69785935",
"0.69372684",
"0.69372684",
"0.6839064",
"0.6691569",
"0.6567918",
"0.6491263",
"0.6439713",
"0.63826275",
"0.6346191",
"0.6245385",
"0.6123849",
"0.59531605",
"0.5905753",
"0.5889792",
"0.5867345",
"0.5854815",
"0.5793635",
"0.5793635",
"0.5776847",
"0.5770982",
"0.5750419",
"0.57466006",
"0.571027",
"0.5701218",
"0.5687989",
"0.56818736",
"0.567562",
"0.5668752"
] | 0.7192209 | 0 |
Opens compressed file .bz2 in bytes mode | def bz2_file_bytes_reader(path):
return bz2.open(path, 'rb') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compress_bz2(filename):\n bz2_filename = filename + '.bz2'\n\n with open(filename, 'rb') as og_file, bz2.BZ2File(bz2_filename, 'wb') as bz2_file:\n for data in iter(lambda : og_file.read(100 * 1024), b''):\n bz2_file.write(data)\n\n return bz2_filename",
"def compress_bz2(filename):\n bz2_filename = filename + '.bz2'\n\n with open(filename, 'rb') as og_file, bz2.BZ2File(bz2_filename, 'wb') as bz2_file:\n for data in iter(lambda : og_file.read(100 * 1024), b''):\n bz2_file.write(data)\n\n return bz2_filename",
"def bz2_file_reader(path):\n return bz2.open(path, 'rt')",
"def decompress_bz2(filename):\n basename = os.path.basename(filename)\n newfilepath = os.path.dirname(filename) + '/' + '.'.join(basename.split('.')[0:-1]) # all but bz2\n\n with open(newfilepath, 'wb') as new_file, bz2.BZ2File(filename, 'rb') as bz2_file:\n for data in iter(lambda : bz2_file.read(100 * 1024), b''):\n new_file.write(data)\n\n return newfilepath",
"def decompress_bz2(filename):\n basename = os.path.basename(filename)\n newfilepath = os.path.dirname(filename) + '/' + '.'.join(basename.split('.')[0:-1]) # all but bz2\n\n with open(newfilepath, 'wb') as new_file, bz2.BZ2File(filename, 'rb') as bz2_file:\n for data in iter(lambda : bz2_file.read(100 * 1024), b''):\n new_file.write(data)\n\n return newfilepath",
"def open_compressed(filename, open_flag='r', compression_type='bz2'):\n # create temporary HDF5 file name\n hdf5_file_name = tempfile.mkstemp('.hdf5', 'bob_')[1]\n\n if open_flag == 'r':\n # extract the HDF5 file from the given file name into a temporary file name\n tar = tarfile.open(filename, mode=\"r:\" + compression_type)\n memory_file = tar.extractfile(tar.next())\n real_file = open(hdf5_file_name, 'wb')\n real_file.write(memory_file.read())\n del memory_file\n real_file.close()\n tar.close()\n\n return bob.io.base.HDF5File(hdf5_file_name, open_flag)",
"def open_gzip(fn):\n magic = b'\\x1f\\x8b\\x08'\n l = len(magic)\n with open(fn, 'rb') as f:\n file_start = f.read(l)\n f.seek(0)\n # check if the file is compressed\n if file_start.startswith(magic):\n return gzip.open(fn, 'rt')\n # not compressed\n return open(fn, 'rt')",
"def smart_open(filename, mode):\n if filename.endswith(\".bz2\"):\n opener = bz2.BZ2File\n elif filename.endswith(\".gz\"):\n opener = gzip.open\n else:\n opener = open\n return opener(filename, mode)",
"def open_gz(filename, mode):\n return gzip.open(filename, mode)",
"def open_file(file_path, mode=None, buffer_size=FILE_BUFFER_SIZE, gzip_exts=('.gz','.gzip'), partial=False):\n import io\n \n if os.path.splitext(file_path)[1].lower() in gzip_exts:\n if mode and 'w' in mode:\n file_obj = BufferedWriter(gzip.open(file_path, mode), buffer_size)\n \n else:\n if partial:\n file_obj = BufferedReader(gzip.open(file_path, mode or 'rb'), buffer_size)\n \n else:\n try:\n file_obj = subprocess.Popen(['zcat', file_path], stdout=subprocess.PIPE).stdout\n except OSError:\n file_obj = BufferedReader(gzip.open(file_path, mode or 'rb'), buffer_size)\n \n if sys.version_info.major > 2:\n file_obj = io.TextIOWrapper(file_obj, encoding=\"utf-8\")\n \n else:\n if sys.version_info.major > 2:\n file_obj = open(file_path, mode or 'rU', buffer_size, encoding='utf-8')\n \n else:\n file_obj = open(file_path, mode or 'rU', buffer_size)\n \n return file_obj",
"def _open_zip(self):\n self.buffer = io.BytesIO()\n self.zf = zipfile.ZipFile(self.buffer, \"w\", zipfile.ZIP_DEFLATED)",
"def zip_open_bin(zip, filename):\n if isinstance(zip, FakeZip):\n return zip.open(filename, 'rb')\n else:\n return zip.open(filename, 'r')",
"def bgzip_tabix(bedbz2):\n bed = bedbz2.replace(\".bz2\", \"\")\n bedgz = bed + \".gz\"\n tbi = bedgz + \".tbi\"\n if os.path.exists(bedgz) and os.path.exists(tbi):\n print bedgz, tbi, \"has beed generated.\"\n return\n c1 = \"bzip2 -d %s\" % bedbz2\n c2 = \"bgzip %s\" % bed\n c3 = \"tabix -s 1 -b 2 -e 3 %s\" % bedgz\n call_sys([c1, c2, c3])",
"def gzopen(f):\n return gzip.open(f, 'rb') if f.endswith('.gz') else open(f, 'r')",
"def open_(filename, mode=None, compresslevel=9):\n if filename[-3:] == '.gz':\n if mode is None: mode = 'rt'\n return closing(gzip.open(filename, mode, compresslevel))\n else:\n if mode is None: mode = 'r'\n return open(filename, mode)",
"def open_gzipped(infile, mode='rt'):\n import gzip\n import bz2\n if mode.startswith('r'):\n tmode = 'rt'\n bmode = 'r'\n elif mode.startswith('w'):\n tmode = 'wt'\n bmode = 'w'\n elif mode.startswith('a'):\n tmode = 'at'\n bmode = 'a'\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return gzip.open(infile, tmode)\n if infile.endswith('.bz2'):\n if hasattr(bz2, 'open'):\n return bz2.open(infile, tmode)\n else:\n return bz2.BZ2File(infile, bmode)\n return open(infile, tmode)",
"def _open_file_binary(filename):\n for _byte in open(filename, 'rb').read():\n yield _byte",
"def debz(oldfn, newfn):\n if os.path.isfile(newfn):\n print(\"Error: refusing to overwrite existing file '%s'\" % (newfn, ))\n return\n output = open(newfn, 'wb')\n fobj = open(oldfn, 'rb')\n\n output.write(fobj.read(24))\n while True:\n sz = struct.unpack('>L', fobj.read(4))[0]\n chunk = fobj.read(sz)\n if not chunk:\n break\n output.write(bz2.decompress(chunk))\n # unsure of this\n if sz != len(chunk):\n break\n\n output.close()",
"def checkBZ2(path):\n devnull = open('/dev/null', 'w+')\n proc = subprocess.Popen(['/usr/bin/bzip2', '-t', path], shell=False,\n stdin=devnull, stdout=devnull, stderr=devnull)\n proc.communicate()\n return proc.returncode == 0",
"def open(self):\n self.file = open(self.filename, \"rb\", buffering=self.bufferSize)",
"def uncompress_bzip2(location, target_dir):\n return uncompress(location, target_dir, decompressor=bz2.BZ2File)",
"def openFile(file):\n file = file.lower()\n if file.endswith('.bz2'):\n return bz2.BZ2File(file)\n elif file.endswith('.gz'):\n return gzip.open(file)\n return open(file)",
"def do_gzip(fileobj):\r\n sio = cStringIO.StringIO()\r\n gzf = gzip.GzipFile(fileobj = sio, mode = \"wb\")\r\n while True:\r\n data = fileobj.read(buf_size)\r\n if not data:\r\n break\r\n gzf.write(data)\r\n gzf.close()\r\n return sio",
"def xopen(filename, mode='r'):\n\tassert isinstance(filename, basestring)\n\tif filename == '-':\n\t\treturn sys.stdin if 'r' in mode else sys.stdout\n\tif filename.endswith('.bz2'):\n\t\tif bz2 is None:\n\t\t\traise ImportError(\"Cannot open bz2 files: The bz2 module is not available\")\n\t\tif PY3:\n\t\t\treturn io.TextIOWrapper(bz2.BZ2File(filename, mode))\n\t\telse:\n\t\t\treturn bz2.BZ2File(filename, mode)\n\n\telif filename.endswith('.gz'):\n\t\tif PY3:\n\t\t\treturn io.TextIOWrapper(gzip.open(filename, mode))\n\t\telse:\n\t\t\tif 'r' in mode:\n\t\t\t\ttry:\n\t\t\t\t\treturn Popen(['gzip', '-cd', filename], stdout=PIPE).stdout\n\t\t\t\texcept IOError:\n\t\t\t\t\t# gzip not installed\n\t\t\t\t\treturn buffered_reader(gzip.open(filename, mode))\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\treturn GzipWriter(filename)\n\t\t\t\texcept IOError:\n\t\t\t\t\treturn buffered_writer(gzip.open(filename, mode))\n\telse:\n\t\treturn open(filename, mode)",
"def __init__(self, system):\n super(Bzip2106, self).__init__(\"bzip2-1.0.6\", system, \"bzip2-1.0.6.tar.gz\")",
"def load_compressed(filename, compression_type='bz2'):\n # read from compressed HDF5\n hdf5 = open_compressed(filename, 'r')\n data = hdf5.read(\"array\")\n close_compressed(filename, hdf5)\n\n return data",
"def fetch_block(path, offset, block_size):\n with open(path, 'rb') as file:\n file.seek(offset)\n return bz2.decompress(file.read(block_size))",
"def _gzip_file(filename):\n gzip_filename = filename + '.gz'\n with open(filename, 'rb') as f_in, gzip.open(gzip_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)",
"def _open_bytesio(stream, *args, **kwargs): # pylint: disable=unused-argument\n yield stream",
"def _load_bgzf_block(handle):\n\n # Pull in the BGZF block header information\n header, _ = _bgzf_metaheader(handle)\n XLEN = header[-4]\n BSIZE = struct.unpack('<H', handle.read(2))[0]\n\n # Expose the compressed data\n d_size = BSIZE - XLEN - 19\n d_obj = zlib.decompressobj(-15)\n data = d_obj.decompress(handle.read(d_size)) + d_obj.flush()\n\n # Checking data integrity\n CRC32, ISIZE = unpack_gzip_integrity(handle.read(_integrity_size))\n deflated_crc = zlib.crc32(data)\n if deflated_crc < 0:\n deflated_crc = deflated_crc % (1 << 32)\n if CRC32 != deflated_crc:\n raise ValueError('CRCs are not equal: is {}, not {}'.format(CRC32, deflated_crc))\n if ISIZE != len(data):\n raise ValueError('unequal uncompressed data size')\n\n return BSIZE + 1, data"
] | [
"0.7045241",
"0.7045241",
"0.69642025",
"0.68728",
"0.68728",
"0.64734644",
"0.62642217",
"0.6183835",
"0.61643773",
"0.5998363",
"0.59920585",
"0.59511447",
"0.5924625",
"0.59138536",
"0.5905901",
"0.5905238",
"0.5795446",
"0.57917845",
"0.5782833",
"0.5713188",
"0.56975424",
"0.5688687",
"0.5611458",
"0.55615574",
"0.5548987",
"0.55431515",
"0.55407023",
"0.55002093",
"0.5493032",
"0.5482316"
] | 0.72273827 | 0 |
Opens a txt file and loads tab-separated columns into a dictionary | def load_dict_from_txt_file(path, key_type=str, value_type=str):
with txt_file_reader(path) as txt_file:
return {key_type(key): value_type(value) for key, value in [line.strip().split('\t') for line in txt_file]} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadtwocol(infile):\n kvdict = {}\n with open(infile) as f:\n for nline, line in enumerate(f):\n line = line.strip()\n if line: # to exclude blank line\n k, v = line.split('\\t')\n kvdict[k] = v\n return kvdict",
"def open_txt_table(txt_file, data_start_with=2,keys_line=0, types_line=1, split_char=\"\\t\"):\n file = open(txt_file,\"r\")\n i = 0;\n line = file.readline()\n keys = []\n types = []\n txt_table_rows = []\n while line != \"\":\n line = line.strip(\"\\n\")\n line = line.strip(\"\\r\")\n if(i >= data_start_with):\n values = line.split(split_char)\n n = len(values)\n values += [\" \" for x in range(len(keys) - n)]\n txt_table_rows.append(\n TxtTableRow(keys, values, types)\n )\n elif(i==keys_line):\n keys = line.split(split_char)\n elif(i == types_line):\n types = line.split(split_char)\n i += 1\n line = file.readline()\n\n file.close()\n return txt_table_rows",
"def read_mappings(fname):\n mapping = {}\n for line in open(fname):\n vals = line.strip().split('\\t')\n if len(vals) < 2: continue\n mapping[vals[0]] = vals[1]\n return mapping",
"def read_from_file():\n\n plik=open('data.txt')\n for line in plik:\n (key, temp, weat, pres)=line.split()\n dict_from_file[key]=[temp, weat.replace('_',' '), pres]",
"def read_field(file_name):\n\n f = open(file_name, 'r', encoding='utf-8', errors='ignore')\n data = dict()\n row = 1\n for i in f:\n n = 1\n i = i.strip('\\n')\n for symb in i:\n data[(row, n)] = symb\n n += 1\n row += 1\n return data",
"def read_text_file(file_name):\n \n file_data = {}\n \n with open(file_name) as fp:\n lines = fp.readlines()\n for line in lines:\n lineno = line.strip().split(':')[0].strip()\n #here we are checking whether a particluar line in the file contains a valid data [i.e line number and content]\n try:\n content = line.strip().split(':')[1].strip()\n file_data[lineno] = content\n except IndexError:\n pass\n \n return file_data",
"def read_label_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n ret = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n ret[int(pair[0])] = pair[1].strip()\n else:\n ret[row_number] = pair[0].strip()\n return ret",
"def readTab(file_name):\n data = []\n meta = []\n l=0\n for line in open(file_name):\n if l<3:\n meta.append(line.strip(\"\\n\").split(\"\\t\"))\n else:\n if len(line.strip(\"\\n\").split(\"\\t\")) == len(meta[0]):\n data.append(line.strip(\"\\n\").split(\"\\t\"))\n l += 1\n return (meta, data)",
"def parse_translation(transl):\n t_table = {}\n with open(transl, 'r') as f:\n for line in f:\n tmp = line.strip().split('\\t')\n fun_id = tmp[2]\n gene_name = tmp[0]\n t_table[fun_id] = gene_name\n return t_table",
"def open_tsv(path, multi=False, encoding='utf-8'):\n xx = 0\n DRtn = {}\n for line in file_iter(path, encoding=encoding):\n line = line.strip('\\r\\n')\n LSplit = line.split('\\t')\n \n if xx == 0:\n LKeys = LSplit\n else: \n yy = 0\n DItem = {}\n for key in LKeys:\n DItem[key] = LSplit[yy]\n yy += 1\n \n key = DItem[LKeys[0]]\n if not multi:\n # A single primary key\n assert not key in DRtn, key\n DRtn[key] = DItem\n elif multi == -1:\n # Country codes HACK!\n if key in DRtn: \n continue\n DRtn[key] = DItem\n else: \n # Can have multiple primary keys\n # (e.g. language index info)\n if not key in DRtn: \n DRtn[key] = []\n DRtn[key].append(DItem)\n del DItem[LKeys[0]]\n xx += 1\n return DRtn",
"def loadtwocol_dlist(infile):\n kvdict = defaultdict(list)\n with open(infile) as f:\n for line in f:\n line = line.strip()\n if line:\n k, v = line.split('\\t')\n kvdict[v].append(k)\n return kvdict",
"def read_ngrams(path):\r\n\r\n file = open(path, 'r', encoding='utf-8')\r\n container = {}\r\n for line in file:\r\n collocation_tags, freq = line.strip('\\n').split('\\t')\r\n container[collocation_tags] = freq\r\n return container",
"def load_data():\n categories = {}\n\n # Sorry: This is lazy file reading.\n f = open(TSV, 'r')\n for line in f.readlines()[1:]:\n line = line.strip()\n\n if not line or line.startswith(('id', '#')):\n continue\n\n # It's tab-delimited, so split on tabs.\n line = line.split('\\t')\n categories.setdefault(line[1], []).append(line)\n\n return categories",
"def read_as_dict(path, delimiter=\"\\t\"):\n\treader = read_lines(path)\n\thead = reader.next().split(delimiter)\n\tlen_head = len(head) - 1\n\tfor line in reader:\n\t\tarray = line.split(delimiter, len_head)\n\t\tyield {head[i]: array[i] for i in range(len_head)}",
"def _load_txt(file, devices, channels, header):\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%% Columns of the selected channels %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n out_dict = {}\n for dev_nbr, device in enumerate(devices):\n out_dict[device] = {}\n columns = []\n for chn in channels[device]:\n columns.append(header[device][\"column labels\"][chn])\n # header[device][\"column labels\"] contains the column of .txt file where the data of\n # channel \"chn\" is located.\n out_dict[device][\"CH\" + str(chn)] = np.loadtxt(fname=file, usecols=header[device][\"column labels\"][chn])\n\n return out_dict",
"def read_file(filetxt):\n\n fasta_dict = {}\n with open(filetxt,'r') as text:\n dataset = text.readlines()\n\n for line in dataset:\n line = line.strip()\n if line.startswith('>'):\n fasta_dict[line[1:]] = ''\n current_line = line[1:]\n else:\n fasta_dict[current_line] += line\n\n return fasta_dict",
"def parseKallisto(fname):\n\n logging.debug(\"parsing %s\" % fname)\n ifh = open(fname)\n ifh.readline()\n\n d = {}\n for line in ifh:\n fs = line.rstrip(\"\\n\").split(\"\\t\")\n if fs[tpmColumnIndex]==\"0\" and not addZeros:\n continue\n d[fs[0]] = float(fs[tpmColumnIndex])\n return d",
"def load_file(fpath):\n sick_data = {'X_A': [], 'X_B': [], 'y': []}\n with open(fpath, 'r', encoding='utf-8') as f:\n for line in f:\n text = line.strip().split('\\t')\n sick_data['X_A'].append(text[5].split())\n sick_data['X_B'].append(text[6].split())\n sick_data['y'].append(float(text[4]))\n return sick_data",
"def _read_gtf(gtf):\n if not gtf:\n return gtf\n db = defaultdict(list)\n with open(gtf) as in_handle:\n for line in in_handle:\n if line.startswith(\"#\"):\n continue\n cols = line.strip().split(\"\\t\")\n name = [n.split(\"=\")[1] for n in cols[-1].split(\";\") if n.startswith(\"Name\")]\n chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]\n if cols[2] == \"miRNA_primary_transcript\":\n db[name[0]].append([chrom, int(start), int(end), strand])\n return db",
"def _load_tokens(self, filename):\n self.tokens = dict()\n with open(filename, 'r') as f:\n for line in f.readlines():\n raw = line.strip().split('\\t')\n self.tokens[raw[0]] = int(raw[1])",
"def read_dict(txt_file_path):\n txt_file = open(txt_file_path,'r')\n txt_raw = txt_file.read()\n txt_as_dict = ast.literal_eval(txt_raw)\n txt_file.close()\n return txt_as_dict",
"def parse_data(infile):\n blocks = re.compile(' '.join(['=' * 9] * 8))\n dashes = re.compile('^-{79}$')\n title = re.compile('^Timings for (.*)$')\n row = re.compile(' '.join(['(.{9})'] * 7) + ' (.{8,9})')\n\n lines = infile.readlines()\n\n data = co.OrderedDict()\n index = 0\n\n while index < len(lines):\n line = lines[index]\n\n if blocks.match(line):\n try:\n name = title.match(lines[index + 1]).group(1)\n except Exception:\n index += 1\n continue\n\n data[name] = {}\n\n assert dashes.match(lines[index + 2])\n\n cols = parse_row(row, lines[index + 3])\n\n assert blocks.match(lines[index + 4])\n\n get_row = parse_row(row, lines[index + 5])\n assert get_row[0] == 'get'\n\n set_row = parse_row(row, lines[index + 6])\n assert set_row[0] == 'set'\n\n delete_row = parse_row(row, lines[index + 7])\n assert delete_row[0] == 'delete'\n\n assert blocks.match(lines[index + 9])\n\n data[name]['get'] = dict(zip(cols, get_row))\n data[name]['set'] = dict(zip(cols, set_row))\n data[name]['delete'] = dict(zip(cols, delete_row))\n\n index += 10\n else:\n index += 1\n\n return data",
"def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary",
"def _read_tsv(file_path):\n translation_pairs = []\n with file_path.open() as f:\n # Note: the correct way to do this is with csv.DictReader, but some examples\n # have quote characters that confuse the csv parser. Since we know the\n # source never has its own tab or newline characters, basic Python string\n # manipulation is fine here, as long as the model doesn't predict tabs or\n # newlines.\n for line in f:\n line = line.strip()\n line = line.split('\\t')\n if len(line) != 2:\n raise ValueError(\n f'Line {line} could not be parsed. You may need to manually '\n 'replace tab or newline characters in the model output with '\n 'spaces.'\n )\n source, translation = line\n translation_pairs.append(\n evaluation.TranslationPair(source=source, translation=translation)\n )\n return translation_pairs",
"def load_map_from_sql_stdout(self, in_file, skip_header = False):\n data = dict()\n with open(in_file) as pairs_file:\n for line in pairs_file:\n if skip_header:\n skip_header = False\n continue\n (key, val) = line.strip().split(\"\\t\")\n data[key] = val\n return data",
"def load_to_dict(filename):\n dictionary = dict()\n with open(filename) as ff:\n for line in ff:\n (key, val) = line.split()\n dictionary[key] = val\n return dictionary",
"def map_tsv(tsv_file: IO) -> Dict[int, Tuple[int, int]]:\n sys.stderr.write(\"Mapping articles to lines in the de tsv file...\\n\")\n last_id = None\n document_start = 0\n current_line = 0\n mapping_dict = dict()\n article_length = 0\n mapped_articles = 0\n\n line = tsv_file.readline()\n while line:\n article_id = int(line.split(\"\\t\")[0])\n # new article begins\n if article_id != last_id:\n if last_id is None:\n mapping_dict[article_id] = (document_start, article_length)\n else:\n mapping_dict[last_id] = (document_start, article_length)\n document_start = current_line\n article_length = 0\n last_id = article_id\n mapped_articles += 1\n\n if mapped_articles % 100000 == 0:\n sys.stderr.write(f\"Mapped {mapped_articles} de articles...\\n\")\n\n article_length += 1\n current_line = tsv_file.tell()\n line = tsv_file.readline()\n\n mapping_dict[last_id] = (document_start, article_length)\n\n sys.stderr.write(f\"Done, mapped {len(mapping_dict)} unique articles to lines.\\n\")\n return mapping_dict",
"def readfile(filename):\n\n\tfile = open(filename, \"rt\")\n\n\tretval = {}\n\n\tT = int(file.readline().strip())\n\tretval['T'] = T\n\n\ttests = []\n\n\tfor i in xrange(T):\n\t\tline = file.readline().strip()\n\n\t\tparts = line.split(\" \")\n\n\t\tN = int(parts[0])\n\t\tS = int(parts[1])\n\t\tp = int(parts[2])\n\t\tt = []\n\n\t\tfor j in xrange(N):\n\t\t\tt = t + [int(parts[3 + j]), ]\n\n\t\ttest = { 'N' : N, 'S': S, 'p' : p, 't' : t }\n\t\ttests = tests + [test, ]\n\n\tretval['tests'] = tests\n\treturn retval",
"def read_data(columns, types = {}, filename= \"data/wxobs20170821.txt\"):\n #Initialize my data variable\n data = {}\n for column in columns:\n data[column] = []\n\n with open(filename, \"r\") as datafile:\n # read first three line (header)\n for _ in range(3):\n #print(_)\n datafile.readline()\n\n\n # Read and parse the rest of the file\n for line in datafile:\n split_line = line.split()\n for column in columns:\n i = columns[column]\n t = types.get(column, str)\n value = t(split_line[i])\n data[column].append(value)\n\n return data",
"def open_csv(file, dct):\n with open(file) as csv_file:\n f_csv = csv.reader(csv_file)\n column_headings = next(f_csv)\n csv_row = namedtuple('Row', column_headings)\n for rows in f_csv:\n row = csv_row(*rows)\n dct[row.term] = row.definition"
] | [
"0.7027454",
"0.69427824",
"0.6827322",
"0.63295215",
"0.63261515",
"0.6240552",
"0.623014",
"0.62298214",
"0.6190121",
"0.6161519",
"0.6150538",
"0.61352277",
"0.612601",
"0.60427094",
"0.60386354",
"0.6011791",
"0.60058546",
"0.5992848",
"0.5989369",
"0.5974285",
"0.59722376",
"0.59672517",
"0.5954496",
"0.59417075",
"0.59400755",
"0.59221566",
"0.5903643",
"0.58976233",
"0.5893417",
"0.58879405"
] | 0.7086704 | 0 |
provide functionality to associate an account with the market segment. Raises ValueError if the market segment already knows about the account | def add_account(self, account, add_ms_to_account=True):
# check if name already exists and throw ValueError if it does
# it doesn't make sense to add an account twice -- this could be
# refactored to use a set instead
# check for accounts by name per Q2 bonus below
if account.name in [account.name for account in self._accounts]:
raise ValueError("{} already associated to {}".format(account.name,
self.name))
self._accounts.append(account)
if add_ms_to_account:
# add_account_to_ms is False because we've already added the
# account to this segment, don't want to do it again
account.add_to_market_segment(self, add_account_to_ms=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put_account(self, account):\n \n pass",
"def associate_member_account(memberAccountId=None):\n pass",
"def add_account(insert_dict):\n return ar.add_account(insert_dict)",
"def add_account(self, account):\n self.accounts[account.account_number] = account.json()\n # We should save in database the new account using self.di, but not now in order to get our tests passed",
"def save_accounts(account):\n account.save_account()",
"def save_accounts(account):\n account.save_account()",
"def add(self, account):\n if isinstance(account, Account) and account not in self.account:\n self.account.append(account)",
"def set_market_segments(self, segments):\r\n \"\"\"\r\n Q1-2. Implement this method, which takes an iterable of MarketSegments\r\n to which this Account will be attached. This method REPLACES all\r\n MarketSegment associations, so be sure to update each\r\n MarketSegment's internal representation of associated Accounts\r\n appropriately.\r\n \"\"\"\r\n for existing_segment in self._market_segments:\r\n # only need to remove the ones that aren't in the new list\r\n if existing_segment not in segments:\r\n existing_segment.remove_account(self)\r\n for segment in segments:\r\n # add segments, catch ValueErrors which means the segment was\r\n # already part of this account, therefor no followup action is\r\n # needed\r\n try:\r\n self._market_segments.append(segment)\r\n # add_ms_to_account needs to be False because we've already\r\n # added the segment to this account\r\n segment.add_account(self, add_ms_to_account=False)\r\n except ValueError:\r\n # this account was already associated to that segment,\r\n # continue on\r\n continue",
"def __init__(self, name, accounts=None):\r\n self.name = name\r\n if accounts:\r\n self._accounts = accounts\r\n for account in accounts:\r\n # add_account_to_ms is False because we've already added the\r\n # account to this segment, don't want to do it again\r\n account.add_to_market_segment(self, add_account_to_ms=False)\r\n else:\r\n self._accounts = []\r\n check_for_existing_market_segment(self)",
"def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n 
raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')",
"def register_account(self, contract_name, budget_name, ynab_account_name, account_id=None):\n ynab_contract = self.get_contract_by_name(contract_name)\n if not ynab_contract:\n self._logger.error('Could not get contract by name \"%s\"', contract_name)\n return False\n try:\n account_wrapper = getattr(importlib.import_module('ynabintegrationslib.adapters'),\n f'{ynab_contract.bank}{ynab_contract.type}')\n account = ynab_contract.contract.get_account(account_id)\n self._accounts.append(account_wrapper(account,\n self._ynab,\n budget_name,\n ynab_account_name))\n return True\n except Exception: # pylint: disable=broad-except\n self._logger.exception('Problem registering account')\n return False",
"def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)",
"def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account",
"def account(self, account: str):\n self._account = account",
"def create_account():\n user_id = get_jwt_identity()\n user = User.filter(id=user_id)[0]\n data = json.loads(request.data)\n\n if 'title' not in data:\n return jsonify_response({\"errors\": \"`title` field is required.\"}, 400)\n\n held_accounts = user.get_held_accounts(user.id)\n if held_accounts:\n user_accounts = \",\".join(f\"'{i}'\" for i in held_accounts)\n user_account_names_q = \\\n f\"g.V().hasLabel('{Account.LABEL}')\" + \\\n f\".has('id', within({user_accounts}))\" + \\\n f\".values('title')\"\n user_account_names = client.submit(user_account_names_q).all().result()\n\n if data[\"title\"] in user_account_names:\n return jsonify_response(\n {\"errors\": \"Users with the title already exist\"}, 400)\n\n account = Account.create(title=data[\"title\"])\n edge = UserHoldsAccount.create(user=user.id, account=account.id,\n relationType=\"secondary\")\n\n response = {\n \"title\": account.title\n }\n return jsonify_response(response, 201)",
"def add_to_market_segment(self, market_segment, add_account_to_ms=True):\r\n if market_segment in self._market_segments:\r\n raise ValueError(\"{name} already part of {ms_name}\"\r\n .format(name=self.name,\r\n ms_name=market_segment.name))\r\n self._market_segments.append(market_segment)\r\n if add_account_to_ms:\r\n # add_ms_to_account needs to be False since this account already\r\n # knows about the market segment\r\n market_segment.add_account(self, add_ms_to_account=False)",
"def add_account(self, account, replace=False):\n for asset in account.assets():\n for asset_class in asset.class2ratio.keys():\n assert asset_class in self._leaf_asset_classes, (\n f'Unknown or non-leaf asset class: {asset_class}')\n\n assert replace or account.name() not in self._accounts, (\n f'Attempting to add duplicate account: {account.name()}')\n\n self._accounts[account.name()] = account\n return self",
"def save_account(self):\n Credential.account_list.append(self)",
"def add(self):\n d = {}\n\n self.ok_signal.emit(d, 'account')",
"def new_account(firstname, lastname, pin):\n pass",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def accounts():",
"def _create_account(self, new_account):\n if User.check_existing_user(new_account['account_holder']):\n user = User(new_account['account_holder'])\n if new_account['account_type'] in user.accounts.keys():\n self.session.output({\n 'error':\n 'user already has an account of this type. Returning to main menu.\\n'},\n '[ INVALID ACCOUNT TYPE ERROR ]')\n self._navigate_mainmenu(1)\n return False\n else:\n new_account_created = Account(userid=user.user_id, account_type=new_account['account_type'],\n balance=new_account['initial_balance'])\n self.session.output(new_account_created.get_info(),\n '\\n[ New account created for user {} ]'.format(new_account['account_holder']))\n return True\n else:\n self.session.output({'invalid_account_holder': 'please enter valid account holder id\\n'},\n '\\n[ USER ID ERROR ]')\n return False",
"def get_account(self, account):\n \n pass",
"def put(self, account=None, user=None, account_id=None):\n return super().put()",
"def newaccount(accountname, account, owner, active, memo, posting, create_claimed_account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n if owner is None or active is None or memo is None or posting is None:\n password = click.prompt(\"Keys were not given - Passphrase is used to create keys\\n New Account Passphrase\", confirmation_prompt=True, hide_input=True)\n if not password:\n print(\"You cannot chose an empty password\")\n return\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, password=password)\n else:\n tx = mph.create_account(accountname, creator=acc, password=password)\n else:\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting)\n else:\n tx = mph.create_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting) \n tx = json.dumps(tx, indent=4)\n print(tx)",
"def amazon_accounts():\n import json\n from security_monkey.datastore import Account, AccountType\n from os.path import dirname, join\n\n data_file = join(dirname(dirname(__file__)), \"data\", \"aws_accounts.json\")\n data = json.load(open(data_file, 'r'))\n\n app.logger.info('Adding / updating Amazon owned accounts')\n try:\n account_type_result = AccountType.query.filter(AccountType.name == 'AWS').first()\n if not account_type_result:\n account_type_result = AccountType(name='AWS')\n db.session.add(account_type_result)\n db.session.commit()\n db.session.refresh(account_type_result)\n\n for group, info in data.items():\n for aws_account in info['accounts']:\n acct_name = \"{group} ({region})\".format(group=group, region=aws_account['region'])\n account = Account.query.filter(Account.identifier == aws_account['account_id']).first()\n if not account:\n app.logger.debug(' Adding account {0}'.format(acct_name))\n account = Account()\n else:\n app.logger.debug(' Updating account {0}'.format(acct_name))\n\n account.identifier = aws_account['account_id']\n account.account_type_id = account_type_result.id\n account.active = False\n account.third_party = True\n account.name = acct_name\n account.notes = info['url']\n\n db.session.add(account)\n\n db.session.commit()\n app.logger.info('Finished adding Amazon owned accounts')\n except Exception as e:\n app.logger.exception(\"An error occured while adding accounts\")\n store_exception(\"manager-amazon-accounts\", None, e)"
] | [
"0.6881962",
"0.6379945",
"0.6314283",
"0.6247812",
"0.614936",
"0.614936",
"0.6070196",
"0.6060519",
"0.60320276",
"0.59797066",
"0.5976318",
"0.58752096",
"0.586902",
"0.5846821",
"0.5827182",
"0.5821246",
"0.57961375",
"0.5756492",
"0.5755137",
"0.5746932",
"0.57255113",
"0.57255113",
"0.57255113",
"0.57255113",
"0.5721108",
"0.57188565",
"0.5707494",
"0.56822026",
"0.5665522",
"0.5619682"
] | 0.6542094 | 1 |
get the sales rep associated with this Account | def get_sales_rep(self):
return self._sales_rep | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sales_rep_code(self):\n return self._sales_rep_code",
"def get_sales_data():\n print(\"Retrieving all the sales information...\")\n data = SHEET.worksheet('sales')\n print(\"Compilation complete!\\n\")\n return data",
"def get_sales(res=None, user_email=None, user_role=None, user_id=None):\n sales = sale_insatnce.get_sales()\n sales_data = []\n for i in sales:\n data = {\n \"email\": i[2],\n \"sale_id\": i[0],\n \"user_id\": i[1],\n \"cost\": i[3],\n \"description\": i[4]\n }\n sales_data.append(data)\n\n return (jsonify({\n \"sales\": sales_data\n }))",
"def get_all_sales(self):\n all_sales = self.dbconn.get_all_sales()\n return all_sales",
"def get(self):\n\n bill = {\n 'product': {\n 'name': self.order.product.name,\n 'price': self.order.product.price\n },\n 'order_date_of_creation': self.order.date_of_creation,\n 'bill_date_of_creation': timezone.now(),\n 'discounts': [],\n 'total': self.order.product.price\n }\n\n return self.add_discount(bill)",
"def get_Srs(self):\n return self.Srs",
"def getInvoice(self):\n return self.base.get(\"invoice\", [])",
"def show_rentals(product_id): # {{{\n cust_rent_dict = {}\n try:\n with MONGO:\n DATABAE = MONGO.connection.assignment_07\n customer_rental = DATABAE.rental.aggregate(\n [\n {\n \"$lookup\": {\n \"from\": \"customer\",\n \"localField\": \"user_id\",\n \"foreignField\": \"user_id\",\n \"as\": \"customer_rentals\",\n }\n },\n {\"$match\": {\"product_id\": product_id}},\n ]\n )\n except TypeError as excep:\n LOGGER.info(\n \"Error retrieving customer who rented product: %s\", product_id)\n LOGGER.info(excep)\n\n try:\n for customer in customer_rental:\n cust_rent_dict[customer[\"user_id\"]] = {\n \"name\": customer[\"customer_rentals\"][0][\"name\"],\n \"address\": customer[\"customer_rentals\"][0][\"address\"],\n \"phone_number\": customer[\"customer_rentals\"][0][\"phone_number\"],\n \"email\": customer[\"customer_rentals\"][0][\"email\"],\n }\n except TypeError as excep:\n LOGGER.info(\"Error formatting retrieved customer rental info\")\n LOGGER.info(excep)\n else:\n if not cust_rent_dict:\n LOGGER.info(\"Product: %s not found.\", product_id)\n else:\n LOGGER.info('Retrieved rental info for product: %s', product_id)\n return cust_rent_dict # }}}",
"def get_sale(id, res=None, user_role=None, user_id=None, user_email=None):\n sale = sale_insatnce.get_sale(id)\n if sale:\n data = {\n \"cost\": sale[3],\n \"sale_id\": sale[0],\n \"user_id\": sale[1],\n \"description\": sale[4]\n }\n return jsonify({\n \"sales\": data\n })\n else:\n return jsonify(\n {\n \"message\": \"sale not found\"\n }\n ), 404",
"def all_sales(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM sales;\")\n res = cur.fetchall()\n sales_records=[]\n for a_sale in res:\n record = {\n 'sales_id':a_sale[0],\n 'attendant':a_sale[1],\n 'product_name':a_sale[2],\n 'price':a_sale[3],\n 'quantity':a_sale[4]\n }\n sales_records.append(record)\n return jsonify({\"Records\": sales_records}), 200",
"def set_sales_rep(self, sales_rep):\r\n self._sales_rep = sales_rep",
"def get_data_sales(self):\n return {\n 'search_type': SearchForm.SEARCH_TYPE_SALE,\n 'min_price': '40000',\n 'max_price': '50000',\n 'location':'Test, Test',\n 'min_bedrooms': '5',\n 'property_type': str(PropertyTypeFactory().slug)\n }",
"def get_user_sales(id, res=None, user_role=None, user_id=None, user_email=None):\n sales = sale_insatnce.get_user_sales(id)\n sales_data = []\n for i in sales:\n data = {\n \"email\": i[2],\n \"sale_id\": i[0],\n \"user_id\": i[1],\n \"cost\": i[3],\n \"description\": i[4]\n }\n sales_data.append(data)\n\n return (jsonify({\n \"sales\": sales_data\n }))",
"def get_all_sales():\n admin = \"admin\"\n if [\"role\"] != admin:\n return jsonify({\"message\": \"Only an admin can view all sales records\"}), 401\n response = jsonify(sale_object.get_all_sales())\n response.status_code = 200\n return response",
"def view_total_sales():\n # Later will add the ability to sort by date and Category\n try:\n with session_scope() as db_session:\n orders = db_session.query(Order).all()\n\n if len(orders) < 1:\n return {\n 'code': 404,\n 'message': 'There are no sales'\n }, 404\n\n nmbr_itm = 0\n for order in orders:\n for items in order.order_lines:\n nmbr_itm = nmbr_itm + items.quantity\n\n except DBAPIError as db_error:\n # Returns an error in case of a integrity constraint not being followed.\n return {\n 'code': 400,\n 'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)\n }, 400\n except NoResultFound:\n # Returns an error in case of a integrity constraint not being followed.\n return {\n 'code': 400,\n 'message': \"No sales have been registered\"\n }, 400\n return {\n 'numberItems': nmbr_itm\n }, 200",
"def show_rentals(product_id):\n rentals = {}\n\n if not collection_exist(DATABASE, RENTALS_COLLECTION) or \\\n not collection_exist(DATABASE, CUSTOMER_COLLECTION):\n return rentals\n\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n rental_count = 0\n for record in database[RENTALS_COLLECTION].find({\"product_id\": product_id}):\n customer = database[CUSTOMER_COLLECTION].find_one({\"user_id\": record[\"user_id\"]})\n rentals[customer['user_id']] = {'name': customer['name'],\n 'address': customer['address'],\n 'phone_number': customer['phone_number'],\n 'email': customer['email']}\n rental_count += 1\n\n return rental_count",
"def sales(self, sales):\n\n self._sales = sales",
"def adjustments(self):\r\n return ads.AccountAdjustments(self)",
"def _get_journal(self):\n context = dict(self._context or {})\n type_inv = context.get('type', 'in_invoice')\n type2journal = {'out_invoice': 'src_sale',\n 'in_invoice': 'src_purchase'}\n journal_obj = self.env['account.journal']\n user = self.env['res.users'].browse(\n )\n company_id = context.get('company_id', user.company_id.id)\n domain = [('company_id', '=', company_id)]\n domain += [('type', '=', type2journal.get(\n type_inv, 'src_purchase'))]\n res = journal_obj.search( domain, limit=1)\n return res and res[0] or False",
"def invoices(self):\r\n return inv.Invoices(self)",
"def get_customer(self):\n try:\n cursor = self.db.cursor()\n cursor.execute(\"SELECT * FROM costumers WHERE dni=?\", (self.dni,))\n return cursor.fetchall()\n except:\n print(\"Error\")",
"def invoices(self):\r\n return inv.AccountInvoices(self)",
"def customers(self):\r\n return customers.Customers(self)",
"def show_rentals(product_id):\n\n mongo = MongoDBConnection()\n\n result = {}\n with mongo:\n db = mongo.connection.HPNorton\n customercollection = db[\"customers\"]\n for document in customercollection.find({\"rentals\": {\"$in\": [product_id]}}):\n key = document['customer_id']\n\n result[key] = {\n 'name': document['name'],\n 'address': document['address'],\n 'phone_number': document['phone_number'],\n 'email': document['email']\n }\n\n return result",
"def add_receipt(self,sale,name):\n id = self.find_employee_id(name)\n if id in self.receipts:\n self.receipts[id].append(sale)\n else:\n self.receipts[id] = [sale]\n return self.receipts",
"def get_rentals_on_my_rigs(self, **kwargs):\n kwargs['type'] = 'owner'\n return self.mrr_obj.get('/rental', params=kwargs)",
"def _retention_rate(self):\n res = {}\n for ret_line in self.browse(self.ids):\n if ret_line.invoice_id:\n pass\n else:\n res[ret_line.id] = 0.0\n return res",
"def get(self, args):\n return Payment.query.offset(args['offset']).limit(args['limit'])",
"def total_sales():\n data = []\n orders = Order.objects.all()\n for order in orders:\n data.append(order.get_total_cost())\n return sum(data)",
"def getCustomer(self):\n return self.base.get(\"customer\", [])"
] | [
"0.60808676",
"0.59343404",
"0.5843296",
"0.58057",
"0.5775382",
"0.57279",
"0.5499945",
"0.5478197",
"0.5337665",
"0.53167444",
"0.53152895",
"0.5267397",
"0.5243935",
"0.52410465",
"0.52232355",
"0.52158624",
"0.52153754",
"0.52091914",
"0.5116701",
"0.5112952",
"0.511263",
"0.50803536",
"0.50741655",
"0.5069309",
"0.50315446",
"0.5027781",
"0.49920437",
"0.49638012",
"0.49632075",
"0.4957412"
] | 0.7881946 | 0 |