Dataset columns:

  query            string  (length 9 to 9.05k)
  document         string  (length 10 to 222k)
  metadata         dict
  negatives        list    (30 items per row)
  negative_scores  list    (30 items per row)
  document_score   string  (length 4 to 10)
  document_rank    string  (2 distinct values)
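The columns above describe a code-retrieval dataset: a natural-language query, a matching code document, 30 mined negative snippets, and their similarity scores. A minimal sketch of loading and inspecting one row follows, assuming the data is distributed as a Hugging Face dataset; the repository id is a hypothetical placeholder.

from datasets import load_dataset

# Hypothetical repository id -- replace with the actual dataset location.
ds = load_dataset("example-org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                  # e.g. "Test case for aws_service_api_keypair_get"
print(row["document"])               # the positive (matching) code snippet
print(len(row["negatives"]))         # 30 hard-negative snippets
print(len(row["negative_scores"]))   # 30 scores aligned with `negatives`
print(row["document_score"], row["document_rank"])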
Test case for aws_service_api_keypair_get
def test_aws_service_api_keypair_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_keypairs_get(self):\n pass", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def test_aws_service_api_keypair_delete(self):\n pass", "def test_aws_service_api_keypair_import_post(self):\n pass", "def test_create_api_key(self):\n pass", "def test_get_public_key(self):\n query_string = [('agentid', 'false'),\n ('companyid', 'false')]\n response = self.client.open(\n '/v0_9_1/PublicKeys',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n for key, value in pairs.items():\n tempconfig.write(\"{0}: {1}\\n\".format(\n key, value).encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, \"Spam\"), value)\n for key, value in pairs.items():\n self.assertEqual(config.getRequiredKey(key), value)\n finally:\n os.remove(tempconfig.name)", "def test_get_keys(self):\n response = self.client.get_keys()\n assert isinstance(response, dict)\n assert 'public' in response\n assert 'private' in response\n assert response['public'] is not None\n assert response['private'] is not None", "def test_generate_api_key():\n\n key = auth.generate_api_key() # returns a NamedTuple with api_key and hashed_key\n hashed_api_key = sha256(key.api_key.encode('utf-8')).hexdigest()\n assert hashed_api_key == key.hashed_key", "def test_get_user_api_keys(self):\n pass", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def get(self, name):\n path = '/os-keypairs/%s' % name\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack key pair %s: %s' % (name, truncate(res)))\n return res[0]['keypair']", "def getAwsKeypair(directory=None):\n if directory is None:\n directory = './'\n with open(directory + 'access.key', 'r+') as fp:\n access_key = fp.read()\n with open(directory + 'secret.key', 'r+') as fp:\n secret_key = fp.read()\n return (access_key, secret_key)", "def test_get_cloud_organization_api_key(self):\n pass", "def keypair_lookup(session):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_key_pairs()\n\n # If SSH_KEY exists and points to a valid Key Pair, use it\n key = os.environ.get(\"SSH_KEY\", None) # reuse bastion.py env vars\n if key is not None:\n kp_name = os.path.basename(key)\n if kp_name.endswith(\".pem\"):\n kp_name = kp_name[:-4]\n for kp in response['KeyPairs']:\n if kp[\"KeyName\"] == kp_name:\n return kp_name\n\n print(\"Key Pairs\")\n for i in range(len(response['KeyPairs'])):\n print(\"{}: {}\".format(i, response['KeyPairs'][i]['KeyName']))\n if len(response['KeyPairs']) == 0:\n return None\n while True:\n try:\n idx = input(\"[0]: \")\n idx = int(idx if len(idx) > 0 else \"0\")\n return response['KeyPairs'][idx]['KeyName']\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n print(\"Invalid Key Pair number, try again\")", "def GetKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_key(self, user, api_key):\n return True", "def test_api_key(self):\n 
self.assertEqual(self.route4me.key, '11111111111111111111111111111111')", "def test_getKey_keyexists(self):\n filename = self.mktemp()\n with open(filename, 'wb') as fh:\n fh.write(SEKRIT_KEY)\n fh.flush()\n\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))\n self.assertEqual(SEKRIT_KEY, key,\n \"\"\"The example key and the one read from file differ!\n key (in hex): %s\n SEKRIT_KEY (in hex): %s\"\"\"\n % (key.encode('hex'), SEKRIT_KEY.encode('hex')))", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def get_api_key(api_key):\n api.get(api_key)", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def test_get_kms_key_alias(self):\n service_key_alias = 'service-name'\n key_arn = 'random-ARN'\n self.mock_kms.list_aliases.return_value = {\n \"Aliases\": [ {\"AliasName\": \"alias/{}\".format(service_key_alias)} ]\n }\n aliases = ef_utils.kms_key_alias(self.mock_kms, key_arn)\n self.assertIn(service_key_alias, aliases)\n self.mock_kms.list_aliases.assert_called_once_with(KeyId=key_arn)", "def _get_key_pair_by_id(key_pair_id):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n key_pairs = ec2_client.get_all_key_pairs(keynames=key_pair_id)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return key_pairs[0] if key_pairs else None", "def test_get_test_organization_api_key(self):\n pass", "def get_api_key(context) -> str:\n provided_api_key = \"\"\n for key, value in context.invocation_metadata():\n if key == \"api_key\":\n provided_api_key = str(value)\n return provided_api_key\n return provided_api_key", "def test_get_single(single_bucket): # pylint: disable=redefined-outer-name\n returned_value = single_bucket.get(\"key 1\")\n\n assert returned_value == \"value 1\"", "def check_key_pair(ec2, kp_name):\n if not [i for i in ec2.get_all_key_pairs() if str(i).split(':')[1] == kp_name]:\n sys.stderr.write(\"Key pair: {} does not exist, please import_key_pair prior to running.\\n\".format(kp_name))\n sys.exit(1)", "def get_keypair_keypath ( aws_account_type ) :\n return '/keypairs/' + aws_account_type + '/'", "def test_get(self):\n key = self.key_gen.get()\n key2 = self.key_gen.get()\n\n self.assertEqual(key, key2 - 1)" ]
[ "0.87272215", "0.77711517", "0.76828736", "0.69687665", "0.6587534", "0.65302324", "0.6508063", "0.6470722", "0.6401116", "0.6392054", "0.6370844", "0.6345941", "0.63402843", "0.63292104", "0.63100874", "0.62989324", "0.6209678", "0.6179603", "0.61722237", "0.61598915", "0.6119571", "0.6096574", "0.60903794", "0.60766333", "0.60737795", "0.6046647", "0.6041812", "0.6014681", "0.60075915", "0.60044754" ]
0.9449849
0
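Each row's metadata declares the objective { "triplet": [ [ "query", "document", "negatives" ] ] }: the query acts as the anchor, the document as the positive, and the negatives as contrast candidates. Below is a minimal sketch of turning one row into such a triplet; the max_negatives and score_floor parameters are illustrative assumptions, not part of the dataset.

def to_triplet(row, max_negatives=5, score_floor=0.0):
    # Sort mined negatives so the highest-scoring (hardest) ones come first.
    scored = sorted(
        zip(row["negatives"], (float(s) for s in row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    negatives = [code for code, score in scored if score >= score_floor][:max_negatives]
    return {
        "anchor": row["query"],        # natural-language description of the test case
        "positive": row["document"],   # matching code snippet
        "negatives": negatives,        # hardest non-matching snippets
    }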
Test case for aws_service_api_keypair_import_post
def test_aws_service_api_keypair_import_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_keypair_generate_post(self):\n pass", "def test_aws_service_api_keypair_get(self):\n pass", "def test_aws_service_api_keypair_delete(self):\n pass", "def test_aws_service_api_keypairs_get(self):\n pass", "def test_create_api_key(self):\n pass", "def ex_import_keypair(self, name, keyfile):\n\n base64key = base64.b64encode(open(os.path.expanduser(keyfile)).read())\n\n params = {'Action': 'ImportKeyPair',\n 'KeyName': name,\n 'PublicKeyMaterial': base64key\n }\n\n response = self.connection.request(self.path, params=params).object\n key_name = self._findtext(response, 'keyName')\n key_fingerprint = self._findtext(response, 'keyFingerprint')\n return {\n 'keyName': key_name,\n 'keyFingerprint': key_fingerprint,\n }", "def test_modify_import_data_1(self):\n result = tickets.modify_import_data(self.ticket_dict2,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)", "def import_key_pair(self, key_name, public_key_material):\r\n public_key_material = base64.b64encode(public_key_material)\r\n params = {'KeyName' : key_name,\r\n 'PublicKeyMaterial' : public_key_material}\r\n return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')", "def do_importkey(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n key = self.card.get_seed_pubkey()\n key_fmted = self.format_pubkey(key)\n\n print('\\nImportKey: %s' % key_fmted)\n return self.return_code(0)", "def test_azure_import(self):\n\n uri, pubkey = AzureSigner.import_(\"fsn-vault-1\", \"ec-key-1\")\n\n self.assertEqual(pubkey, self.azure_pubkey)\n self.assertEqual(uri, self.azure_id)", "def import_key_pair(self, key_name, public_key_material):\n response = key_pair.import_key_pair(self.url, self.verb, self.headers,\n self.version, key_name, public_key_material)\n if response is not None :\n res = ImportKeyPairsResponse.ImportKeyPairsResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def import_project_dump(self, key):", "def upload_key():\n data = check_args(('cloudProvider', 'key'))\n provider = jobs.init_provider(data, True)\n key = decrypt_key(data['key'], data['username'])\n provider.save_key(key)\n return make_response()", "def test_upload(self):\n package = make_package()\n datastr = 'foobar'\n data = StringIO(datastr)\n self.storage.upload(package, data)\n key = list(self.bucket.list())[0]\n self.assertEqual(key.get_contents_as_string(), datastr)\n self.assertEqual(key.get_metadata('name'), package.name)\n self.assertEqual(key.get_metadata('version'), package.version)", "def import_key(\n key_name, public_key_material, region=None, key=None, keyid=None, profile=None\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n key = conn.import_key_pair(key_name, public_key_material)\n log.debug(\"the key to return is : %s\", key)\n return key.fingerprint\n except boto.exception.BotoServerError as e:\n log.debug(e)\n return False", "def test_modify_import_data_3(self):\n result = tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"host_genus\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"subcluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"accession\"], \"parse\")\n 
with self.subTest():\n self.assertEqual(self.ticket_dict3[\"type\"], \"add\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"description_field\"], \"product\")\n with self.subTest():\n self.assertEqual(self.ticket_dict3[\"eval_mode\"], \"final\")", "def key_upload(self, key=None):\n\n name = key[\"name\"]\n cloud = self.cloud\n Console.msg(f\"upload the key: {name} -> {cloud}\")\n try:\n r = self.cloudman.create_keypair(name, key['public_key'])\n except: # openstack.exceptions.ConflictException:\n raise ValueError(f\"key already exists: {name}\")\n\n return r", "def temp_api_key(cloud):\n payload = {'name': 'pelion_e2e_dynamic_api_key'}\n r = cloud.account.create_api_key(payload, expected_status_code=201)\n resp = r.json()\n\n log.info('Created new developer api key for test case, id: {}'.format(resp['id']))\n\n yield resp\n\n log.info('Cleaning out the generated test case developer api key, id: {}'.format(resp['id']))\n cloud.account.delete_api_key(resp['id'], expected_status_code=204)", "def test_05_sync(\n self,\n getenv,\n aed_list_keys,\n aed_import_key,\n ska_fetch_token,\n ska_list_keys,\n ska_export_key,\n ):\n args = object()\n getenv.return_value = \"\"\n\n # None returned by AED\n aed_list_keys.return_value = []\n\n # Two returned by SmartKey\n ska_list_keys.return_value = [\n {\n \"acct_id\": \"79f56d41-d52c-4747-a32b-06670967f02e\",\n \"activation_date\": \"20190514T144829Z\",\n \"created_at\": \"20190514T144829Z\",\n \"creator\": {\"user\": \"a7fa826c-b553-4d63-93e7-9f5af2a44f63\"},\n \"description\": \"\",\n \"enabled\": True,\n \"group_id\": \"4dbe167a-8e58-43b9-922a-4ac6f94c052a\",\n \"key_ops\": [\n \"SIGN\",\n \"VERIFY\",\n \"ENCRYPT\",\n \"DECRYPT\",\n \"WRAPKEY\",\n \"UNWRAPKEY\",\n \"EXPORT\",\n \"APPMANAGEABLE\",\n ],\n \"key_size\": 2048,\n \"kid\": \"0b8358fc-1864-4096-9ab9-26fb59e86abe\",\n \"lastused_at\": \"19700101T000000Z\",\n \"name\": \"testkey2048\",\n \"never_exportable\": False,\n \"obj_type\": \"RSA\",\n \"origin\": \"FortanixHSM\",\n \"pub_key\": rsa2048_pub_pkcs8,\n \"public_only\": False,\n \"rsa\": {\n \"encryption_policy\": [{\"padding\": {\"OAEP\": {\"mgf\": None}}}],\n \"key_size\": 2048,\n \"signature_policy\": [{\"padding\": None}],\n },\n \"state\": \"Active\",\n },\n {\n \"acct_id\": \"79f56d41-d52c-4747-a32b-06670967f02e\",\n \"activation_date\": \"20190514T195343Z\",\n \"created_at\": \"20190514T195343Z\",\n \"creator\": {\"user\": \"a7fa826c-b553-4d63-93e7-9f5af2a44f63\"},\n \"description\": \"\",\n \"elliptic_curve\": \"NistP256\",\n \"enabled\": True,\n \"group_id\": \"4dbe167a-8e58-43b9-922a-4ac6f94c052a\",\n \"key_ops\": [\n \"SIGN\",\n \"VERIFY\",\n \"EXPORT\",\n \"APPMANAGEABLE\",\n \"AGREEKEY\",\n ],\n \"kid\": \"6e7b1ebb-7f66-423e-8a57-1074f407341d\",\n \"lastused_at\": \"19700101T000000Z\",\n \"name\": \"nist256\",\n \"never_exportable\": False,\n \"obj_type\": \"EC\",\n \"origin\": \"FortanixHSM\",\n \"pub_key\": nist256_pub_pkcs8,\n \"public_only\": False,\n \"state\": \"Active\",\n },\n ]\n\n # Export is called twice\n ska_export_key.side_effect = [\n {\n \"acct_id\": \"79f56d41-d52c-4747-a32b-06670967f02e\",\n \"activation_date\": \"20190514T144829Z\",\n \"created_at\": \"20190514T144829Z\",\n \"creator\": {\"user\": \"a7fa826c-b553-4d63-93e7-9f5af2a44f63\"},\n \"description\": \"\",\n \"enabled\": True,\n \"group_id\": \"4dbe167a-8e58-43b9-922a-4ac6f94c052a\",\n \"key_ops\": [\n \"SIGN\",\n \"VERIFY\",\n \"ENCRYPT\",\n \"DECRYPT\",\n \"WRAPKEY\",\n \"UNWRAPKEY\",\n \"EXPORT\",\n \"APPMANAGEABLE\",\n ],\n 
\"key_size\": 2048,\n \"kid\": \"0b8358fc-1864-4096-9ab9-26fb59e86abe\",\n \"lastused_at\": \"19700101T000000Z\",\n \"name\": \"testkey2048\",\n \"never_exportable\": False,\n \"obj_type\": \"RSA\",\n \"origin\": \"FortanixHSM\",\n \"pub_key\": rsa2048_pub_pkcs8,\n \"public_only\": False,\n \"rsa\": {\n \"encryption_policy\": [{\"padding\": {\"OAEP\": {\"mgf\": None}}}],\n \"key_size\": 2048,\n \"signature_policy\": [{\"padding\": None}],\n },\n \"state\": \"Active\",\n \"value\": rsa2048_priv,\n }, # Second key\n {\n \"acct_id\": \"79f56d41-d52c-4747-a32b-06670967f02e\",\n \"activation_date\": \"20190514T195343Z\",\n \"created_at\": \"20190514T195343Z\",\n \"creator\": {\"user\": \"a7fa826c-b553-4d63-93e7-9f5af2a44f63\"},\n \"description\": \"\",\n \"elliptic_curve\": \"NistP256\",\n \"enabled\": True,\n \"group_id\": \"4dbe167a-8e58-43b9-922a-4ac6f94c052a\",\n \"key_ops\": [\n \"SIGN\",\n \"VERIFY\",\n \"EXPORT\",\n \"APPMANAGEABLE\",\n \"AGREEKEY\",\n ],\n \"kid\": \"6e7b1ebb-7f66-423e-8a57-1074f407341d\",\n \"lastused_at\": \"19700101T000000Z\",\n \"name\": \"nist256\",\n \"never_exportable\": False,\n \"obj_type\": \"EC\",\n \"origin\": \"FortanixHSM\",\n \"pub_key\": nist256_pub_pkcs8,\n \"public_only\": False,\n \"state\": \"Active\",\n \"value\": nist256_priv,\n },\n ]\n\n aed_import_key.side_effect = [\n {\"label\": \"testkey2048\", \"type\": \"RSA\"},\n {\"label\": \"nist256\", \"type\": \"EC\"},\n ]\n command_line.cmd_skey_sync_keys(args)\n\n # Make sure export was called twice with the correct kid\n self.assertEqual(ska_export_key.call_count, 2)\n ska_export_key.assert_any_call(\"0b8358fc-1864-4096-9ab9-26fb59e86abe\")\n ska_export_key.assert_any_call(\"6e7b1ebb-7f66-423e-8a57-1074f407341d\")\n\n # Make sure import was called twice with the correct data\n self.assertEqual(aed_import_key.call_count, 2)\n aed_import_key.assert_any_call(\n \"testkey2048\",\n (\n \"-----BEGIN RSA PRIVATE KEY-----\\n\"\n + \"%s\\n-----END RSA PRIVATE KEY-----\\n\"\n )\n % rsa2048_priv,\n )\n\n aed_import_key.assert_any_call(\n \"nist256\",\n (\n \"-----BEGIN EC PARAMETERS-----\\n\"\n + \"BggqhkjOPQMBBw==\\n\"\n + \"-----END EC PARAMETERS-----\\n-----\"\n + \"BEGIN EC PRIVATE KEY-----\\n%s\\n---\"\n + \"--END EC PRIVATE KEY-----\\n\"\n )\n % nist256_priv,\n )", "def test_10_sync_noop(\n self,\n getenv,\n aed_list_keys,\n aed_import_key,\n ska_fetch_token,\n ska_list_keys,\n ska_export_key,\n ):\n args = object()\n getenv.return_value = \"\"\n\n # Two returned by AED\n aed_list_keys.return_value = [\n {\"label\": \"testkey2048\", \"public\": rsa2048_pub, \"type\": \"RSA\"},\n {\"label\": \"nist256\", \"public\": nist256_pub, \"type\": \"EC\"},\n ]\n\n # Two returned by SmartKey\n ska_list_keys.return_value = [\n {\n \"acct_id\": \"79f56d41-d52c-4747-a32b-06670967f02e\",\n \"activation_date\": \"20190514T144829Z\",\n \"created_at\": \"20190514T144829Z\",\n \"creator\": {\"user\": \"a7fa826c-b553-4d63-93e7-9f5af2a44f63\"},\n \"description\": \"\",\n \"enabled\": True,\n \"group_id\": \"4dbe167a-8e58-43b9-922a-4ac6f94c052a\",\n \"key_ops\": [\n \"SIGN\",\n \"VERIFY\",\n \"ENCRYPT\",\n \"DECRYPT\",\n \"WRAPKEY\",\n \"UNWRAPKEY\",\n \"EXPORT\",\n \"APPMANAGEABLE\",\n ],\n \"key_size\": 2048,\n \"kid\": \"0b8358fc-1864-4096-9ab9-26fb59e86abe\",\n \"lastused_at\": \"19700101T000000Z\",\n \"name\": \"testkey2048\",\n \"never_exportable\": False,\n \"obj_type\": \"RSA\",\n \"origin\": \"FortanixHSM\",\n \"pub_key\": rsa2048_pub_pkcs8,\n \"public_only\": False,\n \"rsa\": {\n \"encryption_policy\": 
[{\"padding\": {\"OAEP\": {\"mgf\": None}}}],\n \"key_size\": 2048,\n \"signature_policy\": [{\"padding\": None}],\n },\n \"state\": \"Active\",\n },\n {\n \"acct_id\": \"79f56d41-d52c-4747-a32b-06670967f02e\",\n \"activation_date\": \"20190514T195343Z\",\n \"created_at\": \"20190514T195343Z\",\n \"creator\": {\"user\": \"a7fa826c-b553-4d63-93e7-9f5af2a44f63\"},\n \"description\": \"\",\n \"elliptic_curve\": \"NistP256\",\n \"enabled\": True,\n \"group_id\": \"4dbe167a-8e58-43b9-922a-4ac6f94c052a\",\n \"key_ops\": [\n \"SIGN\",\n \"VERIFY\",\n \"EXPORT\",\n \"APPMANAGEABLE\",\n \"AGREEKEY\",\n ],\n \"kid\": \"6e7b1ebb-7f66-423e-8a57-1074f407341d\",\n \"lastused_at\": \"19700101T000000Z\",\n \"name\": \"nist256\",\n \"never_exportable\": False,\n \"obj_type\": \"EC\",\n \"origin\": \"FortanixHSM\",\n \"pub_key\": nist256_pub_pkcs8,\n \"public_only\": False,\n \"state\": \"Active\",\n },\n ]\n\n # Export is called twice\n command_line.cmd_skey_sync_keys(args)\n\n # Make sure no action was taken\n ska_export_key.assert_not_called()\n aed_import_key.assert_not_called()", "def test_delete_api_key(self):\n pass", "def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)", "def test_api_remote_import_post(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.ModelImport()\n path, method = default_api.api_remote_import_post(params)\n self.assertEqual(path, '/api/remote/import')\n self.assertEqual(method, 'POST')", "def test_valid_keys(client):\n response=client.post(\"/signin\",data=dict(username=TestSignin.email, password=TestSignin.password), content_type=\"multipart/form-data\")\n data=json.loads(response.data)\n assert response.status_code==400\n assert data[\"error\"] == \"Please provide email and password as keys\"", "def test_create_keypair_save_both(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path,\n private_filepath=priv_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)\n\n self.assertTrue(os.path.isfile(priv_file_path))", "def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]", "def test_service_api_predict_missing_keys(service_app):\n tmp_data = copy.deepcopy(data[:1])\n tmp_data[0].pop('x1')\n\n response = 
service_app.post('/predict',\n data=json.dumps(tmp_data),\n content_type='application/json')\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 400\n assert json.loads(response.data) == {'error': \"'x1' is a required property in items -> required\"}", "def test_modify_import_data_2(self):\n self.ticket_dict3[\"extra\"] = \"extra\"\n result = tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)" ]
[ "0.792199", "0.694477", "0.67042994", "0.67018545", "0.61249477", "0.6051414", "0.56280106", "0.5590886", "0.55801374", "0.5567025", "0.5530842", "0.55304706", "0.5523036", "0.54735106", "0.54558766", "0.5437347", "0.5433976", "0.5428403", "0.54215634", "0.54156524", "0.54147375", "0.5399533", "0.53829443", "0.53563064", "0.5349827", "0.53474486", "0.5341584", "0.53371376", "0.53370637", "0.5321674" ]
0.9547715
0
Test case for aws_service_api_keypairs_get
def test_aws_service_api_keypairs_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_keypair_get(self):\n pass", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def test_aws_service_api_keypair_delete(self):\n pass", "def test_aws_service_api_keypair_import_post(self):\n pass", "def get(self, name):\n path = '/os-keypairs/%s' % name\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack key pair %s: %s' % (name, truncate(res)))\n return res[0]['keypair']", "def test_get_keys(self):\n response = self.client.get_keys()\n assert isinstance(response, dict)\n assert 'public' in response\n assert 'private' in response\n assert response['public'] is not None\n assert response['private'] is not None", "def describe_key_pairs(self):\n response = key_pair.describe_key_pairs(self.url, self.verb,\n self.headers, self.version)\n if response is not None :\n res = DescribeKeyPairsResponse.DescribeKeyPairsResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def _get_key_pair_by_id(key_pair_id):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n key_pairs = ec2_client.get_all_key_pairs(keynames=key_pair_id)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return key_pairs[0] if key_pairs else None", "def test_get_user_api_keys(self):\n pass", "def getAwsKeypair(directory=None):\n if directory is None:\n directory = './'\n with open(directory + 'access.key', 'r+') as fp:\n access_key = fp.read()\n with open(directory + 'secret.key', 'r+') as fp:\n secret_key = fp.read()\n return (access_key, secret_key)", "def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)", "def test_aws_service_api_interfaces_get(self):\n pass", "def test_get_vault_pubkeys(self):\n pass", "def test_get_public_key(self):\n query_string = [('agentid', 'false'),\n ('companyid', 'false')]\n response = self.client.open(\n '/v0_9_1/PublicKeys',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n for key, value in pairs.items():\n tempconfig.write(\"{0}: {1}\\n\".format(\n key, value).encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, \"Spam\"), value)\n for key, value in pairs.items():\n self.assertEqual(config.getRequiredKey(key), value)\n finally:\n os.remove(tempconfig.name)", "def keypair_lookup(session):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_key_pairs()\n\n # If SSH_KEY exists and points to a valid Key Pair, use it\n key = os.environ.get(\"SSH_KEY\", None) # reuse bastion.py env vars\n if key is not None:\n kp_name = os.path.basename(key)\n if kp_name.endswith(\".pem\"):\n kp_name = kp_name[:-4]\n for kp in response['KeyPairs']:\n if kp[\"KeyName\"] == kp_name:\n return kp_name\n\n print(\"Key Pairs\")\n for i in range(len(response['KeyPairs'])):\n 
print(\"{}: {}\".format(i, response['KeyPairs'][i]['KeyName']))\n if len(response['KeyPairs']) == 0:\n return None\n while True:\n try:\n idx = input(\"[0]: \")\n idx = int(idx if len(idx) > 0 else \"0\")\n return response['KeyPairs'][idx]['KeyName']\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n print(\"Invalid Key Pair number, try again\")", "def check_key_pair(ec2, kp_name):\n if not [i for i in ec2.get_all_key_pairs() if str(i).split(':')[1] == kp_name]:\n sys.stderr.write(\"Key pair: {} does not exist, please import_key_pair prior to running.\\n\".format(kp_name))\n sys.exit(1)", "def test_create_api_key(self):\n pass", "def key_pairs(self):\n return self.get('key_pairs')", "def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")", "def get_keypair_keypath ( aws_account_type ) :\n return '/keypairs/' + aws_account_type + '/'", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def test_get_cloud_organization_api_key(self):\n pass", "def GetKeyPairs(self):\n\n sql = \"\"\"select keypair_id as ID, \n keypair_name as Name, \n case when ifnull(private_key, \"\") != \"\" then 'true' else 'false' end as HasPrivateKey,\n case when ifnull(passphrase, \"\") != \"\" then 'true' else 'false' end as HasPassphrase\n from clouds_keypair\n where cloud_id = %s\"\"\"\n db = catocommon.new_conn()\n rows = db.select_all_dict(sql, (self.ID))\n # making it a list instead of a tuple for downstream use as JSON\n self.KeyPairs = list(rows) if rows else []\n db.close()\n return self.KeyPairs", "def test_keys(app, client):\n response = client.get(\"/keys\")\n assert response.status_code == 200\n assert len(response.json[\"keys\"]) > 0", "def test_aws_keys_from_env():\n\n # Init variables\n ds = nio.DataSink()\n aws_access_key_id = \"ABCDACCESS\"\n aws_secret_access_key = \"DEFGSECRET\"\n\n # Set env vars\n os.environ[\"AWS_ACCESS_KEY_ID\"] = aws_access_key_id\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = aws_secret_access_key\n\n # Call function to return creds\n access_key_test, secret_key_test = ds._return_aws_keys()\n\n # Assert match\n assert aws_access_key_id == access_key_test\n assert aws_secret_access_key == secret_key_test", "def download_keypair ( s3_infra_conn, aws_account_type, region_name, keypair_type ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keypair_type )\n keypair_bucket = get_admin_bucket_name( region_name = region_name )\n return retrieve_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = keypair_bucket,\n key_name = get_keypair_keypath( aws_account_type ) + get_keypair_keyname( keypair_name ),\n stored_filename = keypair_name )", "def test_get_kms_key_alias(self):\n service_key_alias = 'service-name'\n key_arn = 'random-ARN'\n self.mock_kms.list_aliases.return_value = {\n \"Aliases\": [ {\"AliasName\": \"alias/{}\".format(service_key_alias)} ]\n }\n aliases = ef_utils.kms_key_alias(self.mock_kms, key_arn)\n self.assertIn(service_key_alias, aliases)\n 
self.mock_kms.list_aliases.assert_called_once_with(KeyId=key_arn)", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass" ]
[ "0.89948696", "0.7429849", "0.72318494", "0.68253005", "0.6428975", "0.637614", "0.60023105", "0.59939665", "0.5983117", "0.59761864", "0.59480745", "0.5947974", "0.59439266", "0.5929136", "0.58903056", "0.58107954", "0.5772948", "0.57086974", "0.569662", "0.5693239", "0.5678805", "0.5666426", "0.560145", "0.55878663", "0.5572533", "0.55605495", "0.55381715", "0.55295795", "0.55159813", "0.5490128" ]
0.9444123
0
Test case for aws_service_api_network_subnets_get
def test_aws_service_api_network_subnets_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subnets(self):\n url = '%s/v2.0/subnets' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['subnets']\n else:\n LOG.error('Get subnets failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def test_list_subnets(self):\n print(self.the_client.list_subnets())", "def subnets(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subnets\")", "def test_get_subnet(self):\n self.assertEqual(\n type(self.the_client.get_subnet(subnet_id)),\n baidubce.bce_response.BceResponse)", "def list_subnet(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing subnet.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"subnet List : %s \" % output)\n return output[\"subnets\"]", "def test_subnets():\n with patch.object(\n salt.utils.network, \"subnets\", MagicMock(return_value=\"10.1.1.0/24\")\n ):\n assert win_network.subnets() == \"10.1.1.0/24\"", "def subnets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> Sequence[str]:\n return pulumi.get(self, \"subnets\")", "def test_unordered_subnets(self):\n\n self._test_find_next_subnet(\n network=\"172.31.0.0/16\",\n subnets=[\"172.31.48.0/20\", \"172.31.0.0/20\", \"172.31.16.0/20\", \"172.31.32.0/20\"],\n requests=[24],\n expected=[\"172.31.64.0/24\"],\n )", "def get(self, oid=None, name=None):\n if oid is not None:\n path = '%s/subnets/%s' % (self.ver, oid)\n elif name is not None:\n path = '%s/subnets?display_name=%s' % (self.ver, name)\n else:\n raise OpenstackError('Specify at least subnet id or name')\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Get openstack subnets: %s' % truncate(res))\n if oid is not None:\n server = res[0]['subnet']\n elif name is not None:\n server = res[0]['subnets'][0]\n \n return server", "def get_subnet_details(self, subnet_name=\"dummy_subnet\", subnet_id=None):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n if result is None:\n LOG_OBJ.error(\"No response from Server while getting subnets\")\n return result\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet details Failed with status %s \" %\n result.status)\n return result.status\n\n output = json.loads(result.data)\n\n for subnets in output['subnets']:\n if (subnet_id is not None and (subnets['id'] == subnet_id)) or\\\n 
subnets['name'].lower() == subnet_name.lower():\n LOG_OBJ.debug(\"Subnet Details: %s\" % subnets)\n return subnets\n\n LOG_OBJ.error(\"Subnet with name:%s or with id:%s is Not Found\" %\n (subnet_name, subnet_id))", "def list_subnets(self, retrieve_all=True, **_params):\r\n return self.list('subnets', self.subnets_path, retrieve_all,\r\n **_params)", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def test_create_subnet(self):\n client_token = generate_client_token()\n subnet_name = 'test_subnet_name1' + client_token\n subnet_cidr = '192.168.0.64/26'\n self.assertEqual(\n type(self.the_client.create_subnet(subnet_name,\n 'cn-bj-a',\n subnet_cidr,\n vpc_id,\n client_token=client_token)),\n baidubce.bce_response.BceResponse)", "def get_subnet(availability_zone: Optional[str] = None,\n availability_zone_id: Optional[str] = None,\n cidr_block: Optional[str] = None,\n default_for_az: Optional[bool] = None,\n filters: Optional[Sequence[pulumi.InputType['GetSubnetFilterArgs']]] = None,\n id: Optional[str] = None,\n ipv6_cidr_block: Optional[str] = None,\n state: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n vpc_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult:\n __args__ = dict()\n __args__['availabilityZone'] = availability_zone\n __args__['availabilityZoneId'] = availability_zone_id\n __args__['cidrBlock'] = cidr_block\n __args__['defaultForAz'] = default_for_az\n __args__['filters'] = filters\n __args__['id'] = id\n __args__['ipv6CidrBlock'] = ipv6_cidr_block\n __args__['state'] = state\n __args__['tags'] = tags\n __args__['vpcId'] = vpc_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:ec2/getSubnet:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value\n\n return AwaitableGetSubnetResult(\n arn=pulumi.get(__ret__, 'arn'),\n assign_ipv6_address_on_creation=pulumi.get(__ret__, 'assign_ipv6_address_on_creation'),\n availability_zone=pulumi.get(__ret__, 'availability_zone'),\n availability_zone_id=pulumi.get(__ret__, 'availability_zone_id'),\n available_ip_address_count=pulumi.get(__ret__, 'available_ip_address_count'),\n cidr_block=pulumi.get(__ret__, 'cidr_block'),\n customer_owned_ipv4_pool=pulumi.get(__ret__, 'customer_owned_ipv4_pool'),\n default_for_az=pulumi.get(__ret__, 'default_for_az'),\n enable_dns64=pulumi.get(__ret__, 'enable_dns64'),\n enable_lni_at_device_index=pulumi.get(__ret__, 'enable_lni_at_device_index'),\n enable_resource_name_dns_a_record_on_launch=pulumi.get(__ret__, 'enable_resource_name_dns_a_record_on_launch'),\n 
enable_resource_name_dns_aaaa_record_on_launch=pulumi.get(__ret__, 'enable_resource_name_dns_aaaa_record_on_launch'),\n filters=pulumi.get(__ret__, 'filters'),\n id=pulumi.get(__ret__, 'id'),\n ipv6_cidr_block=pulumi.get(__ret__, 'ipv6_cidr_block'),\n ipv6_cidr_block_association_id=pulumi.get(__ret__, 'ipv6_cidr_block_association_id'),\n ipv6_native=pulumi.get(__ret__, 'ipv6_native'),\n map_customer_owned_ip_on_launch=pulumi.get(__ret__, 'map_customer_owned_ip_on_launch'),\n map_public_ip_on_launch=pulumi.get(__ret__, 'map_public_ip_on_launch'),\n outpost_arn=pulumi.get(__ret__, 'outpost_arn'),\n owner_id=pulumi.get(__ret__, 'owner_id'),\n private_dns_hostname_type_on_launch=pulumi.get(__ret__, 'private_dns_hostname_type_on_launch'),\n state=pulumi.get(__ret__, 'state'),\n tags=pulumi.get(__ret__, 'tags'),\n vpc_id=pulumi.get(__ret__, 'vpc_id'))", "def list(self, tenant=None, network=None, gateway_ip=None, cidr=None):\n path = '%s/subnets' % self.ver \n \n query = {}\n if tenant is not None:\n query['tenant_id'] = tenant\n if network is not None:\n query['network_id'] = network\n if gateway_ip is not None:\n query['gateway_ip '] = gateway_ip\n if cidr is not None:\n query['cidr '] = cidr \n path = '%s?%s' % (path, urlencode(query))\n \n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Get openstack subnets: %s' % truncate(res))\n return res[0]['subnets']", "def test_use_subnets_in_only_one_vpc(iam_client_stub, ec2_client_stub):\n stubs.configure_iam_role_default(iam_client_stub)\n stubs.configure_key_pair_default(ec2_client_stub)\n\n # Add a response with a thousand subnets all in different VPCs.\n # After filtering, only subnet in one particular VPC should remain.\n # Thus head_node.SubnetIds and worker_nodes.SubnetIds should end up as\n # being length-one lists after the bootstrap_config.\n stubs.describe_a_thousand_subnets_in_different_vpcs(ec2_client_stub)\n\n # describe the subnet in use while determining its vpc\n stubs.describe_subnets_echo(ec2_client_stub, DEFAULT_SUBNET)\n # given no existing security groups within the VPC...\n stubs.describe_no_security_groups(ec2_client_stub)\n # expect to create a security group on the VPC\n stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG)\n # expect new security group details to be retrieved after creation\n stubs.describe_sgs_on_vpc(\n ec2_client_stub,\n [DEFAULT_SUBNET[\"VpcId\"]],\n [DEFAULT_SG],\n )\n\n # given no existing default security group inbound rules...\n # expect to authorize all default inbound rules\n stubs.authorize_sg_ingress(\n ec2_client_stub,\n DEFAULT_SG_WITH_RULES,\n )\n\n # expect another call to describe the above security group while checking\n # a second time if it has ip_permissions set (\"if not sg.ip_permissions\")\n stubs.describe_an_sg_2(\n ec2_client_stub,\n DEFAULT_SG_WITH_RULES,\n )\n\n # given our mocks and an example config file as input...\n # expect the config to be loaded, validated, and bootstrapped successfully\n config = helpers.bootstrap_aws_example_config_file(\"example-full.yaml\")\n _get_vpc_id_or_die.cache_clear()\n\n # We've filtered down to only one subnet id -- only one of the thousand\n # subnets generated by ec2.subnets.all() belongs to the right VPC.\n assert config[\"head_node\"][\"SubnetIds\"] == [DEFAULT_SUBNET[\"SubnetId\"]]\n assert config[\"worker_nodes\"][\"SubnetIds\"] == [DEFAULT_SUBNET[\"SubnetId\"]]\n # Check that the security group has been filled correctly.\n assert config[\"head_node\"][\"SecurityGroupIds\"] == 
[DEFAULT_SG[\"GroupId\"]]\n assert config[\"worker_nodes\"][\"SecurityGroupIds\"] == [\n DEFAULT_SG[\"GroupId\"]\n ]", "def test_vpc_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_vpc_data(neo4j_session)\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})-[:RESOURCE]->(subnet:GCPSubnet)\n RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,\n subnet.private_ip_google_access\n \"\"\"\n expected_vpc_id = 'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {\n (\n n['vpc.id'],\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/global/networks/default',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n ),\n }\n assert actual_nodes == expected_nodes", "def test_transform_and_load_subnets(neo4j_session):\n subnet_res = tests.data.gcp.compute.VPC_SUBNET_RESPONSE\n subnet_list = cartography.intel.gcp.compute.transform_gcp_subnets(subnet_res)\n cartography.intel.gcp.compute.load_gcp_subnets(neo4j_session, subnet_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(subnet:GCPSubnet)\n RETURN subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range, subnet.private_ip_google_access,\n subnet.vpc_partial_uri\n \"\"\"\n nodes = neo4j_session.run(query)\n actual_nodes = {\n (\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n n['subnet.vpc_partial_uri'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n 'projects/project-abc/global/networks/default',\n ),\n }\n assert actual_nodes == expected_nodes", "def test_list_host_subnet(self):\n pass", "def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")", "def list_subnets(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.list_subnets(**kwargs)", "def list_namespaced_host_subnet(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_host_subnet\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/hostsubnets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header 
`Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1HostSubnetList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_read_host_subnet(self):\n pass", "def test_eks_cluster_vpc_subnet(self) -> None:\n cluster = self.eks.describe_cluster(name='andrew-jarombek-eks-v2').get('cluster')\n cluster_vpc: str = cluster.get('resourcesVpcConfig').get('vpcId')\n cluster_subnets: list = cluster.get('resourcesVpcConfig').get('subnetIds')\n\n kubernetes_vpc = VPC.get_vpcs('application-vpc')\n self.assertEqual(1, len(kubernetes_vpc))\n\n self.assertEqual(kubernetes_vpc[0].get('VpcId'), cluster_vpc)\n\n kubernetes_dotty_subnet = VPC.get_subnets('kubernetes-dotty-public-subnet')\n kubernetes_grandmas_blanket_subnet = VPC.get_subnets('kubernetes-grandmas-blanket-public-subnet')\n self.assertEqual(1, len(kubernetes_dotty_subnet))\n self.assertEqual(1, len(kubernetes_grandmas_blanket_subnet))\n\n self.assertListEqual(\n [kubernetes_dotty_subnet[0].get('SubnetId'), kubernetes_grandmas_blanket_subnet[0].get('SubnetId')],\n cluster_subnets\n )", "def list_subnets(self, filters=None):\n # If the cloud is running nova-network, just return an empty list.\n if not self.has_service('network'):\n return []\n\n # Translate None from search interface to empty {} for kwargs below\n if not filters:\n filters = {}\n return list(self.network.subnets(**filters))" ]
[ "0.8326366", "0.8207443", "0.77458423", "0.773097", "0.76437336", "0.7635327", "0.75836563", "0.747489", "0.73467875", "0.73467875", "0.731024", "0.72891515", "0.7184063", "0.7079082", "0.70580095", "0.69772714", "0.6942426", "0.6940601", "0.6905459", "0.68559116", "0.6825933", "0.6791661", "0.67525864", "0.67460626", "0.672756", "0.6707606", "0.670289", "0.66763365", "0.66631854", "0.66418815" ]
0.9566142
0
Test case for aws_service_api_networks_get
def test_aws_service_api_networks_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_networks(self):\n pass", "def test_get_network(self):\n pass", "def get_networks(self):\n url = '%s/v2.0/networks' % self.catalog['network']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['networks']\n else:\n LOG.error('Get networks failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]", "def test_networking_project_network_get(self):\n pass", "def get_networks() -> dict:\n nets_rq = request(\n method=\"GET\", url=app.config[\"NETWORKS_REF\"], headers=build_header()\n )\n\n if not nets_rq:\n raise HTTPError(nets_rq.status_code)\n\n return nets_rq.json()", "def test_networking_project_network_service_get(self):\n pass", "def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']", "def test_networking_project_network_list(self):\n pass", "def test_get_unregistered_networks(self):\n pass", "def test_list_lab_networks(self, authenticated_client, lab_path):\n r = authenticated_client.api.list_lab_networks(lab_path)\n assert r[\"data\"] is not None", "def get(self, context, region_id, filters):\n networks_obj = dbapi.networks_get_by_region(\n context, region_id, filters)\n return jsonutils.to_primitive(networks_obj), 200, None", "def test_aws_service_api_network_subnets_get(self):\n pass", "def show_networks():\n return get_networks()", "def test_networking_project_network_tag_get(self):\n pass", "def getNetworksDetails(network_id):\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url + \"/\" + network_id)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"network\"]", "def list_networks(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('networks', self.networks_path, retrieve_all,\r\n **_params)", "def test_retrieve_networks(site):\n models.Attribute.objects.create(\n site=site, resource_name='Network', name='test'\n )\n\n net_8 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/8', attributes={'test': 'foo'}\n )\n net_24 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/24', attributes={'test': 'bar'}\n )\n net_25 = models.Network.objects.create(\n site=site, cidr=u'10.0.0.0/25', attributes={'test': 'baz'}\n )\n ip = models.Network.objects.create(\n site=site, cidr=u'10.0.0.1/32'\n )\n\n # root=True\n assert list(site.networks.filter(parent_id=None)) == [net_8]\n\n # include_networks=True, include_ips=Fals\n assert list(site.networks.filter(is_ip=False)) == [net_8, net_24, net_25]\n\n # include_networks=False, include_ips=False\n assert list(site.networks.none()) == []\n\n # include_networks=True, include_ips=True\n assert list(site.networks.all()) == [net_8, net_24, net_25, ip]\n\n # 
include_networks=False, include_ips=True\n assert list(site.networks.filter(is_ip=True)) == [ip]\n\n # Filter by attribute\n assert list(site.networks.by_attribute(None, 'foo')) == []\n assert list(site.networks.by_attribute('test', 'foo')) == [net_8]\n\n # Get by address\n assert site.networks.get_by_address(u'10.0.0.0/8') == net_8\n\n #\n # .get_closest_parent()\n #\n # Closest parent for non-existent 10.0.0.128/32 network should be /24\n assert site.networks.get_closest_parent(u'10.0.0.128/32') == net_24\n\n # Closest parent for non-existent 10.0.0.2/32 network should be /25\n assert site.networks.get_closest_parent(u'10.0.0.2/32') == net_25\n\n # Matching ip with shorter prefix_length should not match\n with pytest.raises(models.Network.DoesNotExist):\n site.networks.get_closest_parent(u'10.0.0.2/32', prefix_length=27)\n\n # Non-existent closest parent should error\n with pytest.raises(models.Network.DoesNotExist):\n site.networks.get_closest_parent(u'1.0.0.2/32')\n\n # Invalid prefix_length\n with pytest.raises(exc.ValidationError):\n site.networks.get_closest_parent(u'10.0.0.2/32', prefix_length='shoe')\n\n # Invalid CIDR\n with pytest.raises(exc.ValidationError):\n site.networks.get_closest_parent(u'1')", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_networking_project_network_service_list(self):\n pass", "def networks(self) -> pulumi.Output[Sequence['outputs.NetworkConfigResponse']]:\n return pulumi.get(self, \"networks\")", "def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % (node),None)\n return data", "def GetNetworks(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n networks = self._SendRequest(HTTP_GET, \"/%s/networks\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return networks\n else:\n return [n[\"name\"] for n in networks]", "def fusion_api_get_ethernet_networks(self, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.get(uri=uri, api=api, headers=headers, param=param)", "def networks(self) -> Sequence['outputs.NetworkConfigResponse']:\n return pulumi.get(self, \"networks\")", "def _test_network_list_paged(\n self, filter_params, expected_networks, page_data,\n source_networks=None, **extra_kwargs):\n filter_params = filter_params or {}\n sort_dir = page_data['sort_dir']\n # invert sort_dir for calls\n sort_dir = 'asc' if sort_dir == 'desc' else 'desc'\n call_args = {'single_page': True, 'limit': 21, 'sort_key': 'id',\n 'sort_dir': sort_dir}\n\n return_values = []\n all_networks = (self.networks.list() if source_networks is None\n else source_networks)\n\n expected_calls = []\n\n params = filter_params.copy()\n params.update(call_args)\n if page_data.get('marker_id'):\n params.update({'marker': page_data.get('marker_id')})\n extra_kwargs.update({'marker': page_data.get('marker_id')})\n return_values.append(all_networks[0:21])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n\n self.mock_network_list.side_effect = return_values\n\n extra_kwargs.update(filter_params)\n ret_val, has_more_data, has_prev_data = api.neutron.network_list_paged(\n self.request, page_data, **extra_kwargs)\n self.mock_network_list.assert_has_calls(expected_calls)\n self.assertEqual(set(n.id for n in expected_networks),\n set(n.id for n in ret_val))\n self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,\n [n.id for n in ret_val])\n return ret_val, has_more_data, has_prev_data", "def network_list(self, kwargs=None):\n 
try:\n scode, networks = Rest.get('Network')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(networks) == 0:\n Console.info(\"No network exist\")\n return\n\n n = 1\n e = {}\n data = []\n for network in networks:\n d = {}\n d['Ip'] = network['Ip']\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def test_get_default_network(self):\n pass", "def get_net_details(self, net_name=\"dummy_net\", net_id=None):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n if result is None:\n LOG_OBJ.error(\"No response from Server while listing the nets\")\n return result.status\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network Failed with status %s \" % result.status)\n return result.status\n output = json.loads(result.data)\n\n for nets in output['networks']:\n if (net_id is not None and (nets['id'] == net_id)) or \\\n nets['name'].lower() == net_name.lower():\n LOG_OBJ.debug(\"Net details : %s \" % nets)\n return nets\n\n LOG_OBJ.debug(\"Network with name:%s or with ID:%s is Not Found\" %\n (net_name, net_id))", "def validate_networks(self, context, requested_networks):\n args = {'networks': requested_networks}\n return rpc.call(context, FLAGS.network_topic,\n {'method': 'validate_networks',\n 'args': args})" ]
[ "0.8384454", "0.7379086", "0.7318698", "0.7318663", "0.7294449", "0.7183769", "0.69510686", "0.6810491", "0.67909086", "0.6770815", "0.6674351", "0.6660264", "0.66253245", "0.66232175", "0.6577437", "0.6521967", "0.651514", "0.65070605", "0.650605", "0.6473854", "0.6383238", "0.63828444", "0.6360154", "0.6340467", "0.63177603", "0.6310995", "0.6300146", "0.62887603", "0.6275695", "0.6266239" ]
0.95457506
0
Test case for aws_service_api_private_image_get
def test_aws_service_api_private_image_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_public_image_get(self):\n pass", "def test_aws_service_api_image_get(self):\n pass", "def test_aws_service_api_public_images_get(self):\n pass", "def get_image(event, context):\n try:\n bucket_key = event['pathParameters']['bucket_key']\n except KeyError:\n return _bad_request_error('No bucket key')\n try:\n session_token= event['headers']['s3-session-token']\n access_key_id = event['headers']['s3-access-key-id']\n secret_access_key = event['headers']['s3-access-key']\n except KeyError:\n return _non_authorized_error(\n 'Not enough credentials to authorize')\n try:\n image = SafeImage(\n session_token=session_token,\n access_key_id=access_key_id,\n secret_access_key=secret_access_key).retrieve(bucket_key)\n except ClientError as e:\n # TODO: log here\n print(e)\n return _non_authorized_error(\n 'Invalid credentials')\n return _success_image(image)", "def random_private_image() -> dict:\n high = DB.private_count()\n random_id = rng.integers(low=1, high=high, size=1)[0]\n return DB.private_image_by_id(int(random_id))", "def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_read_image(self):\n pass", "def test_get_image_url(self, mock_get):\n\n image_page_url = self.test_data[\"image_page_url\"]\n image_page_response = self.test_data[\"image_page_response\"]\n image_tag_src = self.test_data[\"image_tag_src\"]\n \n mock_get.return_value = self._build_mock_response(text = image_page_response)\n\n self.assertEqual(self.retriever._get_image_url(image_page_url), \\\n image_tag_src, msg = \"Unable to extract image tag from the image page\")", "def private_image_by_id(img_id: int) -> dict:\n doc = DB.client.execute(gql(private_image_by_index), variable_values={\"_eq\": img_id})\n doc = doc['flickr_private'][0]\n return doc", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def test_get_image_exists_not(self):\n with self.assertRaises(errors.NotFound):\n self.docker.images.get(\"image_does_not_exists\")", "async def test_get_image(opp, utcnow):\n helper = await setup_test_component(opp, create_camera)\n image = await camera.async_get_image(opp, helper.entity_id)\n assert image.content == base64.b64decode(FAKE_CAMERA_IMAGE)", "def test_list_image_metadata(self):\n pass", "def test_create_image_signature(self):\n pass", "def image_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_image(**kwargs)", "def test_read_namespaced_image_stream_image(self):\n pass", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_resource_user_resource_get_avatar_file_get(self):\n pass", "def test_list_image(self):\n pass", "def horizon_server_with_private_image(horizon_servers_with_private_image):\n return horizon_servers_with_private_image[0]", "def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array", "def test_create_image(self):\n pass", "def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_search_bogus_image(self):\n with self.assertRaises(errors.APIError):\n self.docker.images.search(\"bogus/bogus\")", "def test_tag_image_duplicate(self):\n\n message = {\n \"method\": \"build_image\",\n \"params\": {\"url\": self.url,\n \"tag_image\": self.tag_image}\n }\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"error\")", "def test_read_namespaced_image_stream(self):\n pass" ]
[ "0.9268567", "0.8515992", "0.8399844", "0.81762105", "0.6704678", "0.66913", "0.64894724", "0.64272404", "0.63345486", "0.62184286", "0.6190606", "0.6187278", "0.6182644", "0.61573607", "0.61392295", "0.6135909", "0.61230046", "0.6112058", "0.60774755", "0.6073487", "0.6015454", "0.6008157", "0.60009766", "0.59822553", "0.5976703", "0.59746623", "0.59674215", "0.5950143", "0.58848184", "0.58667225" ]
0.9579085
0
Test case for aws_service_api_private_images_get
def test_aws_service_api_private_images_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_public_images_get(self):\n pass", "def test_aws_service_api_public_image_get(self):\n pass", "def test_aws_service_api_image_get(self):\n pass", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def random_private_image() -> dict:\n high = DB.private_count()\n random_id = rng.integers(low=1, high=high, size=1)[0]\n return DB.private_image_by_id(int(random_id))", "def list_images(ec2): # pragma: no coverage\n response = ec2.describe_images(Filters=[{'Name': 'is-public',\n 'Values': ['false']}])\n response.pop('ResponseMetadata')\n printy(\"{:12}\\t{:20}\\t\\tCreationDate:\".format(\"ImageId\", \"Name\"))\n\n for image in response['Images']:\n if len(image[\"Name\"]) > 20:\n image['Name'] = image['Name'][:20] + \"...\"\n print(\"{ImageId}\\t{Name:20}\\t\\t{CreationDate}\".format(**image))", "def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array", "def private_image_by_id(img_id: int) -> dict:\n doc = DB.client.execute(gql(private_image_by_index), variable_values={\"_eq\": img_id})\n doc = doc['flickr_private'][0]\n return doc", "def get_images(owner, tagvalue):\n try:\n images = ec2(credentials).describe_images(Owners=[owner],Filters=[{'Name':'tag-value', 'Values':[tagvalue]}])\n return images\n except Exception as e:\n print(\"Error: cannot get the list of images. 
%s\" % e)", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_list_image_metadata(self):\n pass", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_list_image(self):\n pass", "def get_image(event, context):\n try:\n bucket_key = event['pathParameters']['bucket_key']\n except KeyError:\n return _bad_request_error('No bucket key')\n try:\n session_token= event['headers']['s3-session-token']\n access_key_id = event['headers']['s3-access-key-id']\n secret_access_key = event['headers']['s3-access-key']\n except KeyError:\n return _non_authorized_error(\n 'Not enough credentials to authorize')\n try:\n image = SafeImage(\n session_token=session_token,\n access_key_id=access_key_id,\n secret_access_key=secret_access_key).retrieve(bucket_key)\n except ClientError as e:\n # TODO: log here\n print(e)\n return _non_authorized_error(\n 'Invalid credentials')\n return _success_image(image)", "def test_get_image_url(self, mock_get):\n\n image_page_url = self.test_data[\"image_page_url\"]\n image_page_response = self.test_data[\"image_page_response\"]\n image_tag_src = self.test_data[\"image_tag_src\"]\n \n mock_get.return_value = self._build_mock_response(text = image_page_response)\n\n self.assertEqual(self.retriever._get_image_url(image_page_url), \\\n image_tag_src, msg = \"Unable to extract image tag from the image page\")", "def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def test_get_image_exists_not(self):\n with self.assertRaises(errors.NotFound):\n self.docker.images.get(\"image_does_not_exists\")", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_list_namespaced_image_stream(self):\n pass", "def test_get_private(self):\n owner = create_user('owner')\n create_snippet('foo', private=True, owner=owner)\n expected = [0, 0, 1, 1]\n\n def check(i):\n response = self.get()\n self.assertEqual(len(response.data), expected[i])\n\n self.check_for_users(check, owner)", "def test_ami_exists(self) -> None:\n owner = self.sts.get_caller_identity().get('Account')\n amis = self.ec2_client.describe_images(\n Owners=[owner],\n Filters=[{\n 'Name': 'name',\n 'Values': ['saints-xctf-web-server*']\n 
}]\n )\n self.assertTrue(len(amis.get('Images')) > 0)", "def glance_list_owned_public_images(glance, owner_id, image_info):\n\n images = []\n list_kwargs = {'filters': {'visibility': 'public', 'owner': owner_id}}\n public_owned_images = glance.images.list(**list_kwargs)\n for image in public_owned_images:\n # only images with the \"same\" name ('TOTO' matches 'test_TOTO' or 'TOTO - 2016-10-03')\n if image_info['image_name'] in image.name:\n images.append(image)\n return images", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_search_bogus_image(self):\n with self.assertRaises(errors.APIError):\n self.docker.images.search(\"bogus/bogus\")", "def test_read_image(self):\n pass", "def getimgs():", "def list_images(self):\n raise NotImplementedError()" ]
[ "0.9245754", "0.8509919", "0.8128298", "0.7955235", "0.67987007", "0.67778707", "0.66492873", "0.65181035", "0.6443877", "0.6395238", "0.63889945", "0.6366931", "0.6331277", "0.6261341", "0.622767", "0.6148064", "0.6132572", "0.61236227", "0.6058544", "0.6032558", "0.6021095", "0.60132325", "0.59955305", "0.5988964", "0.5955115", "0.59331715", "0.58976424", "0.588883", "0.58840686", "0.5883624" ]
0.9565417
0
Test case for aws_service_api_public_image_get
def test_aws_service_api_public_image_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_public_images_get(self):\n pass", "def test_aws_service_api_image_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def get_image(event, context):\n try:\n bucket_key = event['pathParameters']['bucket_key']\n except KeyError:\n return _bad_request_error('No bucket key')\n try:\n session_token= event['headers']['s3-session-token']\n access_key_id = event['headers']['s3-access-key-id']\n secret_access_key = event['headers']['s3-access-key']\n except KeyError:\n return _non_authorized_error(\n 'Not enough credentials to authorize')\n try:\n image = SafeImage(\n session_token=session_token,\n access_key_id=access_key_id,\n secret_access_key=secret_access_key).retrieve(bucket_key)\n except ClientError as e:\n # TODO: log here\n print(e)\n return _non_authorized_error(\n 'Invalid credentials')\n return _success_image(image)", "def test_get_image_url(self, mock_get):\n\n image_page_url = self.test_data[\"image_page_url\"]\n image_page_response = self.test_data[\"image_page_response\"]\n image_tag_src = self.test_data[\"image_tag_src\"]\n \n mock_get.return_value = self._build_mock_response(text = image_page_response)\n\n self.assertEqual(self.retriever._get_image_url(image_page_url), \\\n image_tag_src, msg = \"Unable to extract image tag from the image page\")", "def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_read_image(self):\n pass", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def image_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_image(**kwargs)", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def test_read_namespaced_image_stream_image(self):\n pass", "def get_image_output(id: Optional[pulumi.Input[Optional[int]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n slug: Optional[pulumi.Input[Optional[str]]] = None,\n source: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetImageResult]:\n ...", "def test_aws_service_api_snapshots_get(self):\n pass", "def get_image(id: Optional[int] = None,\n name: Optional[str] = None,\n slug: Optional[str] = None,\n source: Optional[str] = None,\n opts: 
Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImageResult:\n __args__ = dict()\n __args__['id'] = id\n __args__['name'] = name\n __args__['slug'] = slug\n __args__['source'] = source\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('digitalocean:index/getImage:getImage', __args__, opts=opts, typ=GetImageResult).value\n\n return AwaitableGetImageResult(\n created=pulumi.get(__ret__, 'created'),\n description=pulumi.get(__ret__, 'description'),\n distribution=pulumi.get(__ret__, 'distribution'),\n error_message=pulumi.get(__ret__, 'error_message'),\n id=pulumi.get(__ret__, 'id'),\n image=pulumi.get(__ret__, 'image'),\n min_disk_size=pulumi.get(__ret__, 'min_disk_size'),\n name=pulumi.get(__ret__, 'name'),\n private=pulumi.get(__ret__, 'private'),\n regions=pulumi.get(__ret__, 'regions'),\n size_gigabytes=pulumi.get(__ret__, 'size_gigabytes'),\n slug=pulumi.get(__ret__, 'slug'),\n source=pulumi.get(__ret__, 'source'),\n status=pulumi.get(__ret__, 'status'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'))", "def get_image_url():", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def get_image(self, request, tenant_id, image_id):\n response_data = get_image(image_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])", "def test_basic_run(self):\n self.expect_datatore_lookup('SomeBlobKey', True)\n self.expect_open_image('SomeBlobKey', (1600, 1200))\n self.expect_resize(blob_image._DEFAULT_SERVING_SIZE)\n self.expect_encode_image('SomeImageInJpeg')\n self.run_request('image/jpeg', 'SomeImageInJpeg')", "def test_list_image_metadata(self):\n pass", "async def test_get_image(opp, utcnow):\n helper = await setup_test_component(opp, create_camera)\n image = await camera.async_get_image(opp, helper.entity_id)\n assert image.content == base64.b64decode(FAKE_CAMERA_IMAGE)", "def test_create_image(self):\n pass", "def test_read_namespaced_image_stream(self):\n pass", "def getimage(self):", "def test_create_image_signature(self):\n pass", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def test_list_image(self):\n pass", "def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)", "async def image(self, tag: Tag, get_bytes: bool=False):\r\n if not isinstance(tag, (str, enumeration.SFWImageTags, enumeration.NSFWImageTags)):\r\n raise TypeError(f\"{Tag} expected in `tag`\")\r\n\r\n if type(get_bytes) is not bool:\r\n raise TypeError(\"bool expected in `get_bytes`\")\r\n\r\n str_tag = tag if type(tag) is str else tag.value\r\n\r\n data_response = await self.http.endpoint(\"img/\" + str_tag)\r\n\r\n data_response[\"tag\"] = tag\r\n\r\n if get_bytes:\r\n image_url = data_response[\"url\"]\r\n image_bytes = await self.http.get_image_bytes(image_url)\r\n data_response[\"bytes\"] = image_bytes\r\n\r\n return result.ImageResult(data_response)" ]
[ "0.9147275", "0.8947318", "0.86610794", "0.8327578", "0.6968766", "0.6962929", "0.68538654", "0.6803162", "0.6783308", "0.6701789", "0.6664229", "0.6662487", "0.66177744", "0.6595421", "0.6537655", "0.6490602", "0.6470058", "0.6434494", "0.6427295", "0.6417345", "0.6409972", "0.63902444", "0.6364396", "0.6328215", "0.6321591", "0.62895626", "0.6283301", "0.62819046", "0.62807286", "0.62564844" ]
0.9514068
0
Test case for aws_service_api_public_images_get
def test_aws_service_api_public_images_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_public_image_get(self):\n pass", "def test_aws_service_api_private_images_get(self):\n pass", "def test_aws_service_api_image_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def list_images(ec2): # pragma: no coverage\n response = ec2.describe_images(Filters=[{'Name': 'is-public',\n 'Values': ['false']}])\n response.pop('ResponseMetadata')\n printy(\"{:12}\\t{:20}\\t\\tCreationDate:\".format(\"ImageId\", \"Name\"))\n\n for image in response['Images']:\n if len(image[\"Name\"]) > 20:\n image['Name'] = image['Name'][:20] + \"...\"\n print(\"{ImageId}\\t{Name:20}\\t\\t{CreationDate}\".format(**image))", "def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array", "def test_get_image_url(self, mock_get):\n\n image_page_url = self.test_data[\"image_page_url\"]\n image_page_response = self.test_data[\"image_page_response\"]\n image_tag_src = self.test_data[\"image_tag_src\"]\n \n mock_get.return_value = self._build_mock_response(text = image_page_response)\n\n self.assertEqual(self.retriever._get_image_url(image_page_url), \\\n image_tag_src, msg = \"Unable to extract image tag from the image page\")", "def test_list_image_metadata(self):\n pass", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def list_images(self):\n raise NotImplementedError()", "def get_images():\n return _IMAGES", "def test_list_image(self):\n pass", "def get_images(owner, tagvalue):\n try:\n images = ec2(credentials).describe_images(Owners=[owner],Filters=[{'Name':'tag-value', 'Values':[tagvalue]}])\n return images\n except Exception as e:\n print(\"Error: cannot get the list of images. 
%s\" % e)", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def get_images(self, page_number):", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def test_list_namespaced_image_stream(self):\n pass", "def test_image_display(self):\n\n result = self.client.get(\"/select_image\")\n\n self.assertIn(b\"/static/uploads/girl-glowing-skin-blue-eyes.jpg\", result.data)", "def get_image(request):\n collected_values = {}\n\n # Only allow GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n image_type = request.GET['image_type']\n image_index = request.GET['image_index']\n\n # Check the DB for an image with the same image_type and id\n images = Images.objects.filter(image_type=image_type, image_index=image_index)\n if not images:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Image doesn't exist\"\n return JsonResponse(collected_values, status=400)\n\n collected_values[\"image_index\"] = images[0].image_index\n collected_values[\"image_id\"] = images[0].iid\n collected_values[\"image_type\"] = images[0].image_type\n collected_values[\"image_category\"] = images[0].image_category\n collected_values[\"link\"] = images[0].link\n collected_values[\"message\"] = images[0].message\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Image Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def test_get_ao_image(self):\n response = self.client.open(\n '/rui-support/ao-image',\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def getimgs():", "def get_image_output(id: Optional[pulumi.Input[Optional[int]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n slug: Optional[pulumi.Input[Optional[str]]] = None,\n source: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetImageResult]:\n ...", "def image_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_image(**kwargs)", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def get_images(stage=0):\n return get_files(stage)[0]", "def test_aws_service_api_snapshots_get(self):\n pass", "def get_image(event, context):\n try:\n bucket_key = event['pathParameters']['bucket_key']\n except KeyError:\n return _bad_request_error('No bucket key')\n try:\n session_token= event['headers']['s3-session-token']\n access_key_id = event['headers']['s3-access-key-id']\n secret_access_key = event['headers']['s3-access-key']\n except KeyError:\n return 
_non_authorized_error(\n 'Not enough credentials to authorize')\n try:\n image = SafeImage(\n session_token=session_token,\n access_key_id=access_key_id,\n secret_access_key=secret_access_key).retrieve(bucket_key)\n except ClientError as e:\n # TODO: log here\n print(e)\n return _non_authorized_error(\n 'Invalid credentials')\n return _success_image(image)", "def image(image_id):\n\n found = False\n img = None\n \n try:\n for img in api.get_all_images():\n if img.id == image_id:\n found = True\n break\n except Exception:\n logging.error(\"Cannot make API connection to retrieve image info!\")\n\n if not found:\n return None\n\n return img" ]
[ "0.9030294", "0.8608971", "0.84502596", "0.8226284", "0.7452266", "0.6786225", "0.6761363", "0.6706916", "0.6639855", "0.6559341", "0.653849", "0.6529791", "0.65247184", "0.6505588", "0.6494819", "0.64355224", "0.64348966", "0.6434324", "0.6426369", "0.64168745", "0.6388035", "0.63746876", "0.6359844", "0.6348411", "0.63458526", "0.63452053", "0.634055", "0.63326955", "0.6311195", "0.62999946" ]
0.9515447
0
Test case for aws_service_api_regions_get
def test_aws_service_api_regions_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_regions(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load regions from url specified in api base\n r = requests.get(r['regions']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('regions', r)", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def test_api_region(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['regions']).json()\n r = requests.get(r['regions'][0]['url']).json()\n self.assertIn('html', r)\n self.assertIn('id', r)\n self.assertIn('name', r)\n self.assertIn('url', r)\n self.assertIn('rivers', r)\n self.assertIn('sections', r)\n self.assertIn('gages', r)", "def test_aws_service_api_availability_zones_get(self):\n pass", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] for region in resp.get('Regions', [])]", "def get_valid_regions(self):\n pass", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def get_valid_regions(self):\n client = self._boto.client('ec2')\n\n regions = []\n for region in client.describe_regions().get('Regions', []):\n if getattr(RegionCode.Region, region.get('RegionName'), None) is not None:\n regions.append(RegionCode.Region[region.get('RegionName')])\n else:\n regions.append(region.get('RegionName'))\n\n return regions", "def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")", "def get_region(self, region_id):\n 
raise exception.NotImplemented() # pragma: no cover", "def test_aws_service_api_network_subnets_get(self):\n pass", "def test_build__set_regions(self, valid_service: fixture) -> None:\n service: Service = valid_service\n\n assert service.regions == set_service_regions()", "def region(self, args):\n m = MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def describe_regions(\n self,\n request: dds_20151201_models.DescribeRegionsRequest,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_regions_with_options(request, runtime)", "def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')", "def get_valid_regions(self):\n conn = self._boto.ec2.connect_to_region(self.cli_region)\n\n regions = []\n for region in conn.get_all_regions():\n if getattr(RegionCode.Region, region.name, None) is not None:\n regions.append(RegionCode.Region[region.name])\n else:\n regions.append(region.name)\n\n return regions", "def get_regions(self):\n return self._regions", "def test_aws_service_api_vm_get(self):\n pass", "def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"regions\")", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def test_observations_query_region(patch_get):\n result = gemini.Observations.query_region(coords, radius=0.3 * units.deg)\n assert isinstance(result, Table)\n assert len(result) > 0", "def get_region(self, region, namespace, region_id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/{0}', region, *[region_id], **filters)", "def describe_regions_with_options(\n 
self,\n request: dds_20151201_models.DescribeRegionsRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeRegionsResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.accept_language):\n query['AcceptLanguage'] = request.accept_language\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeRegions',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeRegionsResponse(),\n self.call_api(params, req, runtime)\n )", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")" ]
[ "0.7572894", "0.73636293", "0.7204617", "0.71709335", "0.7119498", "0.7119498", "0.6878119", "0.6786396", "0.6769581", "0.65975285", "0.6471772", "0.6452135", "0.6423102", "0.63701135", "0.63528806", "0.6258155", "0.6240444", "0.61896706", "0.6179061", "0.6056295", "0.6048398", "0.60006005", "0.60001445", "0.59789574", "0.59594923", "0.59594285", "0.59518224", "0.5941586", "0.5927057", "0.5925241" ]
0.9564887
0
Test case for aws_service_api_security_groups_get
def test_aws_service_api_security_groups_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_v1_groups_get(self):\n pass", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n response = self.client.get_groups()\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/groups\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_get_resource_group_list(self):\n pass", "def test_users_groups_get(self):\n pass", "def test_get_group(self):\n pass", "def security_groups(self, oid):\n try:\n path = u'/servers/%s/os-security-groups' % oid\n res = self.client.call(path, u'GET', data=u'', \n token=self.manager.identity.token)\n self.logger.debug(u'Get openstack server security groups: %s' % truncate(res))\n return res[0][u'security_groups']\n except Exception as error:\n self.logger.error(error, exc_info=True)\n data = []\n return res", "def test_get_group_v2(self):\n response = self.client.get_group(\"ABC123\", api_version=2)\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v2/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_api_v1_groups_names_get(self):\n pass", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def security_group_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_security_group(**kwargs)", "def test_groups_group_id_get(self):\n pass", "def security_groups(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"security_groups\")", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_groups\")", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def test_get_rbac_authorization_api_group(self):\n pass", "def security_groups(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"security_groups\")", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def test_getGroups(self):\n\t\turl = 
\"/groups/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"count\"], 1)", "def list_secgroups(self, name=None):" ]
[ "0.7902849", "0.7510473", "0.7510473", "0.7471958", "0.74338824", "0.74338824", "0.7391786", "0.73295635", "0.7019355", "0.6873554", "0.6837248", "0.68147165", "0.6743259", "0.6743259", "0.6727421", "0.6713464", "0.670311", "0.66311735", "0.6624133", "0.6624133", "0.6624133", "0.6624133", "0.6624133", "0.66174555", "0.66134924", "0.659987", "0.6595256", "0.65948844", "0.6594375", "0.65903616" ]
0.9491485
0
Test case for aws_service_api_snapshot_delete
def test_aws_service_api_snapshot_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def cli(env, snapshot_id):\n block_manager = SoftLayer.BlockStorageManager(env.client)\n deleted = block_manager.delete_snapshot(snapshot_id)\n\n if deleted:\n click.echo('Snapshot %s deleted' % snapshot_id)", "def delete_snapshot(SnapshotId=None):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def delete_snapshot(self, snapshot_id):\r\n\r\n self.iscsi_svc.deleteObject(id=snapshot_id)", "def delete_snapshot(self, snapshot):\n aname = \"cinder_v%s.delete_snapshot\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volume_snapshots.delete(snapshot)\n bench_utils.wait_for_status(\n snapshot,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self._update_resource,\n timeout=CONF.openstack.cinder_volume_delete_timeout,\n check_interval=(CONF.openstack\n .cinder_volume_delete_poll_interval)\n )", "def test_aws_service_api_vm_delete(self):\n pass", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def delete_snapshot(self, pool, project, share, snapshot):\n svc = self.snapshot_path % (pool, project, share, snapshot)\n ret = self.rclient.delete(svc)\n if ret.status != restclient.Status.NO_CONTENT:\n exception_msg = (_('Error deleting '\n 'snapshot: %(snapshot)s on '\n 'share: %(share)s to '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'snapshot': snapshot,\n 'share': share,\n 'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.ShareBackendException(msg=exception_msg)", "def delete_snapshot(session, snapshot, network):\n # type: (Session, Text, Text) -> None\n url_tail = \"/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, network, CoordConstsV2.RSC_SNAPSHOTS, snapshot\n )\n return _delete(session, url_tail)", "def delete_snapshot(self, context, snapshot_id):\n caller_context = context\n context = context.elevated()\n snapshot_ref = self.db.snapshot_get(context, snapshot_id)\n project_id = snapshot_ref['project_id']\n\n LOG.info(_(\"snapshot %s: deleting\"), snapshot_ref['id'])\n self._notify_about_snapshot_usage(\n context, snapshot_ref, \"delete.start\")\n\n try:\n LOG.debug(_(\"snapshot %s: deleting\"), snapshot_ref['id'])\n\n # Pass context so that drivers that want to use it, can,\n # but it is not a requirement for all drivers.\n snapshot_ref['context'] = caller_context\n\n self._delete_snapshot_cascaded(context, snapshot_id)\n except exception.SnapshotIsBusy:\n LOG.error(_(\"Cannot delete snapshot %s: snapshot is busy\"),\n snapshot_ref['id'])\n self.db.snapshot_update(context,\n snapshot_ref['id'],\n {'status': 'available'})\n return True\n except Exception:\n with excutils.save_and_reraise_exception():\n self.db.snapshot_update(context,\n snapshot_ref['id'],\n {'status': 'error_deleting'})\n\n # Get reservations\n try:\n if CONF.no_snapshot_gb_quota:\n reserve_opts = {'snapshots': -1}\n else:\n reserve_opts = {\n 'snapshots': -1,\n 'gigabytes': -snapshot_ref['volume_size'],\n }\n volume_ref = self.db.volume_get(context, 
snapshot_ref['volume_id'])\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting snapshot\"))\n self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)\n self.db.snapshot_destroy(context, snapshot_id)\n LOG.info(_(\"snapshot %s: deleted successfully\"), snapshot_ref['id'])\n self._notify_about_snapshot_usage(context, snapshot_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n return True", "def test_aws_service_api_snapshots_post(self):\n pass", "def delete_snapshot(self, snapshot_id):\n resp, body = self.delete(\"snapshots/%s\" % snapshot_id)\n self.validate_response(schema.delete_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)", "def test_delete_image_signature(self):\n pass", "def delete(config: Config, ami: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n response = describe(config, ami, show_snapshot_id=True)\n\n ec2_client.deregister_image(ImageId=ami)\n\n ec2_client.delete_snapshot(SnapshotId=response[0][\"SnapshotId\"])", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def test_aws_service_api_snapshots_get(self):\n pass", "def database_volume_snapshot_delete(volume_snapshot_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query.filter(model.VolumeSnapshot.uuid == volume_snapshot_uuid).delete()\n session.commit()", "def test_delete_bucket(self):\n pass", "def _removeAWSSnapshot(self, snap_id: str):\n log.warning(f'Deleting AWS EBS Snapshot {snap_id}')\n ec2_client = boto3.client('ec2', region_name=self.aws_region)\n try:\n ec2_client.delete_snapshot(SnapshotId=snap_id)\n except Exception as error: # pylint: disable=broad-except\n log.error(f'Failed to delete AWS EBS Snapshot {snap_id}: {str(error)}')", "def delete_snapshot(self, snapshot):\n self._impl.delete_snapshot(snapshot)", "def delete_snapshot_object(session, key, snapshot=None):\n # type: (Session, str, Optional[str]) -> None\n url_tail = \"/{}/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_SNAPSHOTS,\n session.get_snapshot(snapshot),\n CoordConstsV2.RSC_OBJECTS,\n )\n _delete(session, url_tail, 
{CoordConstsV2.QP_KEY: key})", "def test_delete_db_instance_snapshot_no_wait(self):\n _create_db_instance(self.hook)\n _create_db_instance_snapshot(self.hook)\n\n instance_snapshot_operator = RdsDeleteDbSnapshotOperator(\n task_id=\"test_delete_db_instance_snapshot_no_wait\",\n db_type=\"instance\",\n db_snapshot_identifier=DB_INSTANCE_SNAPSHOT,\n aws_conn_id=AWS_CONN,\n dag=self.dag,\n wait_for_completion=False,\n )\n _patch_hook_get_connection(instance_snapshot_operator.hook)\n with patch.object(instance_snapshot_operator.hook, \"wait_for_db_snapshot_state\") as mock_wait:\n instance_snapshot_operator.execute(None)\n mock_wait.assert_not_called()\n\n with pytest.raises(self.hook.conn.exceptions.ClientError):\n self.hook.conn.describe_db_snapshots(DBSnapshotIdentifier=DB_INSTANCE_SNAPSHOT)", "def snap_delete(mnode, snapname):\n\n cmd = \"gluster snapshot delete %s --mode=script\" % snapname\n return g.run(mnode, cmd)", "def delete_volume_snapshot(volume_snapshots):\n if type(volume_snapshots) is not list:\n volumes = [volume_snapshots]\n command = 'cinder snapshot-delete %s' % \\\n \" \".join(snapshot['id'] for snapshot in volume_snapshots)\n d = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]", "def delete_snapshot(self, sSnapshotUuid, bChild = False):\n\t\treturn Job(SDK.PrlVm_DeleteSnapshot(self.handle, sSnapshotUuid, bChild)[0])", "def check_vs_deleted(vs_name, created_objects):\r\n if keep_objects:\r\n return\r\n api_instance = client.CustomObjectsApi()\r\n val = 0\r\n while val < 12:\r\n try:\r\n api_response = api_instance.get_namespaced_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1\",\r\n plural=\"volumesnapshots\",\r\n name=vs_name,\r\n namespace=namespace_value\r\n )\r\n LOGGER.debug(api_response)\r\n time.sleep(15)\r\n LOGGER.info(f\"Volume Snapshot Delete : Checking deletion for {vs_name}\")\r\n val += 1\r\n except ApiException:\r\n LOGGER.info(f\"Volume Snapshot Delete : {vs_name} deletion confirmed\")\r\n return\r\n LOGGER.error(f\"Volume Snapshot Delete : {vs_name} is not deleted , asserting\")\r\n clean_with_created_objects(created_objects)\r\n assert False", "def delete_snapshot(self, snapshot_id):\n response = snapshot.delete_snapshot(self.url, self.verb,\n self.headers, self.version,\n snapshot_id)\n if response is not None :\n res = DeleteSnapshotResponse.DeleteSnapshotResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def test_unlink_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n linked_snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(linked_snap_details.get('linked'))\n self.replication.unlink_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None" ]
[ "0.7890424", "0.7668865", "0.7533782", "0.73568654", "0.7311986", "0.7207969", "0.7096665", "0.70816356", "0.69637513", "0.690928", "0.69079185", "0.68927884", "0.6888659", "0.6888522", "0.68796116", "0.6850907", "0.67979336", "0.6763749", "0.6683268", "0.663037", "0.6612236", "0.66099364", "0.6579267", "0.6542798", "0.6530108", "0.6504199", "0.6468574", "0.6456819", "0.64499676", "0.64459616" ]
0.9599573
0
Test case for aws_service_api_snapshots_get
def test_aws_service_api_snapshots_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_snapshots_post(self):\n pass", "def xtest_snapshot_api(self):\n\n req = httplib2.Http(\".cache\")\n body = r\"\"\"{ \"snapshot\": { \"instanceId\": \"123\", \"name\": \"dbapi_test\" } }\"\"\"\n \n # Test creating an snapshot without a body in the request.\n LOG.info(\"* Creating an snapshot without a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test creating an snapshot with a malformed body.\n LOG.info(\"* Creating an snapshot with a malformed body\")\n bad_body = r\"\"\"{ \"snapshot\": {}]\"\"\"\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", bad_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n\n # Test listing all snapshots with a body in the request.\n LOG.info(\"* Listing all snapshots with a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test listing all snapshots for a specific instance with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\")\n resp, content = req.request(API_URL + \"snapshots?instanceId=\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test listing all snapshots for a specific tenant with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\") \n resp, content = req.request(API_URL + \"snapshots?tenantId=\" + TENANT_ID, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test getting a non-existent snapshot.\n LOG.info(\"* Getting dummy snapshot\")\n resp, content = req.request(API_URL + \"snapshots/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test creating a new instance from a dummy snapshot.\n instance_body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"102\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.1.2\"\n },\n \"databases\": [\n {\n \"name\": \"testdb\",\n \"character_set\": \"utf8\",\n \"collate\": \"utf8_general_ci\"\n },\n {\n \"name\": \"abcdefg\"\n }\n ],\n \"volume\":\n {\n \"size\": \"2\"\n }\n }\n }\"\"\"\n \n LOG.info(\"* Creating instance from dummy snapshot\")\n snap_body = json.loads(instance_body)\n snap_body['instance']['snapshotId'] = \"dummy\"\n snap_body = json.dumps(snap_body)\n resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n content = json.loads(content)\n self.assertEqual(500, resp.status)\n \n # This test is handled by the error handling in the API server\n# # Test creating a new instance from bad snapshot data in the body.\n# LOG.debug(\"* Creating instance from bad snapshot data in the body\")\n# snap_body = json.loads(instance_body)\n# snap_body['instance']['snapshotId'] = {}\n# snap_body = json.dumps(snap_body)\n# resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n# LOG.debug(resp)\n# LOG.debug(content)\n# content = json.loads(content)\n# self.assertEqual(500, resp.status) \n \n # Test deleting a non-existent snapshot.\n LOG.info(\"* Deleting dummy snapshot\")\n resp, content = 
req.request(API_URL + \"snapshots/dummy\", \"DELETE\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def test_snapshots(self):\n def get_snapshots(*_args, **_kwargs):\n return {\n 'items': [\n {'selfLink': 'url/snapshot'},\n ],\n }\n self.mock(snapshots.gce.Project, 'get_snapshots', get_snapshots)\n\n key = self.create_entity('project', 'name', ['key:value'])\n expected_urls = ['url/snapshot']\n urls = snapshots.fetch(key)\n self.assertItemsEqual(urls, expected_urls)", "def test_no_snapshots(self):\n def get_snapshots(*_args, **_kwargs):\n return {}\n self.mock(snapshots.gce.Project, 'get_snapshots', get_snapshots)\n\n key = self.create_entity('project', 'name', ['key:value'])\n urls = snapshots.fetch(key)\n self.failIf(urls)", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_snapshot_listing(self):\n page_size = 5\n with mock.patch.object(TDRClient, 'page_size', page_size):\n paged_snapshots = self._public_tdr_client.snapshot_names_by_id()\n snapshots = self._public_tdr_client.snapshot_names_by_id()\n self.assertEqual(snapshots, paged_snapshots)", "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_image_get(self):\n pass", "def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()", "def list_snapshots(self, detail=False, **params):\n url = 'snapshots'\n list_schema = schema.list_snapshots_no_detail\n if detail:\n url += '/detail'\n list_schema = schema.list_snapshots_with_detail\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(list_schema, resp, body)\n return rest_client.ResponseBody(resp, body)", "def test_aws_service_api_volumes_get(self):\n pass", "def test_snapshot_unspecified(self):\n key = self.create_entity('project', None, [])\n urls = snapshots.fetch(key)\n self.failIf(urls)", "def test_aws_service_api_public_image_get(self):\n pass", "def getInfo(self):\n self.info = requests.get(G.api + self.testId + '/snapshots/' + self.hash, auth=(G.username, G.authkey)).json()\n return self.info", "def test_aws_service_api_vm_get(self):\n pass", "def test_snapshots_with_page_token(self):\n def get_snapshots(*_args, **kwargs):\n if kwargs.get('page_token'):\n return {\n 'items': [\n {'selfLink': 'url/snapshot-2'},\n ],\n }\n return {\n 'items': [\n {'selfLink': 'url/snapshot-1'},\n ],\n 'nextPageToken': 'page-token',\n }\n self.mock(snapshots.gce.Project, 'get_snapshots', get_snapshots)\n\n key = self.create_entity('project', 'name', ['key:value'])\n expected_urls = ['url/snapshot-1', 'url/snapshot-2']\n urls = snapshots.fetch(key)\n self.assertItemsEqual(urls, expected_urls)", "def test_aws_service_api_public_images_get(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def check_snapshots(grafana_server):\n print(info + \" Checking if snapshot api requires authentiation...\" + end)\n \n response = send_request(grafana_server+\"/api/snapshots\", \"POST\")\n \n if (response) == 401:\n print(bad + \" Snapshot endpoint requires authentication! Host not vulnerable.\" + end)\n elif (response) == 415:\n print(good + \" Snapshot endpoint doesn't seem to require authentication! 
Host may be vulnerable.\" + end)\n else:\n print(info + \" Didn't received expected status code when checking snapshot API. Check again.\" + end)\n\n return", "def test_snapshots(self):\n snapshots_dir = REPO_ROOT / \"tests\" / \"snapshots\"\n snapshot_files = set(f.name for f in snapshots_dir.glob(\"*.txt\"))\n assert snapshot_files == SNAPSHOTS_USED", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def test_aws_service_api_private_images_get(self):\n pass", "def get_volume_snapshots(\n self,\n references=None, # type: List[models.ReferenceType]\n sources=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n destroyed=None, # type: bool\n filter=None, # type: str\n ids=None, # type: List[str]\n limit=None, # type: int\n names=None, # type: List[str]\n offset=None, # type: int\n sort=None, # type: List[str]\n source_ids=None, # type: List[str]\n source_names=None, # type: List[str]\n total_item_count=None, # type: bool\n total_only=None, # type: bool\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) 
-> models.VolumeSnapshotGetResponse\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n destroyed=destroyed,\n filter=filter,\n ids=ids,\n limit=limit,\n names=names,\n offset=offset,\n sort=sort,\n source_ids=source_ids,\n source_names=source_names,\n total_item_count=total_item_count,\n total_only=total_only,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volume_snapshots_api.api20_volume_snapshots_get_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n _process_references(sources, ['source_ids', 'source_names'], kwargs)\n return self._call_api(endpoint, kwargs)", "def describe_snapshots(self, snpashot_ids = None, max_results = -1, next_token = \"\", detail = True):\n response = snapshot.describe_snapshots(self.url, self.verb, self.headers, self.version,\n snpashot_ids, max_results, next_token, detail)\n if response is not None :\n res = DescribeSnapshotsResponse.DescribeSnapshotsResponse() \n parseString(str(response.text), res)\n return res\n else :\n return None", "def test_smoker_latest_get(self):\n pass", "def test_get_versions(self):\n versions = get_versions(self.session_mock, S3_BUCKET, S3_OBJECT_WITH_VERSIONS, S3_MAX_KEYS)\n self.list_object_versions_mock.paginate.assert_called_once_with(\n Bucket=S3_BUCKET, Prefix=S3_OBJECT_WITH_VERSIONS, MaxKeys=S3_MAX_KEYS\n )\n self.assertEqual(4, len(versions))", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def show_snapshot(self, snapshot_id):\n url = \"snapshots/%s\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)" ]
[ "0.8165117", "0.76457405", "0.7574539", "0.7050154", "0.7035185", "0.6838947", "0.6798095", "0.67245835", "0.6693685", "0.6692401", "0.6687871", "0.66250813", "0.65893096", "0.6535789", "0.6534415", "0.6528434", "0.64653736", "0.63096005", "0.6277198", "0.62761366", "0.6259826", "0.6187943", "0.61846685", "0.6164116", "0.6158407", "0.6142934", "0.61282367", "0.6123606", "0.6110196", "0.6090413" ]
0.9516805
0
Test case for aws_service_api_snapshots_post
def test_aws_service_api_snapshots_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_snapshots_get(self):\n pass", "def xtest_snapshot_api(self):\n\n req = httplib2.Http(\".cache\")\n body = r\"\"\"{ \"snapshot\": { \"instanceId\": \"123\", \"name\": \"dbapi_test\" } }\"\"\"\n \n # Test creating an snapshot without a body in the request.\n LOG.info(\"* Creating an snapshot without a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test creating an snapshot with a malformed body.\n LOG.info(\"* Creating an snapshot with a malformed body\")\n bad_body = r\"\"\"{ \"snapshot\": {}]\"\"\"\n resp, content = req.request(API_URL + \"snapshots\", \"POST\", bad_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n\n # Test listing all snapshots with a body in the request.\n LOG.info(\"* Listing all snapshots with a body\")\n resp, content = req.request(API_URL + \"snapshots\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test listing all snapshots for a specific instance with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\")\n resp, content = req.request(API_URL + \"snapshots?instanceId=\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test listing all snapshots for a specific tenant with a body in the request.\n LOG.info(\"* Listing all snapshots for a specific instance with a body\") \n resp, content = req.request(API_URL + \"snapshots?tenantId=\" + TENANT_ID, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test getting a non-existent snapshot.\n LOG.info(\"* Getting dummy snapshot\")\n resp, content = req.request(API_URL + \"snapshots/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test creating a new instance from a dummy snapshot.\n instance_body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"102\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.1.2\"\n },\n \"databases\": [\n {\n \"name\": \"testdb\",\n \"character_set\": \"utf8\",\n \"collate\": \"utf8_general_ci\"\n },\n {\n \"name\": \"abcdefg\"\n }\n ],\n \"volume\":\n {\n \"size\": \"2\"\n }\n }\n }\"\"\"\n \n LOG.info(\"* Creating instance from dummy snapshot\")\n snap_body = json.loads(instance_body)\n snap_body['instance']['snapshotId'] = \"dummy\"\n snap_body = json.dumps(snap_body)\n resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n content = json.loads(content)\n self.assertEqual(500, resp.status)\n \n # This test is handled by the error handling in the API server\n# # Test creating a new instance from bad snapshot data in the body.\n# LOG.debug(\"* Creating instance from bad snapshot data in the body\")\n# snap_body = json.loads(instance_body)\n# snap_body['instance']['snapshotId'] = {}\n# snap_body = json.dumps(snap_body)\n# resp, content = req.request(API_URL + \"instances\", \"POST\", snap_body, AUTH_HEADER)\n# LOG.debug(resp)\n# LOG.debug(content)\n# content = json.loads(content)\n# self.assertEqual(500, resp.status) \n \n # Test deleting a non-existent snapshot.\n LOG.info(\"* Deleting dummy snapshot\")\n resp, content = 
req.request(API_URL + \"snapshots/dummy\", \"DELETE\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_simengine_rest_snapshot_simulation_node_post(self):\n pass", "def create_snapshot(self, **kwargs):\n post_body = json.dumps({'snapshot': kwargs})\n resp, body = self.post('snapshots', post_body)\n body = json.loads(body)\n self.validate_response(schema.create_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)", "def test_aws_service_api_volumes_post(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def _action(self, action, snapshot_id, info=None, \n raw_body = None, **kwargs):\n if not raw_body :\n body = {action: info}\n else :\n body = raw_body\n url = '/snapshots/%s/action' % snapshot_id\n return self._post(url, body=body, **kwargs)", "def test_snapshots(self):\n def get_snapshots(*_args, **_kwargs):\n return {\n 'items': [\n {'selfLink': 'url/snapshot'},\n ],\n }\n self.mock(snapshots.gce.Project, 'get_snapshots', get_snapshots)\n\n key = self.create_entity('project', 'name', ['key:value'])\n expected_urls = ['url/snapshot']\n urls = snapshots.fetch(key)\n self.assertItemsEqual(urls, expected_urls)", "def test_volumes_post(self):\n pass", "def test_snapshot(self):\n # Try to create a snapshot with a wrong machine_uuid.\n status = self.proxy.snapshot.create(\n PROVIDER_ID, \"Doesn't exist\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a snapshot with a wrong provider.\n status = self.proxy.snapshot.create(\n \"Doesn't exist\", self.machine_uuid\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Create a snapshot.\n status = self.proxy.snapshot.create(\n PROVIDER_ID, self.machine_uuid\n )\n self.check_xmlrpc_command_result(status)\n\n # Try to destroy snapshots with a wrong provider.\n status = self.proxy.snapshot.destroy(\n \"Doesn't exist\", self.machine_uuid\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Destroy snapshots.\n status = self.proxy.snapshot.destroy(\n PROVIDER_ID, self.machine_uuid\n )\n self.check_xmlrpc_command_result(status)", "def _action(self, action, snapshot, info=None, **kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = '/snapshots/%s/action' % base.getid(snapshot)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_smoker_post(self):\n pass", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def test_aws_service_api_vm_workshift_post(self):\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_snapshot_variables_action(self):\n pass", "def test_post_book_method(self):\n book_id = 4\n title = \"The Whistler\"\n author = \"John Grisham\"\n copies = 3\n result = self.book.post_book(book_id, title, author, copies)\n self.assertEqual(result, [{\"Title\": \"The Whistler\",\n \"Author\": \"John Grisham\",\n \"Copies\": 3}])", "def test_create_snapshot(self, mock_ghn):\n # Now snapshot the volume and check commands\n snapshot = {'volume_name': 'volume10',\n 'volume_id': 
'xxx', 'display_name': 'snap10',\n 'name': '123abc', 'volume_size': 10, 'id': '123abc',\n 'volume': {'provider_id': 'space10'}}\n ret = self.driver.create_snapshot(snapshot)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': snapshot['display_name'], 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'snap10'}\n self.assertDictMatch(expected_pid, ret)", "def check_snapshots(grafana_server):\n print(info + \" Checking if snapshot api requires authentiation...\" + end)\n \n response = send_request(grafana_server+\"/api/snapshots\", \"POST\")\n \n if (response) == 401:\n print(bad + \" Snapshot endpoint requires authentication! Host not vulnerable.\" + end)\n elif (response) == 415:\n print(good + \" Snapshot endpoint doesn't seem to require authentication! Host may be vulnerable.\" + end)\n else:\n print(info + \" Didn't received expected status code when checking snapshot API. Check again.\" + end)\n\n return", "def post_create_snapshot(self, response: pubsub.Snapshot) -> pubsub.Snapshot:\n return response", "def post_list_snapshots(\n self, response: pubsub.ListSnapshotsResponse\n ) -> pubsub.ListSnapshotsResponse:\n return response", "def post_volume_snapshots(\n self,\n sources=None, # type: List[models.ReferenceType]\n volume_snapshot=None, # type: models.VolumeSnapshotPost\n authorization=None, # type: str\n x_request_id=None, # type: str\n on=None, # type: str\n source_ids=None, # type: List[str]\n source_names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) 
-> models.VolumeSnapshotResponse\n kwargs = dict(\n volume_snapshot=volume_snapshot,\n authorization=authorization,\n x_request_id=x_request_id,\n on=on,\n source_ids=source_ids,\n source_names=source_names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volume_snapshots_api.api20_volume_snapshots_post_with_http_info\n _process_references(sources, ['source_ids', 'source_names'], kwargs)\n return self._call_api(endpoint, kwargs)", "def create_snapshot(project, node, snap_name):\n data = {constants.PROJECT_PARAMETER: project,\n constants.NODE_NAME_PARAMETER: node,\n constants.SNAP_NAME_PARAMETER: snap_name}\n res = requests.put(_url + \"create_snapshot/\", data=data,\n auth=(_username, _password))\n click.echo(res.content)", "def _create_snapshot(self, name=None, metadata=None):\n req = fakes.HTTPRequest.blank('/v3/snapshots')\n req.environ['cinder.context'] = self.ctx\n snap = {\"volume_id\": fake.VOLUME_ID,\n \"display_name\": name or \"Volume Test Name\",\n \"description\": \"Volume Test Desc\"\n }\n if metadata:\n snap[\"metadata\"] = metadata\n body = {\"snapshot\": snap}\n self.controller.create(req, body=body)", "def _test_post(client, request, expected_response_parameters_tuple=None, expected_status_code=status.HTTP_200_OK):\n response = client.post(AGGREGATE_ENDPOINT, request)\n assert response.status_code == expected_status_code\n if expected_response_parameters_tuple is not None:\n expected_response = _generate_expected_response(*expected_response_parameters_tuple)\n assert json.loads(response.content.decode(\"utf-8\")) == expected_response", "def _action(self, action, group_snapshot, info=None, **kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = '/group_snapshots/%s/action' % base.getid(group_snapshot)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def test_bucketlist_creation(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_snapshot_listing(self):\n page_size = 5\n with mock.patch.object(TDRClient, 'page_size', page_size):\n paged_snapshots = self._public_tdr_client.snapshot_names_by_id()\n snapshots = self._public_tdr_client.snapshot_names_by_id()\n self.assertEqual(snapshots, paged_snapshots)", "def test_aws_service_api_volume_attachment_put(self):\n pass" ]
[ "0.7824954", "0.727321", "0.71308035", "0.68776023", "0.6790315", "0.664617", "0.6434439", "0.6432623", "0.63407123", "0.63197875", "0.6133041", "0.6101257", "0.6053529", "0.60171944", "0.59851056", "0.59770787", "0.595111", "0.5950911", "0.5950342", "0.5913461", "0.58947", "0.58238506", "0.5811929", "0.5804382", "0.57608575", "0.5756038", "0.57294387", "0.57270896", "0.57194746", "0.57153684" ]
0.9483812
0
Test case for aws_service_api_validate_subscription_post
def test_aws_service_api_validate_subscription_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_subscription(self):\n pass", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_get_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def test_update_subscription(self):\n pass", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def verify_subscription(subscription_id: str):\n verify_data = {\n \"subscription_id\": subscription_id\n }\n json_data = json.dumps(verify_data)\n r = requests.post(f\"{os.getenv('API_ENDPOINT')}verify?token={os.getenv('NR_TOKEN')}\", json_data)\n return r.json()", "def test_create_subscription_template(self):\n pass", "def test_create_subscription(self):\n try:\n self.arb.create_subscription(\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n except KeyError:\n pass\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"Pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )", "def test_delete_subscription(self):\n pass", "def PostSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def validate_sub(self):\n self._validate_claim_value('sub')", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_valid_submission(self):\n with self.client:\n # valid 
submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')", "def test_issue_subscriptions(self):\n pass", "def test_process_subscriptions(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def clean_subscriptions(self):\n cleaned_data = super(SignupSubscriptionForm, self).clean() or self.cleaned_data\n checked = 0\n for key, value in cleaned_data.items():\n if key.startswith('subscription') and value:\n checked += 1\n if checked > 0:\n return cleaned_data\n else:\n raise ValidationError(self.unchecked_error)", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_update_subscription(self):\n args = dict(trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )\n\n try:\n self.arb.update_subscription(**args)\n except KeyError:\n self.arb.update_subscription(subscription_id=u\"1234\", **args)", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_incompatible_subscription_and_tenant():\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def validate(self, data):\n if not models.Subscription.objects.filter(\n is_active=True, user=self.context[\"request\"].user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"You must have an active premium subscription in order to \"\n \"transfer it to another user.\"\n )\n )\n\n if models.Subscription.objects.filter(\n is_active=True, user=self._recipient_email_inst.user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"The intended recipient already has an active premium \"\n \"subscription.\"\n )\n )\n\n if models.AppleReceipt.objects.filter(\n subscription__user=self._recipient_email_inst.user\n ).exists():\n raise serializers.ValidationError(\n ugettext(\n \"The intended recipient has an Apple subscription that \"\n \"must be removed before they can accept a transfer.\"\n )\n )\n\n return data" ]
[ "0.72985333", "0.6906576", "0.6746659", "0.67185956", "0.670407", "0.65617806", "0.6491322", "0.6466054", "0.64172935", "0.64076334", "0.6401086", "0.63378197", "0.62992346", "0.624313", "0.62266105", "0.6217573", "0.6185081", "0.6161922", "0.61287135", "0.6126021", "0.61233646", "0.6122874", "0.6117058", "0.6096322", "0.60354817", "0.6022656", "0.59699845", "0.5928468", "0.59127164", "0.5868807" ]
0.95499164
0
Test case for aws_service_api_vm_command_put
def test_aws_service_api_vm_command_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_aws_service_api_vm_workshift_put(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_aws_service_api_vm_get(self):\n pass", "def test_kyc_put_request(self):\n pass", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_aws_service_api_vm_workshift_post(self):\n pass", "def test_kyc_put_request_legal(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def handle_put(self, api, command):\n return self._make_request_from_command('PUT', command)", "def test_client_can_do_put_request(self):\n response = self.httpbin_4.test_requests_put_method()\n self.assertEqual(response.request.method, 'PUT')\n self.assertEqual(response.status_code, 200)", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def testPut(self):\n # XXX - not actually a unit test\n expectedOutput = (b'Transferred ' + self.testDir.asBytesMode().path +\n b'/testfile1 to ' + self.testDir.asBytesMode().path +\n b'/test\"file2')\n def _checkPut(result):\n self.assertFilesEqual(self.testDir.child('testfile1'),\n self.testDir.child('test\"file2'))\n self.assertTrue(result.endswith(expectedOutput))\n return self.runCommand('rm \"test\\\\\"file2\"')\n\n d = self.runCommand('put %s/testfile1 \"test\\\\\"file2\"'\n % (self.testDir.path,))\n d.addCallback(_checkPut)\n d.addCallback(lambda _: self.assertFalse(\n self.testDir.child('test\"file2').exists()))\n return d", "def test_put_method(self):\n self.getPage('/blah', method='PUT')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"blah\"}')", "def test_add_item_at_using_put(self):\n pass", "def test_update_vip(self):\r\n resource = 'vip'\r\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_aws_service_api_vms_get(self):\n pass", "def test_aws_service_api_volumes_post(self):\n pass", "def test_aws_service_api_vm_workshift_delete(self):\n pass", "def simulate_put(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'PUT', path, **kwargs)", "def test_vmcp_01(self):\r\n # Even though the key does not exists, let's patch it to test\r\n # all the errors\r\n with patch('os.path.exists', return_value=True):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status_code'] == 415, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err\r\n assert err['exception_msg'] == 'cvm_salt parameter is missing'", "def test_update_vip(self):\n resource = 'vip'\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\n self._test_update_resource(resource, cmd, 'myid',\n ['myid', '--name', 'myname',\n '--tags', 'a', 'b'],\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_update_bucket(self):\n pass", "def put(self):\n pass", "def put(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def test_quote_guest_payment_method_management_v1_set_put(self):\n pass" ]
[ "0.81759644", "0.74614275", "0.72217697", "0.6894823", "0.6842685", "0.67816526", "0.6751381", "0.6558063", "0.64946073", "0.63549376", "0.6314738", "0.61670405", "0.614158", "0.61310565", "0.6130978", "0.6096973", "0.60926175", "0.6065767", "0.6025456", "0.5973508", "0.59611523", "0.5956278", "0.5950134", "0.59459955", "0.59149057", "0.5904409", "0.59025896", "0.59025896", "0.5875208", "0.5834913" ]
0.9491606
0
Test case for aws_service_api_vm_delete
def test_aws_service_api_vm_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_delete(self):\n pass", "def test_delete_virtual_service(self):\n pass", "def delete_vm(client, resource_group_name, vm_name):\n return client.delete(resource_group_name, vm_name)", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_aws_service_api_vm_workshift_delete(self):\n pass", "def deleteVirtualMachine(self,node,vmid):\n data = self.connect('delete',\"nodes/%s/qemu/%s\" % (node,vmid),None)\n return data", "def delete_vm(self, region: str, instance_id: str):\n raise NotImplementedError()", "def vm_deprovision(self, params: dict) -> Tuple[\"Status\", dict]:", "def delete(self, psvm):\n self._delete('/os-psvm/%s' % (base.getid(psvm)))", "def vm_delete(vm_hostname, retire=False):\n\n with _get_vm(vm_hostname, unlock=retire, allow_retired=True) as vm:\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm_status_code = vm.aws_describe_instance_status(\n vm.dataset_obj['aws_instance_id'])\n if vm_status_code != AWS_RETURN_CODES['stopped']:\n raise InvalidStateError(\n '\"{}\" is still running.'.format(vm.fqdn))\n else:\n vm.aws_delete()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n # Make sure the VM has a hypervisor and that it is defined on it.\n # Abort if the VM has not been defined.\n _check_defined(vm)\n\n # Make sure the VM is shut down, abort if it is not.\n if (\n vm.hypervisor\n and vm.hypervisor.vm_defined(vm)\n and vm.is_running()\n ):\n raise InvalidStateError('\"{}\" is still running.'.format(\n vm.fqdn)\n )\n\n # Delete the VM from its hypervisor if required.\n if vm.hypervisor and vm.hypervisor.vm_defined(vm):\n vm.hypervisor.undefine_vm(vm)\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n # Delete the machines cert from puppet in case we want to build one with the same name in the future\n clean_cert(vm.dataset_obj)\n\n # Delete the serveradmin object of this VM\n # or update its state to 'retired' if retire is True.\n if retire:\n vm.dataset_obj['state'] = 'retired'\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and set to \"retired\" state.'.format(\n vm.fqdn)\n )\n else:\n vm.dataset_obj.delete()\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and deleted from Serveradmin'.format(\n vm.fqdn)\n )", "def test_ipam_vrfs_delete(self):\n pass", "def test_vault_delete_vault_item(self):\n pass", "def test_delete_vehicle(self):\n vehicle = sample_vehicle(user=self.user)\n\n url = detail_url(vehicle.id)\n res = self.client.delete(url)\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)", "def test_aws_service_api_vm_get(self):\n pass", "def test_ipam_services_delete(self):\n pass", "def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0", "def test_aws_service_api_vm_command_put(self):\n pass", "def delete_vm(self, userid):\n LOG.info(\"Begin to delete vm %s\", userid)\n self._smtclient.delete_vm(userid)\n LOG.info(\"Complete delete vm %s\", userid)", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_delete_cloud(self):\n pass", "def step_delete(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console delete --resource-group {resourceGroup} \"\n \"--virtual-machine-name {virtualMachineName} --yes\",\n checks=checks,\n )", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def delete_vm(self, tenant_id, vm_id):\n 
self.delete_vm_bulk(tenant_id, [vm_id])", "def test_remove_vm(self, instance_name):\n self.instances.pop(instance_name)", "def test_aws_service_api_vm_details_get(self):\n pass", "def delete(vmname, deldisk=True):\n\n dom = _conn.lookupByName(vmname)\n if dom.isActive():\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.undefine()\n infokeeper.delete_vm(vmname)\n if deldisk:\n os.remove(os.path.join(base_disk_path, dom.name() + '.img'))\n return 'VM %s deleted' % vmname", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)", "def test_vault_delete_vault_section(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_keypair_delete(self):\n pass" ]
[ "0.76845914", "0.7401219", "0.73782235", "0.7279414", "0.72571427", "0.7151605", "0.70824784", "0.70211935", "0.700503", "0.6966841", "0.69554144", "0.69535273", "0.69121355", "0.6897992", "0.687267", "0.6852948", "0.6817927", "0.6777296", "0.6759868", "0.6732892", "0.6713095", "0.66940624", "0.66866004", "0.66573006", "0.6605126", "0.6600817", "0.6591113", "0.6582968", "0.6566726", "0.6544165" ]
0.95375043
0
Test case for aws_service_api_vm_details_get
def test_aws_service_api_vm_details_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_get(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def get_details(self, psvm):\n return self.get(psvm)", "def test_aws_service_api_vm_patch(self):\n pass", "def test_virtualservice_get(self):\n pass", "def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n \n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" % reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat", "def test_get_virtual_service(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_aws_service_api_vm_password_get(self):\n pass", "def test_data_object_get_details(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_getting_segment_details(self):\n pass", "def test_get_details7(self):\n pass", "def test_get_info(self):\n self.addCleanup(self.sdkapi.guest_delete, self.userid)\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid, self.image_name)\n\n # get info in shutdown state\n info_off = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_off['power_state'], 'off')\n self.assertEquals(info_off['mem_kb'], 0)\n self.assertEquals(info_off['cpu_time_us'], 0)\n\n # get info in active state\n 
self.sdkapi.guest_start(self.userid)\n self.assertTrue(self.sdkutils.wait_until_guest_in_power_state(\n self.userid, 'on'))\n time.sleep(1)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)\n\n # get info in paused state\n self.sdkapi.guest_pause(self.userid)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_get_info(self):\n pass", "def print_vm_info(virtual_machine):\n summary = virtual_machine.summary\n print(summary.runtime.host)\n print(\"Name : \", summary.config.name)\n print(\"Template : \", summary.config.template)\n print(\"Path : \", summary.config.vmPathName)\n print(\"Guest : \", summary.config.guestFullName)\n print(\"Instance UUID : \", summary.config.instanceUuid)\n print(\"Bios UUID : \", summary.config.uuid)\n annotation = summary.config.annotation\n if annotation:\n print(\"Annotation : \", annotation)\n print(\"State : \", summary.runtime.powerState)\n if summary.guest is not None:\n ip_address = summary.guest.ipAddress\n tools_version = summary.guest.toolsStatus\n if tools_version is not None:\n print(\"VMware-tools: \", tools_version)\n else:\n print(\"Vmware-tools: None\")\n if ip_address:\n print(\"IP : \", ip_address)\n else:\n print(\"IP : None\")\n if summary.runtime.question is not None:\n print(\"Question : \", summary.runtime.question.text)\n print(\"\")", "def print_vm_info(virtual_machine):\n config = virtual_machine.config\n print(\"Name : \", config.name)\n print(\"Template : \", config.template)\n print(\"Guest : \", config.guestFullName)\n print(\"Instance UUID : \", config.instanceUuid)\n print(\"Bios UUID : \", config.uuid)\n print(\"\")", "def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )", "def get_details(self):", "def _check_vm_record(self):\n instances = self.conn.list_instances()\n self.assertEquals(len(instances), 1)\n\n # Get Nova record for VM\n vm_info = self.conn.get_info(1)\n\n # Get record for VM\n vms = vmwareapi_fake._get_objects(\"VirtualMachine\")\n vm = vms[0]\n\n # Check that m1.large above turned into the right thing.\n mem_kib = long(self.type_data['memory_mb']) << 10\n vcpus = self.type_data['vcpus']\n self.assertEquals(vm_info['max_mem'], mem_kib)\n self.assertEquals(vm_info['mem'], mem_kib)\n self.assertEquals(vm.get(\"summary.config.numCpu\"), vcpus)\n self.assertEquals(vm.get(\"summary.config.memorySizeMB\"),\n self.type_data['memory_mb'])\n\n # Check that the VM is running according to Nova\n self.assertEquals(vm_info['state'], power_state.RUNNING)\n\n # Check that the VM is running according to vSphere API.\n self.assertEquals(vm.get(\"runtime.powerState\"), 'poweredOn')", "def 
get_vm(client, resource_group_name, vm_name):\n return client.get(resource_group_name, vm_name)", "def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True", "def test_get_details(self):\n\t\tactual_details = self.watcher.analyze(layers=[self.second_layer])\n\t\texpected_details = self.watcher.get_details()\n\t\t\n\t\tself.assertEqual(len(actual_details), len(expected_details), \"actual and expected details differ\")", "def test_aws_service_api_vm_tag_put(self):\n pass" ]
[ "0.86986274", "0.82846344", "0.7914184", "0.7215921", "0.68684095", "0.68192875", "0.66386724", "0.6511931", "0.6482203", "0.646786", "0.6464285", "0.637554", "0.6286821", "0.6259161", "0.6224959", "0.62205684", "0.6214965", "0.6181512", "0.61462253", "0.61442876", "0.6130757", "0.61073494", "0.6103523", "0.60961187", "0.6081588", "0.60616285", "0.60382724", "0.5986765", "0.59838843", "0.5981153" ]
0.95614815
0
Test case for aws_service_api_vm_get
def test_aws_service_api_vm_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_details_get(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_virtualservice_get(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_get_virtual_service(self):\n pass", "def test_aws_service_api_vm_password_get(self):\n pass", "def get_vm(client, resource_group_name, vm_name):\n return client.get(resource_group_name, vm_name)", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_volumes_get(self):\n pass", "def test_vmcp_01(self):\r\n # Even though the key does not exists, let's patch it to test\r\n # all the errors\r\n with patch('os.path.exists', return_value=True):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status_code'] == 415, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err\r\n assert err['exception_msg'] == 'cvm_salt parameter is missing'", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err", "def get(self, psvm):\n return self._get('/os-psvm/%s' % (base.getid(psvm)), \"psvm\")", "def test_aws_service_api_image_get(self):\n pass", "def test_05_vmcp(self):\r\n url = '/api/vmcp'\r\n self.check_limit(url, 'get', 'app')", "def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n \n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n 
self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" % reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat", "def get_vm(**kwargs):\n model = self.db.vm_table_from_provider('openstack')\n vm = self.db.select(model, **kwargs).all()\n assert len(vm) == 1, vm\n vm = vm[0]\n return vm", "def test_aws_service_api_flavor_get(self):\n pass", "def get_vm(client, vm_name):\n names = set([vm_name])\n vms = client.vcenter.VM.list(VM.FilterSpec(names=names))\n if len(vms) == 0:\n print(\"VM with name ({}) not found\".format(vm_name))\n return None\n vm = vms[0].vm\n print(\"Found VM '{}' ({})\".format(vm_name, vm))\n return vm", "def test_get_virtualization_realms(self):\n pass", "def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)", "def test_aws_service_api_flavors_get(self):\n pass", "def test_aws_service_api_interfaces_get(self):\n pass", "def test_aws_service_api_public_image_get(self):\n pass" ]
[ "0.91339034", "0.8864053", "0.861006", "0.76438653", "0.72016954", "0.71333116", "0.7080705", "0.70437694", "0.70271695", "0.69803905", "0.69289654", "0.6851026", "0.6775111", "0.6690608", "0.6568262", "0.64512783", "0.6433684", "0.64195716", "0.6370653", "0.63509077", "0.6304741", "0.6301841", "0.62117493", "0.620867", "0.62070096", "0.6190281", "0.6179367", "0.6160804", "0.61467683", "0.6053964" ]
0.9452272
0
Test case for aws_service_api_vm_management_get
def test_aws_service_api_vm_management_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_get(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_virtualservice_get(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_vm_password_get(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_get_virtualization_realms(self):\n pass", "def test_get_virtual_service(self):\n pass", "def test_05_vmcp(self):\r\n url = '/api/vmcp'\r\n self.check_limit(url, 'get', 'app')", "def get_vm(client, resource_group_name, vm_name):\n return client.get(resource_group_name, vm_name)", "def get(self, psvm):\n return self._get('/os-psvm/%s' % (base.getid(psvm)), \"psvm\")", "def test_get_virtual_machine_count_metrics(self):\n pass", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_vmcp_01(self):\r\n # Even though the key does not exists, let's patch it to test\r\n # all the errors\r\n with patch('os.path.exists', return_value=True):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status_code'] == 415, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err\r\n assert err['exception_msg'] == 'cvm_salt parameter is missing'", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_get_virtualization_realm(self):\n pass", "def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n \n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n 
self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" % reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat", "def test_get_virtual_machine_count_metrics1(self):\n pass", "def get_details(self, psvm):\n return self.get(psvm)", "def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)", "def test_aws_service_api_image_get(self):\n pass", "def test_get_virtualization_realm_resources(self):\n pass", "def test_volumes_get(self):\n pass", "def test_smoker_get(self):\n pass", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err" ]
[ "0.8650585", "0.8562624", "0.8373791", "0.70795417", "0.6962664", "0.6897586", "0.6574185", "0.65094703", "0.64713967", "0.6338804", "0.62986034", "0.62899095", "0.6258442", "0.6202984", "0.61594754", "0.61232984", "0.6044133", "0.6041169", "0.6038002", "0.6024266", "0.5978438", "0.59636587", "0.59558004", "0.59067804", "0.58909094", "0.58817333", "0.58594066", "0.5858459", "0.58581257", "0.5818843" ]
0.95178163
0
Test case for aws_service_api_vm_password_get
def test_aws_service_api_vm_password_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetPassword(self):\n pass", "def get_password(self):\n raise NotImplementedError('get_password')", "def GetPassword(self):\n return self._password", "def password(self) -> str:", "def test_invalid_password(self):\n pass", "def _get_password(self):\r\n return self._password", "def _get_password(self):\n return self._password", "def password(self):\n return self._password()", "def get_verified_password(self):\n raise NotImplementedError('get_verified_password')", "def test_change_password_invalid_current_password(self):\n kwargs = {\"current_password\": self.test_args[\"invalid_current_password\"], \"return_response_obj\": True,\n \"return_failure_response\": True}\n response = self.test_change_password_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n expected_result = self.test_args[\"expected_result\"]\n actual_result = json.loads(response.text)[\"message\"]\n assert actual_result == expected_result, \"Test Failed.. Expected: {0}.. Actual: {1}\".format(expected_result,\n actual_result)", "def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16", "async def password(self, ctx):\n pass", "def get_password_testing():\n if settings.CLOUD:\n return [os.environ.get('passwordtest')]\n with open('env.yaml') as file_name:\n data = yaml.safe_load(file_name)\n return (data['test_variables']['password'],)", "def getPassword(self):\n\t\treturn self.Password", "def test_set_user_password(self):\n pass", "def get_verified_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password-again')", "def password(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"password\")", "def test_get_password_from_keyring_if_exists(self, mock_keyring):\n mock_keyring.get_password.return_value = 'TestPass'\n self.assertEqual(\n get_password_from_keyring('TestPass', 'TestUser'), 'TestPass')", "def enter_password(self):", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))", "def get_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password')", "def test_valid_password(self):\n newpass = 'Just Complex Enough'\n m = hashlib.sha512()\n m.update(newpass.encode('utf-8'))\n m.update(self.request.user.salt)\n hashed = m.digest()\n self.request.json_body = deepcopy(self.good_dict)\n self.assertNotEqual(self.request.user.password, hashed)\n self.request.json_body['password'] = newpass\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, dict_from_row(self.request.user, remove_fields=removals))\n self.assertEqual(self.request.user.password, hashed)", "def _get_user_password(self):\n return self.__user_password", "def get_password_data(self, instance_id):\r\n\r\n params = {'InstanceId' : instance_id}\r\n rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST')\r\n return rs.passwordData", "def log_in_password(self):\n password_elem = waiter.find_element(self.driver, 'password', by=NAME)\n return password_elem.get_attribute('value')", "def validate(self):\n if not self.credentials['password']:\n raise ValueError(\n 'An empty z/VM guest password is trying to be used. 
'\n 'Please set the correct password.')", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def password(self) :\n\t\ttry :\n\t\t\treturn self._password\n\t\texcept Exception as e:\n\t\t\traise e" ]
[ "0.7222929", "0.7099967", "0.68307626", "0.6830078", "0.6776669", "0.6640706", "0.66043776", "0.65847296", "0.65678656", "0.6471251", "0.64595413", "0.6397165", "0.6388326", "0.6383084", "0.6377725", "0.6366573", "0.63620764", "0.63547564", "0.63525546", "0.6336687", "0.6326122", "0.625524", "0.6246864", "0.62463266", "0.6244883", "0.62224275", "0.6221926", "0.6214233", "0.62035114", "0.6197768" ]
0.9478655
0
Test case for aws_service_api_vm_patch
def test_aws_service_api_vm_patch(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_get(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_modify_virtual_service(self):\n pass", "def test_vmcp_02(self):\r\n signature = dict(signature='XX')\r\n with patch('os.path.exists', return_value=True):\r\n with patch('pybossa.vmcp.sign', return_value=signature):\r\n res = self.app.get('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n out = json.loads(res.data)\r\n assert res.status_code == 200, out\r\n assert out['signature'] == signature['signature'], out\r\n\r\n # Now with a post\r\n res = self.app.post('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n assert res.status_code == 405, res.status_code", "def test_aws_service_api_vm_workshift_put(self):\n pass", "def test_ipam_vrfs_update(self):\n pass", "def test_update_vip(self):\r\n resource = 'vip'\r\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_aws_service_api_vm_password_get(self):\n pass", "def test_aws_service_api_vm_workshift_post(self):\n pass", "def test_update_vip(self):\n resource = 'vip'\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\n self._test_update_resource(resource, cmd, 'myid',\n ['myid', '--name', 'myname',\n '--tags', 'a', 'b'],\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_ipam_services_update(self):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_vmcp_01(self):\r\n # Even though the key does not exists, let's patch it to test\r\n # all the errors\r\n with patch('os.path.exists', return_value=True):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status_code'] == 415, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err\r\n assert err['exception_msg'] == 'cvm_salt parameter is missing'", "def test_aws_service_api_volumes_post(self):\n pass", "def test_virtualservice_get(self):\n pass", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err", "def test_ipam_services_partial_update(self):\n pass", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_restart(self):", "def test_aws_service_api_volumes_get(self):\n pass", "def test_virtual_service_create_command_for_success(\n mock_client, virtual_service_create_success, monkeypatch\n):\n monkeypatch.setattr(\n illumio.pce.PolicyComputeEngine._PCEObjectAPI,\n \"create\",\n lambda *a: VirtualService.from_json(virtual_service_create_success),\n )\n resp = virtual_service_create_command(\n mock_client,\n {\"name\": \"test_create_virtual_service\", \"port\": 3000, 
\"protocol\": \"tcp\"},\n )\n assert resp.raw_response == virtual_service_create_success", "def test_aws_service_api_volume_get(self):\n pass", "def test_update_task_tags(\n self,\n mock_config_load,\n mock_custom_objects_api,\n mock_core_v1_api\n ):\n task_id = util.MOCK_UUID_5\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"tags\": [\"FEATURE_ENGINEERING\"],\n },\n )\n result = rv.json()\n expected = {\n \"uuid\": \"uuid-5\",\n \"name\": \"task-5\",\n \"description\": None,\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n \"cpuRequest\": \"100m\",\n \"arguments\": None,\n \"category\": \"MONITORING\",\n \"tags\": [\"FEATURE_ENGINEERING\"],\n \"dataIn\": None,\n \"dataOut\": None,\n \"docs\": None,\n \"hasNotebook\": False,\n \"image\": EXPERIMENT_IMAGE,\n \"memoryLimit\": \"10Gi\",\n \"memoryRequest\": \"2Gi\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": 60,\n \"createdAt\": mock.ANY,\n }\n machine_generated = [\"updatedAt\"]\n for attr in machine_generated:\n self.assertIn(attr, result)\n del result[attr]\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def test_add_virtual_service(self):\n pass" ]
[ "0.82336074", "0.8126507", "0.7836095", "0.77227956", "0.7702922", "0.751186", "0.7425756", "0.7319667", "0.7219776", "0.6920091", "0.68976235", "0.66285187", "0.6533979", "0.6498052", "0.64471275", "0.6408248", "0.63949734", "0.63852394", "0.63488567", "0.6342148", "0.6336607", "0.6221896", "0.6196736", "0.6180206", "0.6143484", "0.6071661", "0.60657644", "0.6038722", "0.60068876", "0.6002275" ]
0.9435803
0
Test case for aws_service_api_vm_security_group_delete
def test_aws_service_api_vm_security_group_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_resource_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def security_group_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_security_group(**kwargs)", "def delete_security_group(self, security_group):\r\n return self.delete(self.security_group_path % (security_group))", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def post_security_group_delete(self, resource_id, resource_dict):\n pass", "def test_user_group_controller_delete(self):\n pass", "def test_delete_groups(self):\n pass", "def delete(self, security_group_id: str) -> None:\n\t\troute = f'{AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value}/{security_group_id}'\n\t\treturn self._delete(route=route)", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete(self, sg_id):\n self.client.delete_security_group(sg_id)", "def del_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n response = ec2.delete_security_group(GroupId=sgid)\n attachment = MessageAttachmentsClass()\n message.message_text = \"Security group deleted\"\n message.attach(attachment)\n\n return message.to_json()", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def _delete_security_group(self, group_id):\n\n group_to_delete = self.get_resource()\n\n if not group_to_delete:\n raise NonRecoverableError(\n 'Unable to delete security group {0}, because the group '\n 'does not exist in the account'.format(group_id))\n\n try:\n self.execute(self.client.delete_security_group,\n dict(group_id=group_id), raise_on_falsy=True)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))", "def pre_security_group_delete(self, resource_id):\n pass", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, 
\"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def test_api_v1_groups_id_delete(self):\n pass", "def test_delete_collection_group(self):\n pass", "def delete_sec_group(ec2, sec_group_name):\n try:\n ec2.delete_security_group(sec_group_name)\n except EC2ResponseError as e:\n if e.error_code == 'InvalidGroup.NotFound':\n pass\n else:\n raise e", "def test_ipam_vlan_groups_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete(self, oid):\n path = '%s/security-groups/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group: %s' % truncate(res))\n return res[0]", "def test_delete_underscore():\n pytest.raises(SaltInvocationError, mac_group.delete, \"_Test\")", "def test_groups_group_ref_delete(self):\n pass", "def delete_group(user):\n return 'do some magic!'" ]
[ "0.7839854", "0.7500313", "0.7500313", "0.7480745", "0.7460259", "0.7452618", "0.743292", "0.7414543", "0.73841035", "0.7381263", "0.73142505", "0.71547073", "0.7150299", "0.7143438", "0.7121918", "0.71215004", "0.707065", "0.7057178", "0.705154", "0.7003657", "0.69980025", "0.6935143", "0.6928709", "0.69084316", "0.69084316", "0.68750626", "0.6815185", "0.6788934", "0.6779889", "0.6768188" ]
0.96059275
0
Test case for aws_service_api_vm_security_group_put
def test_aws_service_api_vm_security_group_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def test_08_security_group(self):\n # Validate the following:\n # 1. Create a project\n # 2. Assign some security groups to that project\n # 3. Verify the security groups can only be assigned to VM belonging\n # to that project.\n\n security_group = SecurityGroup.create(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.debug(\"Created security group with ID: %s\" % security_group.id)\n # Default Security group should not have any ingress rule\n sercurity_groups = SecurityGroup.list(\n self.apiclient,\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(sercurity_groups, list),\n True,\n \"Check for list security groups response\"\n )\n\n self.assertNotEqual(\n len(sercurity_groups),\n 0,\n \"Check List Security groups response\"\n )\n # Authorize Security group to SSH to VM\n ingress_rule = security_group.authorize(\n self.apiclient,\n self.services[\"security_group\"],\n projectid=self.project.id\n )\n self.assertEqual(\n isinstance(ingress_rule, dict),\n True,\n \"Check ingress rule created properly\"\n )\n\n self.debug(\n \"Authorizing ingress rule for sec group ID: %s for ssh access\"\n % security_group.id)\n self.virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n securitygroupids=[security_group.id],\n projectid=self.project.id\n )\n self.debug(\"Deployed VM (ID: %s) in project: %s\" % (\n self.virtual_machine.id,\n self.project.id\n ))\n self.assertEqual(\n self.virtual_machine.state,\n 'Running',\n \"VM state should be running after deployment\"\n )\n # Deploy another VM with same security group outside the project\n self.debug(\n \"Deploying VM with security group: %s outside project:%s\" % (\n security_group.id,\n self.project.id\n ))\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n serviceofferingid=self.service_offering.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n securitygroupids=[security_group.id],\n )\n return", "def test_aws_service_api_security_groups_get(self):\n pass", "def test_groups_group_users_put(self):\n pass", "def test_groups_group_users_put(self):\n pass", "def test_update_resource_group(self):\n pass", "def test_groups_group_ref_put(self):\n pass", "def test_break_security_group_failed():", "def test_api_v1_groups_id_put(self):\n pass", "def test_break_security_group_usual_case_specify_sg():", "def test_resource_user_resource_set_user_groups_for_user_put(self):\n pass", "def test_products_ref_groups_put(self):\n pass", "def post_security_group_create(self, resource_dict):\n pass", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def post_security_group_update(self, resource_id, resource_dict):\n pass", "def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. 
body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)", "def test_groups_group_id_state_put(self):\n pass", "def test_create_resource_group(self):\n pass", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_break_security_group_usual_case():", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def test_patch_resource_group(self):\n pass", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def test_add_group(self):\n pass", "def test_add_and_remove_privilege(self):\n\n self.create_common_users_and_groups()\n\n sgp = SetGroupPrivilegesAPI(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n sgp.add_privilege(\n groups=[\"Group 1\", \"Group 2\"], privilege=Privileges.CAN_USE_SPOTIQ\n )", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def test_update_group(self):\n pass" ]
[ "0.7739094", "0.73521936", "0.7036603", "0.6653077", "0.6653077", "0.6556714", "0.65383494", "0.65037537", "0.64947385", "0.6492873", "0.6491554", "0.6489214", "0.6464362", "0.63628876", "0.63405657", "0.63301015", "0.6310847", "0.6285586", "0.627555", "0.62239367", "0.62221885", "0.62221885", "0.62118495", "0.62041456", "0.6203335", "0.61951506", "0.61832035", "0.61723554", "0.6162166", "0.6145795" ]
0.95949286
0
Test case for aws_service_api_vm_tag_put
def test_aws_service_api_vm_tag_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_command_put(self):\n pass", "def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})", "def test_networking_project_network_tag_put(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_update_task_tag(self, mock_api, mock_custom_objects_api, mock_kube_config):\n task_id = util.MOCK_UUID_5\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"tags\": [\n \"tag1\",\n \"tag2\",\n \"tag3\",\n \"tag4\",\n ]\n },\n )\n self.assertEqual(rv.status_code, 200)", "def test_put(self):\n self.seed_static_data()\n\n params = {\n 'id': 2,\n 'event_id': 1,\n 'tag_type': 'REGISTRATION',\n 'name': {\n 'en': 'Renamed English Name', # Rename\n 'zu': 'Zulu Name'\n },\n 'description': {\n 'en': 'Renamed English Description',\n 'zu': 'Zulu Description'\n },\n 'active': True\n }\n\n response = self.app.put(\n '/api/v1/tag', \n headers=self.user1_headers, \n data=json.dumps(params),\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data={'id': 2, 'event_id': 1, 'language': 'en'})\n data = json.loads(response.data)\n\n self.assertEqual(data['id'], 2)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'REGISTRATION')\n self.assertDictEqual(data['name'], {\n 'en': 'Renamed English Name',\n 'zu': 'Zulu Name'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'Renamed English Description',\n 'zu': 'Zulu Description'\n })", "def test_aws_service_api_vm_get(self):\n pass", "def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})", "def test_aws_service_api_vm_workshift_put(self):\n pass", "def put_object_tagging(Bucket=None, Key=None, VersionId=None, ContentMD5=None, Tagging=None):\n pass", 
"def test_aws_service_api_vm_delete(self):\n pass", "def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def test_update_vip(self):\r\n resource = 'vip'\r\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_update_task_tags(\n self,\n mock_config_load,\n mock_custom_objects_api,\n mock_core_v1_api\n ):\n task_id = util.MOCK_UUID_5\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"tags\": [\"FEATURE_ENGINEERING\"],\n },\n )\n result = rv.json()\n expected = {\n \"uuid\": \"uuid-5\",\n \"name\": \"task-5\",\n \"description\": None,\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n \"cpuRequest\": \"100m\",\n \"arguments\": None,\n \"category\": \"MONITORING\",\n \"tags\": [\"FEATURE_ENGINEERING\"],\n \"dataIn\": None,\n \"dataOut\": None,\n \"docs\": None,\n \"hasNotebook\": False,\n \"image\": EXPERIMENT_IMAGE,\n \"memoryLimit\": \"10Gi\",\n \"memoryRequest\": \"2Gi\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": 60,\n \"createdAt\": mock.ANY,\n }\n machine_generated = [\"updatedAt\"]\n for attr in machine_generated:\n self.assertIn(attr, result)\n del result[attr]\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.images.find_one({'_id': id})\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def test_update_vip(self):\n resource = 'vip'\n cmd = vip.UpdateVip(test_cli20.MyApp(sys.stdout), None)\n self._test_update_resource(resource, cmd, 'myid',\n ['myid', '--name', 'myname',\n '--tags', 'a', 'b'],\n {'name': 'myname', 'tags': ['a', 'b'], })", "def test_update_task_unk_tags(\n self,\n mock_config_load,\n mock_custom_objects_api,\n mock_core_v1_api\n ):\n task_id = util.MOCK_UUID_5\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"tags\": [\"UNK\"],\n },\n )\n result = rv.json()\n expected = {\n \"arguments\": None,\n \"category\": \"MONITORING\",\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n \"cpuRequest\": \"100m\",\n \"createdAt\": mock.ANY,\n \"dataIn\": None,\n \"dataOut\": None,\n \"description\": None,\n \"docs\": None,\n \"hasNotebook\": False,\n \"image\": EXPERIMENT_IMAGE,\n \"memoryLimit\": \"10Gi\",\n \"memoryRequest\": \"2Gi\",\n \"name\": \"task-5\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": 60,\n \"tags\": [\"UNK\"],\n \"updatedAt\": mock.ANY,\n \"uuid\": \"uuid-5\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = 
s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def update_vm(client, resource_group_name, vm_name, **kwargs):\n return client.update(resource_group_name, vm_name, kwargs['parameters'].tags)", "def test_aws_service_api_vm_management_get(self):\n pass", "def update_tag(id):\n try:\n data_tag = JSONPayload(UpdateTagInterface)\n UpdateTag().run(id, data_tag)\n except BadRequest as ex:\n return jsonify({'code': '400','message':'Invalide json.'})\n except NotFound as ex:\n return jsonify({'code': '404','message': 'Tag not found'})\n except Exception as ex:\n print(type(ex))\n print(ex)\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code':'200','message':'Updated tag with sucess.'})", "def test_aws_service_api_vm_details_get(self):\n pass", "def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def test_kyc_put_request(self):\n pass", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def test_add_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def put_bucket_tagging(Bucket=None, Tagging=None):\n pass", "def put(self, tag, tile, ttl=0):\n raise NotImplementedError" ]
[ "0.80429476", "0.729708", "0.70491844", "0.6841491", "0.6683153", "0.6553686", "0.6502497", "0.6415904", "0.64025015", "0.63829607", "0.63378245", "0.63253623", "0.61899245", "0.61463225", "0.6143658", "0.60995877", "0.5990448", "0.5973975", "0.59707505", "0.59622836", "0.59558004", "0.5948442", "0.5946315", "0.59346396", "0.59330845", "0.5910453", "0.5885144", "0.5877971", "0.5876519", "0.5869745" ]
0.96140796
0
Test case for aws_service_api_vm_workshift_delete
def test_aws_service_api_vm_workshift_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_workshift_put(self):\n pass", "def test_aws_service_api_vm_workshift_post(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_delete_workout(self):\n response = self.client.open(\n '/workout/{id}'.format(id='id_example'),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_shift_view_delete(self):\n \n path = reverse('api:id-shifts', kwargs={'id': self.shift.id})\n request = self.factory.delete(path)\n response = ShiftView.delete(self, request, id=self.shift.id)\n assert response.status_code == 204\n assert Shift.objects.filter(id=self.shift.id).count() == 0", "def test_shift_view_delete_invalid_shift(self):\n \n path = reverse('api:id-shifts', kwargs={'id': 9999})\n request = self.factory.delete(path)\n response = ShiftView.delete(self, request, id=9999)\n assert response.status_code == 404", "def test_02_delete_machine(self):\n client = self.client\n\n j = check_json(client, 'api/db_default/v4/nts/machines/2')\n run_ids = [s['id'] for s in j['runs']]\n self.assertNotEqual(len(run_ids), 0)\n sample_ids = []\n for run_id in run_ids:\n resp = check_json(client,\n 'api/db_default/v4/nts/runs/{}'.format(run_id))\n sample_ids.append([s['id'] for s in resp['tests']])\n self.assertNotEqual(len(sample_ids), 0)\n\n resp = client.delete('api/db_default/v4/nts/machines/2')\n self.assertEqual(resp.status_code, 401)\n\n resp = client.delete('api/db_default/v4/nts/machines/2',\n headers={'AuthToken': 'wrong token'})\n self.assertEqual(resp.status_code, 401)\n\n resp = client.delete('api/db_default/v4/nts/machines/2',\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.get_data(),\n '''Deleting runs 3 5 6 7 8 9 (6/6)\nDeleted machine machine2:2\n''')\n\n resp = client.get('api/db_default/v4/nts/machines/2')\n self.assertEqual(resp.status_code, 404)\n\n for run_id in run_ids:\n resp = client.get('api/db_default/v4/nts/runs/{}'.format(run_id))\n self.assertEqual(resp.status_code, 404)\n\n for sid in sample_ids:\n resp = client.get('api/db_default/v4/nts/samples/{}'.format(sid))\n self.assertEqual(resp.status_code, 404)", "def test_issue_delete_stop_watch(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_delete_deployment(self):\n pass", "def command_delete_redshift():\n # trying to get the keys from dwh.cfg file\n try: \n config = configparser.ConfigParser()\n config.read('aws-dwh.cfg')\n cluster_id = config['DWH']['DWH_CLUSTER_IDENTIFIER']\n role_name = config['DWH']['DWH_IAM_ROLE_NAME']\n except Exception as e:\n print(\"Encountered following exception while trying to retrieve parameters from aws-dwh.cfg file\")\n print(f\"{e}\")\n sys.exit(1)\n\n if aws.delete_redshift_cluster(cluster_id=cluster_id,\n role_name=role_name\n ):\n \n print(f\"delete_redshift command successful for cluster {cluster_id}\")\n print(f\"cleaning up roles used for this cluster\")\n \n if aws.cleanup_redshift_role(role_name=role_name\n ):\n print(f\"Cleanup of role {role_name} successful\")\n else:\n print(f\"Cleanup of role {role_name} failed.\")\n else:\n print(f\"delete_redshift command failed for cluster {cluster_id}\")", "def step_delete(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console delete --resource-group {resourceGroup} \"\n \"--virtual-machine-name {virtualMachineName} --yes\",\n checks=checks,\n )", "def 
test_delete_run(self):\n pass", "def test_delete_case(self):\n pass", "def test_delete_monitoring_schedule_vendor_v3(self):\n pass", "def test_delete_virtual_service(self):\n pass", "def test_deleting_a_segment(self):\n pass", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def test_delete_hyperflex_cluster_profile(self):\n pass", "def test_delete_no_target(self):\n # login as library manager\n self.authenticate(self.user)\n\n # remove all works\n Work.objects.all().delete()\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 0})", "def test_delete7(self):\n pass", "def test_delete(self):\n # login as library manager\n self.authenticate(self.user)\n\n # check there are 3 works\n self.assertEqual(Work.objects.count(), 3)\n\n self.assertNotEqual(self.work1.song_set.count(), 0)\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 2})\n\n # check there are only 1 work remaining\n self.assertEqual(Work.objects.count(), 1)\n\n # check artists with songs remains\n self.assertEqual(Work.objects.filter(pk=self.work2.pk).count(), 0)\n self.assertEqual(Work.objects.filter(pk=self.work3.pk).count(), 0)", "def test_workflows_id_delete(self):\n pass", "def delete_workteam(WorkteamName=None):\n pass", "def test_004_delete(self):\n ret = svcmgr.main(argv=[\"delete\", \"-s\", SVCNAME, \"--local\"])\n assert ret == 0", "def test_teams_remove_customer_from_workgroup_v1(self):\n pass", "def test_remove_vm(self, instance_name):\n self.instances.pop(instance_name)", "def test_delete__compute(self):\n arglist = [\n '--compute',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'compute'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.volume_quotas_mock.delete.assert_not_called()\n self.network_mock.delete_quota.assert_not_called()", "def test_delete_hyperflex_server_model(self):\n pass" ]
[ "0.7511097", "0.7364616", "0.73206496", "0.67302877", "0.66817266", "0.6495581", "0.64496297", "0.64332926", "0.6427471", "0.6413648", "0.6361137", "0.6328748", "0.6317846", "0.6265027", "0.61935854", "0.612659", "0.61088544", "0.60996175", "0.6056947", "0.60172695", "0.60065806", "0.5993544", "0.59730256", "0.5935434", "0.59280145", "0.5921994", "0.5914118", "0.5893663", "0.58650947", "0.5809876" ]
0.9626205
0
Test case for aws_service_api_vm_workshift_post
def test_aws_service_api_vm_workshift_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_workshift_put(self):\n pass", "def test_aws_service_api_vm_workshift_delete(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_shift_view_post(self):\n path = reverse('api:get-shifts')\n request = self.factory.post(path)\n force_authenticate(request, user=self.user_employer)\n request.user = self.user_employer\n request.data = {\n 'status': 'OPEN',\n 'starting_at': \"2019-10-20T00:00\",\n 'ending_at': \"2019-10-20T00:00\",\n #'rating': 3,\n #'candidates': [self.employee.id],\n #'employees': [self.employee.id],\n 'venue': self.venue.id,\n 'position': self.position.id,\n 'application_restriction': 'FAVORITES',\n 'maximum_allowed_employees': 10,\n 'minimum_hourly_rate': 8,\n 'minimum_allowed_rating': 3,\n 'allowed_from_list': [self.favlist.id],\n }\n response = ShiftView.post(self, request)\n assert response.status_code == 201\n assert Shift.objects.filter(status='OPEN').count() == 1", "def test_deployvm_userdata_post(self):\n deployVmResponse = VirtualMachine.create(\n self.apiClient,\n services=self.services[\"virtual_machine\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=self.template.id,\n zoneid=self.zone.id,\n method='POST'\n )\n vms = list_virtual_machines(\n self.apiClient,\n account=self.account.name,\n domainid=self.account.domainid,\n id=deployVmResponse.id\n )\n self.assertTrue(len(vms) > 0, \"There are no Vms deployed in the account %s\" % self.account.name)\n vm = vms[0]\n self.assertTrue(vm.id == str(deployVmResponse.id), \"Vm deployed is different from the test\")\n self.assertTrue(vm.state == \"Running\", \"VM is not in Running state\")", "def test_add_workout(self):\n body = Workout()\n response = self.client.open(\n '/workout',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def push_to_db(self):\n try:\n if self.is_skipped:\n return TestCase.EX_PUSH_TO_DB_ERROR\n assert self.project_name\n assert self.case_name\n assert self.start_time\n assert self.stop_time\n url = env.get('TEST_DB_URL')\n data = {\"project_name\": self.project_name,\n \"case_name\": self.case_name,\n \"details\": self.details}\n data[\"installer\"] = env.get('INSTALLER_TYPE')\n data[\"scenario\"] = env.get('DEPLOY_SCENARIO')\n data[\"pod_name\"] = env.get('NODE_NAME')\n data[\"build_tag\"] = env.get('BUILD_TAG')\n data[\"criteria\"] = 'PASS' if self.is_successful(\n ) == TestCase.EX_OK else 'FAIL'\n data[\"start_date\"] = datetime.fromtimestamp(\n self.start_time).strftime('%Y-%m-%d %H:%M:%S')\n data[\"stop_date\"] = datetime.fromtimestamp(\n self.stop_time).strftime('%Y-%m-%d %H:%M:%S')\n try:\n data[\"version\"] = re.search(\n TestCase._job_name_rule,\n env.get('BUILD_TAG')).group(2)\n except Exception: # pylint: disable=broad-except\n data[\"version\"] = \"unknown\"\n req = requests.post(\n url, data=json.dumps(data, sort_keys=True),\n headers=self.headers)\n req.raise_for_status()\n if urlparse(url).scheme != \"file\":\n # href must be postprocessed as OPNFV testapi is misconfigured\n # (localhost is returned)\n uid = re.sub(r'^.*/api/v1/results/*', '', req.json()[\"href\"])\n netloc = env.get('TEST_DB_EXT_URL') if env.get(\n 'TEST_DB_EXT_URL') else env.get('TEST_DB_URL')\n self.__logger.info(\n \"The results were successfully pushed to DB: \\n\\n%s\\n\",\n os.path.join(netloc, uid))\n except 
AssertionError:\n self.__logger.exception(\n \"Please run test before publishing the results\")\n return TestCase.EX_PUSH_TO_DB_ERROR\n except requests.exceptions.HTTPError:\n self.__logger.exception(\"The HTTP request raises issues\")\n return TestCase.EX_PUSH_TO_DB_ERROR\n except Exception: # pylint: disable=broad-except\n self.__logger.exception(\"The results cannot be pushed to DB\")\n return TestCase.EX_PUSH_TO_DB_ERROR\n return TestCase.EX_OK", "def test_workflows_post(self):\n pass", "def test_aws_service_api_snapshots_post(self):\n pass", "def test_smoker_post(self):\n pass", "def PostWorkflowResults(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_post_work_simple(self):\n # pre-assert there are 3 works\n self.assertEqual(Work.objects.all().count(), 3)\n\n # authenticate as manager\n self.authenticate(self.manager)\n\n # create work\n response = self.client.post(\n self.url,\n {\n \"title\": \"Girls und Panzer\",\n \"subtitle\": \"\",\n \"work_type\": {\"query_name\": \"anime\"},\n },\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # assert there are now 4 works\n self.assertEqual(Work.objects.all().count(), 4)", "def test_simengine_rest_snapshot_simulation_node_post(self):\n pass", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_submissions(self):\r\n # Basic case, things go well.\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"submitted\")\r\n\r\n # We post, but Software Secure doesn't like what we send for some reason\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_error):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")\r\n\r\n # We try to post, but run into an error (in this case a newtork connection error)\r\n with patch('verify_student.models.requests.post', new=mock_software_secure_post_unavailable):\r\n attempt = self.create_and_submit()\r\n assert_equals(attempt.status, \"must_retry\")", "def test_shift_view_put(self):\n path = reverse('api:id-shifts', kwargs={'id': self.shift.id})\n request = self.factory.put(path)\n force_authenticate(request, user=self.user_employer)\n request.user = self.user_employer\n request.data = {\n 'status': 'CANCELLED',\n 'starting_at': \"2019-10-20T00:00\",\n 'ending_at': \"2019-10-20T00:00\",\n 'rating': 4,\n 'candidates': [],\n 'employees': [],\n 'venue': self.venue.id,\n 'position': self.position.id,\n 'application_restriction': 'ANYONE',\n 'maximum_allowed_employees': 20,\n 'minimum_hourly_rate': 10,\n 'minimum_allowed_rating': 1,\n 'allowed_from_list': [],\n }\n response = ShiftView.put(self, request, id=self.shift.id)\n assert response.status_code == 200\n shift = Shift.objects.get(id=self.shift.id)\n assert shift.status == request.data['status']\n assert shift.starting_at.strftime(\"%Y-%m-%dT%H:%M\") == request.data['starting_at']\n assert shift.ending_at.strftime(\"%Y-%m-%dT%H:%M\") == request.data['ending_at']\n assert shift.rating == request.data['rating']\n assert shift.candidates.count() == len(request.data['candidates'])\n \n self.assertEquals(shift.venue.id, request.data['venue'])\n assert shift.position.id == request.data['position']\n assert shift.application_restriction == request.data['application_restriction']", "def test_aws_service_api_vm_patch(self):\n pass", "def post(self):\n data = request.json\n create_testing_scenario(data)\n return 
None, 201", "def test_vmcp_02(self):\r\n signature = dict(signature='XX')\r\n with patch('os.path.exists', return_value=True):\r\n with patch('pybossa.vmcp.sign', return_value=signature):\r\n res = self.app.get('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n out = json.loads(res.data)\r\n assert res.status_code == 200, out\r\n assert out['signature'] == signature['signature'], out\r\n\r\n # Now with a post\r\n res = self.app.post('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n assert res.status_code == 405, res.status_code", "def test_create_monitoring_success(\n self,\n mock_kfp_client,\n ):\n project_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.post(\n f\"/projects/{project_id}/deployments/{deployment_id}/monitorings\",\n json={\n \"taskId\": task_id,\n },\n )\n result = rv.json()\n expected = {\n \"createdAt\": mock.ANY,\n \"deploymentId\": deployment_id,\n \"taskId\": task_id,\n \"task\": {\"name\": util.MOCK_TASK_NAME_1, \"tags\": []},\n \"uuid\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)\n\n mock_kfp_client.assert_any_call(host=\"http://ml-pipeline.kubeflow:8888\")", "def test_03_post_run(self):\n client = self.client\n\n resp = client.get('api/db_default/v4/nts/runs/5')\n self.assertEqual(resp.status_code, 404)\n\n data = open('%s/sample-report.json' % self.shared_inputs).read()\n\n resp = client.post('api/db_default/v4/nts/runs', data=data)\n self.assertEqual(resp.status_code, 401)\n\n resp = client.post('api/db_default/v4/nts/runs', data=data,\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 301)\n self.assertIn('http://localhost/api/db_default/v4/nts/runs/', resp.headers['Location'])\n resp_json = json.loads(resp.data)\n self.assertEqual(resp_json['run_id'], 5)\n\n # Provoke a failing submission.\n resp = client.post('api/db_default/v4/nts/runs?merge=reject',\n data=data,\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 400)\n resp_json = json.loads(resp.data)\n self.assertEqual(resp_json['error'],\n \"import failure: Duplicate submission for '1'\")\n self.assertEqual(resp_json['success'], False)", "def test_post_work_type(self):\n # pre-assert there are 2 work types\n self.assertEqual(WorkType.objects.all().count(), 2)\n\n # authenticate as manager\n self.authenticate(self.manager)\n\n # create work type\n response = self.client.post(\n self.url, {\"name\": \"wt3\", \"name_plural\": \"wt3s\", \"query_name\": \"wt3\"}\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # assert there are now 3 work types\n self.assertEqual(WorkType.objects.all().count(), 3)", "def test_create_machine(self, pretty_print, owner_api_token):\n create_machine_request = setup_data.get('create_machine', {}).get(\n 'request_body') or json.loads(\"\"\"{\n \"template\" : \"{}\",\n \"image\" : \"Debian\",\n \"quantity\" : 1.4658129805029452,\n \"disks\" : {\n \"disk_size\" : 0,\n \"disk_path\" : \"disk_path\"\n },\n \"fqdn\" : \"fqdn\",\n \"cloudinit\" : \"cloudinit\",\n \"volumes\" : \"\",\n \"save\" : true,\n \"dry\" : true,\n \"monitoring\" : true,\n \"tags\" : \"{}\",\n \"cloud\" : \"cloud\",\n \"size\" : \"m1.small\",\n \"optimize\" : \"optimize\",\n \"schedules\" : [ \"\", \"\" ],\n \"extra\" : \"\",\n \"name\" : \"DB mirror\",\n \"location\" : \"\",\n \"expiration\" : {\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\n \"action\" : \"stop\",\n \"notify\" : {\n \"period\" : \"minutes\",\n 
\"value\" : 1\n },\n \"notify_msg\" : \"notify_msg\"\n },\n \"net\" : \"\",\n \"scripts\" : [ \"\", \"\" ],\n \"key\" : \"\"\n}\"\"\", strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri,\n json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'reboot': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def post(self):\n created = post_tool(request.json)\n return created, 201", "def test_execute_deployment(self):\n pass", "def test_shift_view_post_invalid_data(self):\n path = reverse('api:get-shifts')\n request = self.factory.post(path)\n force_authenticate(request, user=self.user_employer)\n request.user = self.user_employer\n request.data = {\n 'status': 'OPEN'\n }\n response = ShiftView.post(self, request)\n assert response.status_code == 400", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def post_virtual_machine_create(self, resource_dict):\n pass", "def post(self, *args, **kwargs):\n body = {}\n # This is setting the code to success by default\n status_code = 201\n\n body_data = json.loads(self.json_body)\n\n try:\n stack_name = body_data['stack_name']\n automation_name = body_data['automation_name']\n tenant_name = body_data['tenant_name']\n except KeyError as a_error:\n error_msg = 'Missing Required Data. Error: %s' % (str(a_error))\n raise GatewayAPIException(status_code=400, reason=error_msg)\n\n try:\n # Step One - Query Automation Database for Automation Provisioning Data\n if self.provision_provider:\n automation_data = yield self.provision_provider.get_automation_data(automation_name=automation_name)\n body = {\"results\": automation_data}\n # Step Two - Trigger Automation determined from Automation Provisioning data\n result = self.provision_provider.trigger_automation(stack_name=stack_name, automation_data=automation_data, tenant_name=tenant_name)\n if result['result']:\n msg = 'Successfully kicked off automation for stack: %s at tenant name: %s' % (stack_name, tenant_name,)\n body = {\"results\": msg}\n else:\n msg = 'Failed to kick off automation for stack: %s at tenant name: %s' % (stack_name, tenant_name,)\n body = {\"results\": msg}\n else:\n LOGGER.error('Provision Provider is None.')\n raise Exception('Internal Coding Error.')\n except Exception as an_error:\n error_msg = str(an_error)\n LOGGER.exception(error_msg)\n raise GatewayAPIException(status_code=400, reason=error_msg)\n\n self.set_status(status_code)\n self.write(body)\n self.finish()" ]
[ "0.84547627", "0.7423614", "0.68335587", "0.617915", "0.6070825", "0.5989244", "0.5934529", "0.5797156", "0.5796808", "0.5699866", "0.5630047", "0.5624665", "0.55860275", "0.55702406", "0.5515915", "0.54990745", "0.5466368", "0.5455596", "0.5427891", "0.53978723", "0.53920615", "0.53851867", "0.5366696", "0.53356934", "0.53249985", "0.5324748", "0.5323602", "0.53150326", "0.5286133", "0.52707285" ]
0.94778556
0
Test case for aws_service_api_vm_workshift_put
def test_aws_service_api_vm_workshift_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_workshift_post(self):\n pass", "def test_aws_service_api_vm_workshift_delete(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_shift_view_put(self):\n path = reverse('api:id-shifts', kwargs={'id': self.shift.id})\n request = self.factory.put(path)\n force_authenticate(request, user=self.user_employer)\n request.user = self.user_employer\n request.data = {\n 'status': 'CANCELLED',\n 'starting_at': \"2019-10-20T00:00\",\n 'ending_at': \"2019-10-20T00:00\",\n 'rating': 4,\n 'candidates': [],\n 'employees': [],\n 'venue': self.venue.id,\n 'position': self.position.id,\n 'application_restriction': 'ANYONE',\n 'maximum_allowed_employees': 20,\n 'minimum_hourly_rate': 10,\n 'minimum_allowed_rating': 1,\n 'allowed_from_list': [],\n }\n response = ShiftView.put(self, request, id=self.shift.id)\n assert response.status_code == 200\n shift = Shift.objects.get(id=self.shift.id)\n assert shift.status == request.data['status']\n assert shift.starting_at.strftime(\"%Y-%m-%dT%H:%M\") == request.data['starting_at']\n assert shift.ending_at.strftime(\"%Y-%m-%dT%H:%M\") == request.data['ending_at']\n assert shift.rating == request.data['rating']\n assert shift.candidates.count() == len(request.data['candidates'])\n \n self.assertEquals(shift.venue.id, request.data['venue'])\n assert shift.position.id == request.data['position']\n assert shift.application_restriction == request.data['application_restriction']", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_shift_view_put_invalid_update(self):\n path = reverse('api:id-shifts', kwargs={'id': self.shift.id})\n request = self.factory.put(path)\n force_authenticate(request, user=self.user_employer)\n request.user = self.user_employer\n request.data = {\n 'status': None\n }\n response = ShiftView.put(self, request, id=self.shift.id)\n assert response.status_code == 400", "def test_shift_view_put_not_found(self):\n path = reverse('api:id-shifts', kwargs={'id': 9999})\n request = self.factory.put(path)\n request.data = {\n 'status': 'OPEN'\n }\n response = ShiftView.put(self, request, id=9999)\n assert response.status_code == 404", "def test_put_work_simple(self):\n # pre-assert there are 3 works\n self.assertEqual(Work.objects.all().count(), 3)\n\n # authenticate as manager\n self.authenticate(self.manager)\n\n # create work\n response = self.client.put(\n self.url_work1,\n {\n \"title\": \"Girls und Panzer\",\n \"subtitle\": \"\",\n \"work_type\": {\"query_name\": \"anime\"},\n },\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # assert there are now 4 works\n self.assertEqual(Work.objects.all().count(), 3)", "def test_put_monitoring_schedule_vendor_v3(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_update_workout(self):\n body = Workout()\n response = self.client.open(\n '/workout/{id}'.format(id='id_example'),\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_kyc_put_request(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_kyc_put_request_legal(self):\n pass", "def test_workflows_id_put(self):\n pass", "def test_update_bucket(self):\n pass", "def test_put_from_another_way(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 
'time': '23:58:59'\n }\n url = reverse('notification', kwargs={'way_id': 101, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_add_workout(self):\n body = Workout()\n response = self.client.open(\n '/workout',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_change_provisioned_throughput_usual_case():", "def test_puthardwares_item(self):\n pass", "def test_aws_service_api_vm_get(self):\n pass", "def test_00_update_machine(self):\n client = self.client\n\n # We are going to set the 'os' field to none, remove the 'uname'\n # parameter and add the 'new_parameter' parameter.\n # Make sure none of those things happened yet:\n machine_before = check_json(client, 'api/db_default/v4/nts/machines/1')\n machine_before = machine_before['machine']\n self.assertIsNotNone(machine_before.get('os', None))\n self.assertIsNone(machine_before.get('new_parameter', None))\n self.assertIsNotNone(machine_before.get('uname', None))\n\n data = {\n 'machine': {\n 'hardware': 'hal 9000',\n 'os': None,\n 'hostname': 'localhost',\n 'new_parameter': True,\n },\n }\n json_data = json.dumps(data)\n resp = client.put('api/db_default/v4/nts/machines/1', data=json_data,\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 200)\n\n machine_after = check_json(client, 'api/db_default/v4/nts/machines/1')\n machine_after = machine_after['machine']\n for key in ('hardware', 'os', 'hostname', 'new_parameter', 'uname'):\n self.assertEquals(machine_after.get(key, None),\n data['machine'].get(key, None))", "def test_put_monitoring_schedule_manufacturer_v3(self):\n pass", "def test_kyc_put_legal(self):\n pass", "def test_update_risk_profile_using_put(self):\n pass", "def test_wallets_put(self):\n pass", "def test_put_work_embedded(self):\n # pre-assert there are 3 works\n self.assertEqual(Work.objects.all().count(), 3)\n\n # authenticate as manager\n self.authenticate(self.manager)\n\n # create work\n response = self.client.put(\n self.url_work1,\n {\n \"title\": \"Girls und Panzer\",\n \"subtitle\": \"\",\n \"alternative_titles\": [{\"title\": \"Galupan\"}, {\"title\": \"Garupan\"}],\n \"work_type\": {\"query_name\": \"anime\"},\n },\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # assert there are now 4 works\n self.assertEqual(Work.objects.all().count(), 3)\n\n # assert embedded data\n work = Work.objects.get(title=\"Girls und Panzer\")\n self.assertEqual(work.alternative_titles.count(), 2)\n self.assertEqual(work.alternative_titles.all()[0].title, \"Galupan\")\n self.assertEqual(work.alternative_titles.all()[1].title, \"Garupan\")", "def test_put_wrong_data(self):\n new_data = {\"fromMonth\": \"another\"}\n response = self.client.put(self.url + str(self.current_data[-1]['id']) + '/', data=json.dumps(new_data),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.content.decode())" ]
[ "0.86835265", "0.7789825", "0.74265546", "0.64560974", "0.64510584", "0.62108296", "0.61423326", "0.6033521", "0.60048115", "0.5985862", "0.59651035", "0.59144586", "0.5877153", "0.5786453", "0.5764561", "0.57141596", "0.56339496", "0.560271", "0.54617304", "0.54551876", "0.5451276", "0.5420654", "0.5416868", "0.54127455", "0.54122704", "0.5395443", "0.5365019", "0.5341894", "0.5336158", "0.5332302" ]
0.9653785
0
Test case for aws_service_api_vms_get
def test_aws_service_api_vms_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vm_get(self):\n pass", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_virtualservice_get(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def getVMs(**kwargs):\n proxy_url = kwargs[\"proxy\"]\n session_token = kwargs[\"sessiontoken\"]\n\n json_response = get_vms_json(proxy_url, session_token)\n\n if json_response == None:\n sys.exit(1)\n\n extracted_dictionary = json_response['results']\n table = PrettyTable(['Display_Name', 'Status', 'External_ID'])\n for i in extracted_dictionary:\n table.add_row([i['display_name'], i['power_state'], i['external_id']])\n print(\"Virtual Machine List:\")\n print(table)", "def test_get_all_virtualservices(self,setup_suite):\n _, resp = get('virtualservice')\n vs_obj_list = resp['results']\n for vs_obj in vs_obj_list:\n logger.info(\" >>> VS Name: %s <<<\" % vs_obj['name'])", "def get_vms(self):\n\n raise NotImplementedError", "def test_aws_service_api_vm_delete(self):\n pass", "def test_aws_service_api_flavors_get(self):\n pass", "def test_get_virtual_service(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_05_vmcp(self):\r\n url = '/api/vmcp'\r\n self.check_limit(url, 'get', 'app')", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def find_vms(self, name):\n script = (\n 'Get-SCVirtualMachine -Name \\\"{}\\\" -VMMServer $scvmm_server')\n data = self.get_json(script.format(name))\n # Check if the data returned to us was a list or 1 dict. Always return a list\n if not data:\n return []\n elif isinstance(data, list):\n return [SCVirtualMachine(system=self, raw=vm_data) for vm_data in data]\n return [SCVirtualMachine(system=self, raw=data)]", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err", "def test_get_virtualization_realms(self):\n pass", "def test_aws_service_api_flavor_get(self):\n pass", "def test_volumes_get(self):\n pass", "def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n 
\n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" % reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat", "def test_vmcp_01(self):\r\n # Even though the key does not exists, let's patch it to test\r\n # all the errors\r\n with patch('os.path.exists', return_value=True):\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status_code'] == 415, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err\r\n assert err['exception_msg'] == 'cvm_salt parameter is missing'", "def info(self, name=None):\n data = self.cloudman.list_servers(filters={'name': name})\n\n \"\"\"\n vms = self.list()\n print (\"VMS\", vms)\n data = None\n for entry in vms:\n print (\"FFF\", entry['name'])\n if entry['name'] == name:\n data = entry\n break\n \"\"\"\n\n if data is None:\n raise ValueError(f\"vm not found {name}\")\n\n r = self.update_dict(data, kind=\"vm\")\n return r", "def test_aws_service_api_vm_password_get(self):\n pass", "def list(self):\n return self._list('/os-psvm', 'psvms')", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_ipam_services_list(self):\n pass", "def test_get_cloud_resources(self):\n pass" ]
[ "0.86987966", "0.8631125", "0.842901", "0.7912379", "0.693028", "0.69067043", "0.6821294", "0.6698808", "0.66339666", "0.6614842", "0.6563548", "0.65617937", "0.654087", "0.64765596", "0.6475945", "0.64094234", "0.63141346", "0.6279449", "0.6171911", "0.6146017", "0.61012304", "0.60914564", "0.6058071", "0.60044795", "0.5972979", "0.5959259", "0.5918756", "0.5909951", "0.5861546", "0.5841748" ]
0.9503802
0
Test case for aws_service_api_vms_post
def test_aws_service_api_vms_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_vms_get(self):\n pass", "def test_aws_service_api_vm_workshift_post(self):\n pass", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_aws_service_api_vm_tag_put(self):\n pass", "def test_deployvm_userdata_post(self):\n deployVmResponse = VirtualMachine.create(\n self.apiClient,\n services=self.services[\"virtual_machine\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=self.template.id,\n zoneid=self.zone.id,\n method='POST'\n )\n vms = list_virtual_machines(\n self.apiClient,\n account=self.account.name,\n domainid=self.account.domainid,\n id=deployVmResponse.id\n )\n self.assertTrue(len(vms) > 0, \"There are no Vms deployed in the account %s\" % self.account.name)\n vm = vms[0]\n self.assertTrue(vm.id == str(deployVmResponse.id), \"Vm deployed is different from the test\")\n self.assertTrue(vm.state == \"Running\", \"VM is not in Running state\")", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_aws_service_api_vm_get(self):\n pass", "def test_aws_service_api_volumes_post(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def test_aws_service_api_vm_delete(self):\n pass", "def test_aws_service_api_snapshots_post(self):\n pass", "def post(request):\n # load request json\n try:\n request_content = json.loads(request.body)\n except JSONDecodeError as e:\n return failed(status=1000001)\n\n # validate request data\n schema = SCHEMA.copy()\n schema['required'] = ['name', 'image_path']\n validate_result, msg = utils.validate_json(data=request_content, schema=schema)\n if validate_result != 0:\n return failed(status=1000001, msg=msg)\n\n # create new vm template\n new_obj = VmTemplate(**request_content)\n\n # save objects\n try:\n new_obj.save()\n except IntegrityError as e:\n\n return failed(status=1001001, msg=str(e.__cause__))\n\n # return data\n data = new_obj.__dict__\n data.pop('_state')\n return success(data=data)", "def test_smoker_post(self):\n pass", "def test_api_use_simtv_post(self):\n body = Body1()\n response = self.client.open(\n '/api/use/simtv/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_volumes_post(self):\n pass", "def post(self):\n created = post_tool(request.json)\n return created, 201", "def test_vmcp_02(self):\r\n signature = dict(signature='XX')\r\n with patch('os.path.exists', return_value=True):\r\n with patch('pybossa.vmcp.sign', return_value=signature):\r\n res = self.app.get('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n out = json.loads(res.data)\r\n assert res.status_code == 200, out\r\n assert out['signature'] == signature['signature'], out\r\n\r\n # Now with a post\r\n res = self.app.post('api/vmcp?cvm_salt=testsalt',\r\n follow_redirects=True)\r\n assert res.status_code == 405, res.status_code", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)", "def test_post(self):\n pass", "def test_api_use_virtual_network_post(self):\n body = Network()\n response = self.client.open(\n '/api/use/virtual-network/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def testServicePost(self):\n\n text = \"This is a test sentence. 
And another sentence to split.\"\n results = self.client.post(\"workflow\", json={\"name\": \"post\", \"elements\": [text]}).json()\n\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0]), 2)", "def test_aws_service_api_vm_workshift_put(self):\n pass", "def test_create_machine(self, pretty_print, owner_api_token):\n create_machine_request = setup_data.get('create_machine', {}).get(\n 'request_body') or json.loads(\"\"\"{\n \"template\" : \"{}\",\n \"image\" : \"Debian\",\n \"quantity\" : 1.4658129805029452,\n \"disks\" : {\n \"disk_size\" : 0,\n \"disk_path\" : \"disk_path\"\n },\n \"fqdn\" : \"fqdn\",\n \"cloudinit\" : \"cloudinit\",\n \"volumes\" : \"\",\n \"save\" : true,\n \"dry\" : true,\n \"monitoring\" : true,\n \"tags\" : \"{}\",\n \"cloud\" : \"cloud\",\n \"size\" : \"m1.small\",\n \"optimize\" : \"optimize\",\n \"schedules\" : [ \"\", \"\" ],\n \"extra\" : \"\",\n \"name\" : \"DB mirror\",\n \"location\" : \"\",\n \"expiration\" : {\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\n \"action\" : \"stop\",\n \"notify\" : {\n \"period\" : \"minutes\",\n \"value\" : 1\n },\n \"notify_msg\" : \"notify_msg\"\n },\n \"net\" : \"\",\n \"scripts\" : [ \"\", \"\" ],\n \"key\" : \"\"\n}\"\"\", strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri,\n json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'reboot': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def on_post(self, req, resp):\n data = req.context['doc']\n user = req.context['token']\n vm_id = data['vm_id']\n\n try:\n log.info(\"Attempt to start VM [{}] for user [{}]..\".format(vm_id, user))\n xapi.current_session().start_vm(vm_id)\n except XapiOperationError as xoe:\n # starting a running VM, log and ignore\n log.info(xoe)\n\n info = xapi.current_session().get_vm_info(vm_id)\n log.info(\"Retrieved info of VM [{}].\".format(vm_id))\n\n resp.status = falcon.HTTP_200\n resp.context['result'] = {\n vm_id: {\n 'rdp_ip': info['ip'],\n 'rdp_port': 3389\n }\n }", "def taco_test_post(self):\n body = '{ \"id\": 500, \"name\": \"item5\", \"content\": \"qwerwqer5\" }'\n env = self.get_env('POST', '/item', body=body)\n result = next(webapi_start(env, lambda status, response_headers: self.validate('200 OK', status, response_headers)))\n self.assertEqual(result, b'5')", "def createVirtualMachine(self,node,post_data):\n data = self.connect('post',\"nodes/%s/qemu\" % (node), post_data)\n return data", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201", "def test_post_method(self):\n self.getPage('/', method='POST')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')" ]
[ "0.7603022", "0.72159064", "0.6981549", "0.69673485", "0.693044", "0.6839709", "0.68275684", "0.67187107", "0.65854514", "0.6572492", "0.6507726", "0.6489554", "0.63969433", "0.63578254", "0.6249199", "0.6237796", "0.6137117", "0.61366147", "0.60767686", "0.6037173", "0.60177034", "0.5997519", "0.5951169", "0.59489477", "0.5894294", "0.5865746", "0.58437514", "0.5839802", "0.58122617", "0.57697487" ]
0.94187784
0
Test case for aws_service_api_volume_attachment_delete
def test_aws_service_api_volume_attachment_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_delete(self):\n pass", "def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])", "def test_attachment_deletion_allowed_attachment_from_volume(\n self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n volume.id)", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_attachment_deletion_allowed_volume_no_attachments(self):\n volume = tests_utils.create_volume(self.context)\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)", "def test_attachment_delete_reserved(self,\n mock_rpc_attachment_delete,\n mock_allowed):\n mock_allowed.return_value = None\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n aobj = objects.VolumeAttachment.get_by_id(self.context,\n aref.id)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual(vref.id, aref.volume_id)\n self.volume_api.attachment_delete(self.context,\n aobj)\n mock_allowed.assert_called_once_with(self.context, aobj)\n\n # Since it's just reserved and never finalized, we should never make an\n # rpc call\n mock_rpc_attachment_delete.assert_not_called()", "def test_attachment_deletion_allowed_no_attachment(self):\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None)", "def test_attachment_deletion_allowed_mismatched_volume_and_attach_id(\n self, mock_get_attatchment):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n attachment2 = self._get_attachment()\n attachment2.volume_id = attachment.volume.id = fake.VOLUME2_ID\n self.assertRaises(exception.InvalidInput,\n self.volume_api.attachment_deletion_allowed,\n self.context, attachment2.id, volume)\n mock_get_attatchment.assert_called_once_with(self.context,\n attachment2.id)", "def test_attachment_deletion_allowed_multiple_attachment(self):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment, attachment])\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, None, volume)", "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_delete_image(self):\n # Upload the image first\n self.test_upload_image()\n im = ImageAttachment.objects.all()[0]\n r = post(self.client, 'upload.del_image_async', args=[im.id])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', 
json_r['status'])\n eq_(0, ImageAttachment.objects.count())", "def test_delete_asset(self):\n pass", "def test_attachment_deletion_allowed_not_found_attachment_id(\n self, mock_get_server, mock_get_attachment):\n mock_get_server.side_effect = nova.API.NotFound(404)\n mock_get_attachment.return_value = self._get_attachment()\n\n self.volume_api.attachment_deletion_allowed(self.context,\n fake.ATTACHMENT_ID)\n\n mock_get_attachment.assert_called_once_with(self.context,\n fake.ATTACHMENT_ID)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n fake.VOLUME_ID)", "def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def test_reserve_reserve_delete(self, mock_allowed):\n mock_allowed.return_value = None\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.volume_api.attachment_delete(self.context,\n aref)\n mock_allowed.assert_called_once_with(self.context, aref)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.assertEqual(1, len(vref.volume_attachment))", "def test_delete_file(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'test.tar.gz')\n\n id1 = utils.generate_id('test.tar.gz')\n backend.put(src, id1)\n\n # regression testing (only delete what requested)\n id2 = id1.split('-')\n id2[4] += 'ZZZ'\n id2 = '-'.join(id2)\n\n backend.put(src, id1, True)\n backend.put_variant(src, id1, 'demo.txt')\n backend.put(src, id2, True)\n backend.delete(id1)\n\n path1 = '/'.join(backend.id_to_path(id1)) + '/test.tar.gz'\n path2 = '/'.join(backend.id_to_path(id1)) + '/demo.txt'\n self.assertFalse(backend.exists(path1))\n self.assertFalse(backend.exists(path2))\n\n # assume only proper file deleted\n path3 = '/'.join(backend.id_to_path(id2)) + '/test.tar.gz'\n self.assertTrue(backend.exists(path3))", "def 
test_attachment_deletion_allowed_vm_not_found(self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n fake.VOLUME_ID)", "def test_delete_image_signature(self):\n pass", "def delete_file(_oid, attachmentId=None):\n md = Metadata.objects.get_or_404(pk=_oid)\n attachment = ''\n test_upload_path_prefix = \"uploadedfiles\"\n test_environment = False\n\n username = _authenticate_user_from_session(request)\n\n if username:\n try:\n try:\n md = Metadata.objects.get(id=_oid)\n \n try:\n # if developing locally we'll also want to remove file\n url = filter(\n lambda a: str(a.id) == attachmentId, md.attachments\n ).pop().url\n if str(os.environ['FLASKCONFIG']) == 'testing' or str(os.environ['FLASKCONFIG']) == 'development':\n test_environment = True\n os.remove(\n os.path.join(\n app.config['UPLOADS_DEFAULT_DEST'],\n test_upload_path_prefix,\n _oid,\n os.path.basename(url)\n )\n )\n else:\n os.remove(\n os.path.join(\n app.config['UPLOADS_DEFAULT_DEST'],\n _oid,\n os.path.basename(url)\n )\n )\n except Exception:\n #Throw exception specific for test or non-test enviroment\n if test_environment:\n file_path = app.config['UPLOADS_DEFAULT_DEST'] + \"/\" + test_upload_path_prefix + \"/\" + _oid + \"/\" + os.path.basename(url)\n else:\n file_path = app.config['UPLOADS_DEFAULT_DEST'] + \"/\" + _oid + \"/\" + os.path.basename(url)\n\n print \"There was a problem deleting the file! Tried to reach path: \" + file_path \n \n # don't need to save after this since we're updating existing\n Metadata.objects(id=_oid).update_one(\n pull__attachments__id=attachmentId\n )\n \n md = Metadata.objects.get(id=_oid)\n \n # we'll just go ahead and not care if it doesn't exist\n except ValueError:\n pass\n\n\n except KeyError:\n try:\n keys = request.json.keys()\n keys_str = ', '.join(keys)\n except Exception as e:\n print \"Error: \" + str(e)\n return Response(\"Server error deleting file...\", status=500)\n\n return jsonify(\n {\n 'message':\n 'Key(s) ' + keys_str + ' not recognized. 
' +\n 'Must contain \\'attachment\\''\n },\n status=400\n )\n\n return jsonify(dict(message=attachment + ' successfully (at/de)tached!', record=md))\n \n else:\n return Response('Bad or missing session id.', status=401)", "def test_attachment_deletion_allowed_service_call(self, mock_get_server):\n self.context.service_roles = ['reader', 'service']\n attachment = self._get_attachment()\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n mock_get_server.assert_not_called()", "def test_delete_image(self):\n pass", "def test_delete(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n response_upload = util.upload_file(client, DEFAULT_USER, file)\n response_delete = util.delete_file(client, DEFAULT_USER, response_upload.json[\"id\"])\n assert response_delete.status == \"204 NO CONTENT\"\n response_download = util.download_file(\n client, DEFAULT_USER, response_upload.json[\"id\"]\n )\n assert response_download.status == \"404 NOT FOUND\"", "def test_attachment_deletion_allowed_attachment_id_not_found(self,\n mock_get):\n attachment = self._get_attachment(with_instance_id=False)\n attachment.connection_info = None\n self.assertRaises(exception.ConflictNovaUsingAttachment,\n self.volume_api.attachment_deletion_allowed,\n self.context, fake.ATTACHMENT_ID)\n mock_get.assert_called_once_with(self.context, fake.ATTACHMENT_ID)", "def test_object_delete(self):\n self.add_attachments() # attach the attachments\n\n # we have 2 attachments\n self.assertEqual(3, self.eightythreeb.attachment_set.all().count())\n # delete a single object\n self.eightythreeb.attachment_set.all()[0].delete()\n # we should now have 2 active attachments\n self.assertEqual(2, self.eightythreeb.attachment_set.all().count())\n # and 1 deleted\n self.assertEqual(1, self.eightythreeb.attachment_set.deleted().count())", "def test_attachment_deletion_allowed_mismatch_id(self, mock_get_server):\n mock_get_server.return_value.attachment_id = fake.ATTACHMENT2_ID\n attachment = self._get_attachment()\n self.volume_api.attachment_deletion_allowed(self.context, attachment)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n fake.VOLUME_ID)", "def _test_delete_mm_document_with_volume(create_using_pid1=True):\n\n parent, child = _choose_endpoints_and_do_request(\n (client, json_headers, \"DELETE\"),\n (parent_pid, parent_pid_type, child_pid, child_pid_type),\n payload,\n create_using_pid1=create_using_pid1,\n )\n\n _assert_record_relations(\n parent,\n expected={\n \"relations\": {}\n },\n )\n _assert_record_relations(child, expected={\"relations\": {}})", "def delete(self, product_attachment_id):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json", "def test_delete_system_asset(self):\n pass" ]
[ "0.8310692", "0.77349037", "0.7635774", "0.75067145", "0.75060105", "0.7399964", "0.73437047", "0.7274928", "0.7239528", "0.7180327", "0.71410435", "0.69960743", "0.6957549", "0.6951939", "0.6923718", "0.687879", "0.6873728", "0.68463564", "0.6834399", "0.6799959", "0.67816293", "0.6768609", "0.6748093", "0.6704121", "0.6696957", "0.6658095", "0.66453815", "0.6613821", "0.65649337", "0.65286857" ]
0.9622656
0
Test case for aws_service_api_volume_attachment_put
def test_aws_service_api_volume_attachment_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def test_attachment_update_volume_in_error_state(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)\n vref.status = 'error'\n vref.save()\n connector = {'fake': 'connector',\n 'host': 'somehost'}\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_update,\n self.context,\n aref,\n connector)", "def test_attachment_create_creating_volume(self):\n volume_params = {'status': 'creating'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)", "def test_aws_service_api_volume_delete(self):\n pass", "def fusion_api_patch_storage_volume_attachments(self, body, param='', api=None, headers=None):\n return self.volume_attachment.patch(body=body, param=param, api=api, headers=headers)", "def test_put_raises_on_overwriting(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src1 = os.path.join(uploads, 'demo-test.tar.gz')\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src1, id, 'demo-test.tar.gz')\n with assert_raises(x.FileExists):\n backend.put_variant(src2, id, 'demo-test.tar.gz')", "def test_attachment_create_volume_in_error_state(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n vref.status = \"error\"\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID2)", "def test_aws_service_api_volume_get(self):\n pass", "def test_upload_attachment_to_container(fake_note_with_video_attachment, fake_attachment):\n\n note_id_value: str = str(uuid.uuid4())\n base_url = 'https://dt-fs-test2.crm.crm.dynamics.com'\n oauth_url = 'https://dtdv-video-index-uspklrodz4yzi.azurewebsites.net/api/Dynamic365AuthToken?code=V5UYqIu=='\n\n oauth_token = \"AAABBBCCCDDDEEE\"\n\n account_name = \"storage_account_a\"\n container = \"container_a\"\n\n api_uri = base_url + Note.ATTACHMENT_ENDPOINT.format(note_id=note_id_value)\n a_note = Note.from_dict(note_id_value, fake_note_with_video_attachment)\n filename = a_note.filename\n\n blob_storage_endpoint = f\"https://{account_name}.blob.core.windows.net/{container}/{filename}\"\n\n rest_headers = {}\n responses.add(responses.GET, api_uri, json=fake_attachment, status=HTTPStatus.OK)\n responses.add(responses.POST, oauth_url, json={\"token\": oauth_token}, status=HTTPStatus.OK)\n responses.add(responses.PUT, blob_storage_endpoint, json={}, status=HTTPStatus.CREATED)\n\n downloaded_file = a_note.download_attachment(base_url, rest_headers)\n TAG_A = \"tag_a\"\n TAG_B = \"tag_b\"\n metadata_tags = {TAG_A: \"value_a\", TAG_B: \"value_b\"}\n assert a_note.upload_attachment_to_container(downloaded_file, metadata_tags, account_name, container, oauth_url)\n assert len(responses.calls) == 3\n assert responses.calls[0].request.url == api_uri\n assert responses.calls[1].request.url == oauth_url\n assert 
responses.calls[2].request.url == blob_storage_endpoint", "def test_put_object_from_file(self):\n self.get_file(20)\n response = self.bos.put_object_from_file(self.BUCKET, self.KEY, self.FILENAME)\n self.check_headers(response, [\"etag\"])", "def test_put(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn('file_id', response.data)\n self.assertIn('file_transfer_id', response.data)\n self.assertIn('file_transfer_secret_key', response.data)", "def test_attachment_create_readonly_volume(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.volume_api.update_readonly_flag(self.context, vref, True)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('ro', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "def test_put_file_variant(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src, id, 'variant.tar.gz')\n path = '/'.join(backend.id_to_path(id)) + '/variant.tar.gz'\n self.assertTrue(backend.exists(path))", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])", "def test_aws_service_api_volumes_post(self):\n pass", "def test_additional_attachment_create_no_connector(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('null', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)\n\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual(2, len(vref.volume_attachment))", "def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')", "def test_attach_attached_volume_to_same_server(self):\n server, validation_resources = 
self._create_server()\n volume = self.create_volume()\n\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.attach_volume, server, volume)", "def test_attachment_deletion_allowed_attachment_from_volume(\n self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n volume.id)", "def test_manage_volume_attachments(self, volume, instance, volumes_steps):\n volumes_steps.attach_instance(volume.name, instance.name)\n volumes_steps.detach_instance(volume.name, instance.name)", "def test_force_put_to_overwrite_existing(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n filename = 'demo-test.tar.gz'\n src1 = os.path.join(uploads, filename)\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id(filename)\n backend.put_variant(src1, id, filename)\n backend.put_variant(src2, id, filename, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/' + filename\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(Bucket=backend.bucket_name, Key=path)\n self.assertEquals(\n str(os.path.getsize(src2)),\n str(res['ResponseMetadata']['HTTPHeaders']['content-length'])\n )", "def test_upload_attachment_throws_exception_if_storage_api_returns_forbidden_or_internal_server_error(fake_note_with_video_attachment, fake_attachment):\n\n note_id_value: str = str(uuid.uuid4())\n base_url = 'https://dt-fs-test2.crm.crm.dynamics.com'\n oauth_url = 'https://dtdv-video-index-uspklrodz4yzi.azurewebsites.net/api/Dynamic365AuthToken?code=V5UYqIu=='\n\n oauth_token = \"AAABBBCCCDDDEEE\"\n\n account_name = \"storage_account_a\"\n container = \"container_a\"\n\n api_uri = base_url + Note.ATTACHMENT_ENDPOINT.format(note_id=note_id_value)\n a_note = Note.from_dict(note_id_value, fake_note_with_video_attachment)\n filename = a_note.filename\n\n blob_storage_endpoint = f\"https://{account_name}.blob.core.windows.net/{container}/{filename}\"\n\n rest_headers = {}\n responses.add(responses.GET, api_uri, json=fake_attachment, status=HTTPStatus.OK)\n responses.add(responses.POST, oauth_url, json={\"token\": oauth_token}, status=HTTPStatus.OK)\n responses.add(responses.PUT, blob_storage_endpoint, json={}, status=HTTPStatus.BAD_REQUEST)\n\n downloaded_file = a_note.download_attachment(base_url, rest_headers)\n TAG_A = \"tag_a\"\n TAG_B = \"tag_b\"\n metadata_tags = {TAG_A: \"value_a\", TAG_B: \"value_b\"}\n with pytest.raises(Exception):\n a_note.upload_attachment_to_container(downloaded_file, metadata_tags, account_name, container, oauth_url)\n\n responses.add(responses.PUT, blob_storage_endpoint, json={}, status=HTTPStatus.INTERNAL_SERVER_ERROR)\n\n downloaded_file = a_note.download_attachment(base_url, rest_headers)\n TAG_A = \"tag_a\"\n TAG_B = \"tag_b\"\n metadata_tags = {TAG_A: \"value_a\", TAG_B: \"value_b\"}\n with pytest.raises(Exception):\n a_note.upload_attachment_to_container(downloaded_file, metadata_tags, account_name, container, oauth_url)", "def test_attachment_deletion_allowed_volume_no_attachments(self):\n volume = tests_utils.create_volume(self.context)\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)", "def 
test_attachment_deletion_allowed_mismatched_volume_and_attach_id(\n self, mock_get_attatchment):\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n attachment2 = self._get_attachment()\n attachment2.volume_id = attachment.volume.id = fake.VOLUME2_ID\n self.assertRaises(exception.InvalidInput,\n self.volume_api.attachment_deletion_allowed,\n self.context, attachment2.id, volume)\n mock_get_attatchment.assert_called_once_with(self.context,\n attachment2.id)", "def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)", "def test_attach_file(self):\n data = data_from_file(\"test_attach_file.json\")\n self.assertEqual(data, attach_file(data_full_filename(\"img.png\")),\n msg=\"attach_file() returns a wrong response\")", "def test_aws_service_api_vm_tag_put(self):\n pass" ]
[ "0.78520364", "0.70537263", "0.6843338", "0.66168505", "0.65721464", "0.6559735", "0.6498688", "0.64489305", "0.6446952", "0.6433131", "0.6415115", "0.6407497", "0.63907266", "0.63678694", "0.6332799", "0.6299971", "0.6292937", "0.6288657", "0.62113434", "0.6199681", "0.61764616", "0.61496884", "0.6145758", "0.610695", "0.6095503", "0.609233", "0.6084409", "0.60474634", "0.60264844", "0.6014518" ]
0.95771945
0
Test case for aws_service_api_volume_delete
def test_aws_service_api_volume_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))", "def test_aws_service_api_vm_delete(self):\n pass", "def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])", "def test_aws_service_api_volume_get(self):\n pass", "def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):\n return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_aws_service_api_snapshot_delete(self):\n pass", "def delete(self):\r\n return self.connection.delete_volume(self.id)", "def test_delete_volume_failure_modes(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self._fail_space_delete = True\n # This should not throw an exception, space-delete failure not problem\n self.driver.delete_volume(volume)\n self._fail_space_delete = False\n volume['provider_id'] = None\n # This should also not throw an exception\n self.driver.delete_volume(volume)", "def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()", "def delete_volume_record( volume ):\n \n logger.info( \"Delete Volume =%s\\n\\n\" % volume.name )\n \n volume_name = volume.name \n config = observer_core.get_config()\n \n # delete the Volume on Syndicate.\n try:\n rc = observer_core.ensure_volume_absent( volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to delete volume %s\", volume_name )\n raise e\n \n return rc", "def 
test_aws_service_api_volume_patch(self):\n pass", "def test_main_exit_absent(self, mock_delete_volume, mock_module, mock_client):\n PARAMS = {\n 'storage_system_ip': '192.168.0.1',\n 'storage_system_name': '3PAR',\n 'storage_system_username': 'USER',\n 'storage_system_password': 'PASS',\n 'volume_name': 'test_volume',\n 'cpg': None,\n 'size': None,\n 'size_unit': None,\n 'snap_cpg': None,\n 'wait_for_task_to_end': None,\n 'new_name': None,\n 'expiration_hours': None,\n 'retention_hours': None,\n 'ss_spc_alloc_warning_pct': None,\n 'ss_spc_alloc_limit_pct': None,\n 'usr_spc_alloc_warning_pct': None,\n 'usr_spc_alloc_limit_pct': None,\n 'rm_ss_spc_alloc_warning': None,\n 'rm_usr_spc_alloc_warning': None,\n 'rm_exp_time': None,\n 'rm_usr_spc_alloc_limit': None,\n 'rm_ss_spc_alloc_limit': None,\n 'compression': False,\n 'type': 'thin',\n 'keep_vv': None,\n 'state': 'absent'\n }\n # This creates a instance of the AnsibleModule mock.\n mock_module.params = PARAMS\n mock_module.return_value = mock_module\n instance = mock_module.return_value\n mock_delete_volume.return_value = (True, True, \"Deleted volume successfully.\", {})\n hpe3par_volume.main()\n # AnsibleModule.exit_json should be called\n instance.exit_json.assert_called_with(\n changed=True, msg=\"Deleted volume successfully.\")\n # AnsibleModule.fail_json should not be called\n self.assertEqual(instance.fail_json.call_count, 0)", "def test_vault_delete_vault_item(self):\n pass", "def delete_volume(self, context, volume_id, unmanage_only=False):\n context = context.elevated()\n\n volume_ref = self.db.volume_get(context, volume_id)\n\n if context.project_id != volume_ref['project_id']:\n project_id = volume_ref['project_id']\n else:\n project_id = context.project_id\n\n LOG.info(_(\"volume %s: deleting\"), volume_ref['id'])\n if volume_ref['attach_status'] == \"attached\":\n # Volume is still attached, need to detach first\n raise exception.VolumeAttached(volume_id=volume_id)\n\n self._notify_about_volume_usage(context, volume_ref, \"delete.start\")\n self._reset_stats()\n\n try:\n self._delete_cascaded_volume(context, volume_id)\n except Exception:\n LOG.exception(_(\"Failed to deleting volume\"))\n # Get reservations\n try:\n reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting volume\"))\n\n # Delete glance metadata if it exists\n try:\n self.db.volume_glance_metadata_delete_by_volume(context, volume_id)\n LOG.debug(_(\"volume %s: glance metadata deleted\"),\n volume_ref['id'])\n except exception.GlanceMetadataNotFound:\n LOG.debug(_(\"no glance metadata found for volume %s\"),\n volume_ref['id'])\n\n self.db.volume_destroy(context, volume_id)\n LOG.info(_(\"volume %s: deleted successfully\"), volume_ref['id'])\n self._notify_about_volume_usage(context, volume_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n\n self.publish_service_capabilities(context)\n\n return True", "def test_delete_destination_volume_in_migration(self):\n self._test_delete_volume_in_migration('target:vol-id')", "def test_aws_service_api_volumes_get(self):\n pass", "def _test_delete_volume_in_migration(self, migration_status):\n volume = tests_utils.create_volume(self.context, host=CONF.host,\n 
migration_status=migration_status)\n self.volume.delete_volume(self.context, volume=volume)\n\n # The volume is successfully removed during the volume delete\n # and won't exist in the database any more.\n self.assertRaises(exception.VolumeNotFound, volume.refresh)", "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def _test_delete_mm_document_with_volume(create_using_pid1=True):\n\n parent, child = _choose_endpoints_and_do_request(\n (client, json_headers, \"DELETE\"),\n (parent_pid, parent_pid_type, child_pid, child_pid_type),\n payload,\n create_using_pid1=create_using_pid1,\n )\n\n _assert_record_relations(\n parent,\n expected={\n \"relations\": {}\n },\n )\n _assert_record_relations(child, expected={\"relations\": {}})", "def test_delete_file(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'test.tar.gz')\n\n id1 = utils.generate_id('test.tar.gz')\n backend.put(src, id1)\n\n # regression testing (only delete what requested)\n id2 = id1.split('-')\n id2[4] += 'ZZZ'\n id2 = '-'.join(id2)\n\n backend.put(src, id1, True)\n backend.put_variant(src, id1, 'demo.txt')\n backend.put(src, id2, True)\n backend.delete(id1)\n\n path1 = '/'.join(backend.id_to_path(id1)) + '/test.tar.gz'\n path2 = '/'.join(backend.id_to_path(id1)) + '/demo.txt'\n self.assertFalse(backend.exists(path1))\n self.assertFalse(backend.exists(path2))\n\n # assume only proper file deleted\n path3 = '/'.join(backend.id_to_path(id2)) + '/test.tar.gz'\n self.assertTrue(backend.exists(path3))", "def delete_volume(self, volume):\n vg_name = self.get_volume_group_name(volume.id)\n vol_name = self.get_volume_name(volume.id)\n try:\n if self._get_is_replica(volume.volume_type) and self.replica:\n self._delete_volume_replica(volume, vg_name, vol_name)\n\n LOG.debug(\"Searching and deleting volume: %s in K2.\", vol_name)\n vol_rs = self.client.search(\"volumes\", name=vol_name)\n if vol_rs.total != 0:\n vol_rs.hits[0].delete()\n LOG.debug(\"Searching and deleting vg: %s in K2.\", vg_name)\n vg_rs = self.client.search(\"volume_groups\", name=vg_name)\n if vg_rs.total != 0:\n vg_rs.hits[0].delete()\n except Exception as ex:\n LOG.exception(\"Deletion of volume %s failed.\", vol_name)\n raise KaminarioCinderDriverException(reason=ex)", "def test_aws_service_api_volumes_post(self):\n pass", "def delete(self, volume_id):\n self.client().volumes.delete(volume_id)", "def _delete_image_volume(self,\n context: context.RequestContext,\n cache_entry: dict) -> None:\n volume = objects.Volume.get_by_id(context, cache_entry['volume_id'])\n\n # Delete will evict the cache entry.\n self.volume_api.delete(context, volume)", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_delete_source_volume_in_migration(self):\n self._test_delete_volume_in_migration('migrating')", "def test_reserve_reserve_delete(self, mock_allowed):\n mock_allowed.return_value = None\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = 
objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.volume_api.attachment_delete(self.context,\n aref)\n mock_allowed.assert_called_once_with(self.context, aref)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.assertEqual(1, len(vref.volume_attachment))" ]
[ "0.8468831", "0.81775326", "0.8146488", "0.77009195", "0.75285304", "0.7432786", "0.7391771", "0.735651", "0.73492223", "0.72565275", "0.71595895", "0.7126499", "0.7101761", "0.70642024", "0.7002589", "0.69212985", "0.6907403", "0.68808544", "0.6820135", "0.67568016", "0.67457217", "0.67452824", "0.67220104", "0.6703999", "0.670387", "0.6643652", "0.66372275", "0.65870947", "0.6584713", "0.6583444" ]
0.9583541
0
Test case for aws_service_api_volume_get
def test_aws_service_api_volume_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volumes_get(self):\n pass", "def test_volumes_get(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def test_aws_service_api_volume_types_get(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def volume_get(context, volume_id):\n return _volume_get(context, volume_id)", "def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)", "def test_aws_service_api_volumes_post(self):\n pass", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def test_aws_service_api_vm_get(self):\n pass", "def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)", "def get_volume(self, volume_id):\n url = '%s/volumes/%s' % (self.catalog['volume'], volume_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['volume']\n else:\n LOG.error('Get volume failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def get_volume(self, volume_id):\n aname = \"cinder_v%s.get_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.get(volume_id)", "def test_calculate_volume(self, mock_send_cli_cmd):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"calculate volume with number\"\n response = [\"2000\", \"400\", \"-\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n )\n self.assertTrue(result)\n\n msg = \"calculate volume with number with wing1_volume\"\n response = [\"2000\", \"400\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n wing1_volume=\"1000\"\n )\n self.assertTrue(result)", "def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def test_aws_service_api_vm_management_get(self):\n pass", "def test_volume():\n structure = Material(input)\n assert (structure.volume == 90.725624999999965)", "def test_aws_service_api_image_get(self):\n pass", "def test_aws_service_api_vm_details_get(self):\n pass", "def volumes(self):", "def get_volume(volume, array):\n try:\n return array.get_volume(volume, pending=True)\n except Exception:\n return None", "def test_vault_get_vault_item(self):\n pass", "def test_aws_service_api_vms_get(self):\n pass", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_aws_service_api_private_image_get(self):\n pass", "def test_update_volume_stats(self):\n actual = self.driver.get_volume_stats(True)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual(90, actual['total_capacity_gb'])\n self.assertEqual(87, actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])", "def test_sound_volume(self):\n return 
self.send(\"test_sound_volume\")" ]
[ "0.87159956", "0.8193062", "0.79313076", "0.78389907", "0.75917023", "0.72042996", "0.7173881", "0.7138423", "0.70423996", "0.6946495", "0.6889604", "0.6761077", "0.67592734", "0.67408574", "0.66669095", "0.65210545", "0.6495301", "0.64644176", "0.6450827", "0.64361185", "0.6416461", "0.6414861", "0.63982975", "0.63775164", "0.63441986", "0.62950045", "0.6241972", "0.62292844", "0.62123376", "0.6189762" ]
0.9506741
0
Test case for aws_service_api_volume_patch
def test_aws_service_api_volume_patch(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_aws_service_api_volumes_post(self):\n pass", "def test_volumes_get(self):\n pass", "def test_aws_service_api_vm_patch(self):\n pass", "def test_volumes_post(self):\n pass", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_aws_service_api_volume_types_get(self):\n pass", "def test_patch_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume_1 = synthetic_volume_full(host)\n spare_volume_2 = synthetic_volume_full(host)\n\n response = self.api_client.patch(\n \"/api/target/\",\n data={\n \"objects\": [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_1.id},\n {\"kind\": \"MDT\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_2.id},\n ],\n \"deletions\": [],\n },\n )\n self.assertHttpAccepted(response)", "def test_finish_resize_with_volumes(self):\n\n # create instance\n instance = self._create_fake_instance_obj()\n request_spec = objects.RequestSpec()\n\n # create volume\n volume = {'instance_uuid': None,\n 'device_name': None,\n 'id': uuids.volume,\n 'size': 200,\n 'attach_status': 'detached'}\n bdm = objects.BlockDeviceMapping(\n **{'context': self.context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': uuids.volume,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n\n # stub out volume attach\n def fake_volume_get(self, context, volume_id, microversion=None):\n return volume\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n\n def fake_volume_check_availability_zone(self, context,\n volume_id, instance):\n pass\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_volume_check_availability_zone)\n\n def fake_get_volume_encryption_metadata(self, context, volume_id):\n return {}\n self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',\n fake_get_volume_encryption_metadata)\n\n orig_connection_data = {\n 'target_discovered': True,\n 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,\n 'target_portal': '127.0.0.0.1:3260',\n 'volume_id': uuids.volume_id,\n }\n connection_info = {\n 'driver_volume_type': 'iscsi',\n 'data': orig_connection_data,\n }\n\n def fake_init_conn(self, context, volume_id, session):\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn)\n\n def fake_attach(self, context, volume_id, instance_uuid, device_name,\n mode='rw'):\n volume['instance_uuid'] = instance_uuid\n volume['device_name'] = device_name\n self.stub_out('nova.volume.cinder.API.attach', fake_attach)\n\n # stub out virt driver attach\n def fake_get_volume_connector(*args, **kwargs):\n return {}\n self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',\n fake_get_volume_connector)\n\n def fake_attach_volume(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.attach_volume',\n fake_attach_volume)\n\n # attach volume to instance\n self.compute.attach_volume(self.context, instance, bdm)\n\n # assert volume attached correctly\n self.assertEqual(volume['device_name'], '/dev/vdc')\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n 
self.assertEqual(bdm['device_name'], volume['device_name'])\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # begin resize\n flavor = self.default_flavor\n instance.task_state = task_states.RESIZE_PREP\n instance.save()\n self.compute.prep_resize(self.context, instance=instance,\n flavor=flavor,\n image={}, request_spec=request_spec,\n filter_properties={}, node=None,\n clean_shutdown=True, migration=None,\n host_list=[])\n\n # fake out detach for prep_resize (and later terminate)\n def fake_terminate_connection(self, context, volume, connector):\n connection_info['data'] = None\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n\n migration = objects.Migration.get_by_instance_and_status(\n self.context.elevated(),\n instance.uuid, 'pre-migrating')\n self.compute.resize_instance(self.context, instance=instance,\n migration=migration, image={},\n # TODO(stephenfin): Why a JSON string?\n flavor=jsonutils.to_primitive(flavor),\n clean_shutdown=True, request_spec=request_spec)\n\n # assert bdm is unchanged\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n cached_connection_info = jsonutils.loads(bdm['connection_info'])\n self.assertEqual(cached_connection_info['data'],\n orig_connection_data)\n # but connection was terminated\n self.assertIsNone(connection_info['data'])\n\n # stub out virt driver finish_migration\n def fake(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)\n\n instance.task_state = task_states.RESIZE_MIGRATED\n instance.save()\n\n # new initialize connection\n new_connection_data = dict(orig_connection_data)\n new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id,\n new_connection_data['target_iqn'] = new_iqn\n\n def fake_init_conn_with_data(self, context, volume, session):\n connection_info['data'] = new_connection_data\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn_with_data)\n\n self.compute.finish_resize(self.context,\n migration=migration,\n disk_info={}, image={}, instance=instance,\n request_spec=request_spec)\n\n # assert volume attached correctly\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance['uuid'])\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # stub out detach\n def fake_detach(self, context, volume_uuid):\n volume['device_path'] = None\n volume['instance_uuid'] = None\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n\n # clean up\n self.compute.terminate_instance(self.context, instance, [])", "def test_py_volume(self):\n self._test_py_compile('volume')", "def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)", "def test_aws_service_api_vm_command_put(self):\n pass", "def test_crud_volume_metadata(self):\n # Create metadata for the volume\n metadata = {\"key1\": \"value1\",\n \"key2\": \"value2\",\n \"key3\": \"value3\",\n \"key4\": \"<value&special_chars>\"}\n update = {\"key4\": \"value4\",\n \"key1\": \"value1_update\"}\n expected = {\"key4\": \"value4\"}\n\n body = 
self.volumes_client.create_volume_metadata(self.volume['id'],\n metadata)['metadata']\n self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))\n # Get the metadata of the volume\n body = self.volumes_client.show_volume_metadata(\n self.volume['id'])['metadata']\n self.assertThat(body.items(), matchers.ContainsAll(metadata.items()),\n 'Create metadata for the volume failed')\n\n # Update metadata\n body = self.volumes_client.update_volume_metadata(\n self.volume['id'], update)['metadata']\n self.assertEqual(update, body)\n body = self.volumes_client.show_volume_metadata(\n self.volume['id'])['metadata']\n self.assertEqual(update, body, 'Update metadata failed')\n\n # Delete one item metadata of the volume\n self.volumes_client.delete_volume_metadata_item(\n self.volume['id'], \"key1\")\n body = self.volumes_client.show_volume_metadata(\n self.volume['id'])['metadata']\n self.assertNotIn(\"key1\", body)\n self.assertThat(body.items(), matchers.ContainsAll(expected.items()),\n 'Delete one item metadata of the volume failed')", "def test_volume():\n structure = Material(input)\n assert (structure.volume == 90.725624999999965)", "def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)", "def test_main_exit_modify(self, mock_modify_volume, mock_module, mock_client):\n PARAMS_FOR_PRESENT = {\n 'storage_system_ip': '192.168.0.1',\n 'storage_system_name': '3PAR',\n 'storage_system_username': 'USER',\n 'storage_system_password': 'PASS',\n 'volume_name': 'test_volume',\n 'cpg': None,\n 'size': None,\n 'size_unit': None,\n 'snap_cpg': None,\n 'wait_for_task_to_end': None,\n 'new_name': 'new_volume',\n 'expiration_hours': 10,\n 'retention_hours': 100,\n 'ss_spc_alloc_warning_pct': 0,\n 'ss_spc_alloc_limit_pct': 0,\n 'usr_spc_alloc_warning_pct': 0,\n 'usr_spc_alloc_limit_pct': 0,\n 'rm_ss_spc_alloc_warning': False,\n 'rm_usr_spc_alloc_warning': False,\n 'rm_exp_time': False,\n 'rm_usr_spc_alloc_limit': False,\n 'rm_ss_spc_alloc_limit': False,\n 'compression': None,\n 'type': None,\n 'keep_vv': None,\n 'state': 'modify'\n }\n # This creates a instance of the AnsibleModule mock.\n mock_module.params = PARAMS_FOR_PRESENT\n mock_module.return_value = mock_module\n instance = mock_module.return_value\n mock_modify_volume.return_value = (True, True, \"Modified volume successfully.\", {})\n hpe3par_volume.main()\n # AnsibleModule.exit_json should be called\n instance.exit_json.assert_called_with(\n changed=True, msg=\"Modified volume successfully.\")\n # AnsibleModule.fail_json should not be called\n self.assertEqual(instance.fail_json.call_count, 0)", "def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)", "def test_upload_new_vdisk(self, mock_create_file):\n\n # traits are already set to use the REST API upload\n\n # First need to load in the various test responses.\n vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)\n vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)\n\n self.adpt.read.return_value = vg_orig\n self.adpt.update_by_path.return_value = vg_post_crt\n mock_create_file.return_value = self._fake_meta()\n\n n_vdisk, f_wrap = ts.upload_new_vdisk(\n self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50,\n d_size=25, sha_chksum='abc123')\n\n # Ensure the create file was called\n mock_create_file.assert_called_once_with(\n self.adpt, 'test2', vf.FileType.DISK_IMAGE, self.v_uuid,\n f_size=50, tdev_udid='0300f8d6de00004b000000014a54555cd9.3',\n sha_chksum='abc123')\n\n # Ensure cleanup was called after the 
upload\n self.adpt.delete.assert_called_once_with(\n 'File', service='web',\n root_id='6233b070-31cc-4b57-99bd-37f80e845de9')\n self.assertIsNone(f_wrap)\n self.assertIsNotNone(n_vdisk)\n self.assertIsInstance(n_vdisk, stor.VDisk)", "def test_create_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider, note that provider_id is hashed\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)", "def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')", "def test_create_volume_blocked(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n self._return_blocked = 1 # Block & fail cancel => create succeeded\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)\n self.assertTrue(self._request_cancel)", "def test_volume_service(self):\n self.assertIsInstance(ChangeStateScript()._deployer._volume_service,\n VolumeService)", "def test_calculate_volume(self, mock_send_cli_cmd):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"calculate volume with number\"\n response = [\"2000\", \"400\", \"-\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n )\n self.assertTrue(result)\n\n msg = \"calculate volume with number with wing1_volume\"\n response = [\"2000\", \"400\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n wing1_volume=\"1000\"\n )\n self.assertTrue(result)", "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_extend_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.extended = {'name': '', 'size': '0',\n 'storageserver': ''}\n self.driver.extend_volume(volume, 12)\n expected = {'name': 'volume10', 'size': '2',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,'}\n self.assertDictMatch(expected, self.extended)", "def update_volumes():\n 
print 'do something useful here'", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()" ]
[ "0.8404985", "0.7963305", "0.7776195", "0.77071863", "0.76335806", "0.74082214", "0.7226184", "0.7076858", "0.6940691", "0.6913554", "0.6855418", "0.671127", "0.6625104", "0.64926636", "0.6474662", "0.6460252", "0.64272976", "0.6321299", "0.6295169", "0.6265758", "0.6237064", "0.6233903", "0.62274325", "0.6224669", "0.6195144", "0.6190352", "0.61685544", "0.61551565", "0.6140889", "0.613726" ]
0.9439126
0
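The record above pairs the query "Test case for aws_service_api_volume_patch" with a stub document whose body is just pass. For orientation only, a minimal sketch of what such a test could look like against a mocked client is given below; the volume_api object, its patch_volume method, and the payload fields are hypothetical stand-ins and are not taken from this dataset or from any real SDK. Using unittest.mock keeps the sketch self-contained and avoids depending on live service credentials.

import unittest
from unittest import mock


class TestVolumePatchSketch(unittest.TestCase):
    """Illustrative only: exercises a hypothetical patch_volume call through a mock."""

    def test_aws_service_api_volume_patch(self):
        # No real cloud service is contacted; the client is a mock object.
        api = mock.Mock(name="volume_api")
        api.patch_volume.return_value = {"id": "vol-1", "name": "renamed"}

        result = api.patch_volume("vol-1", {"name": "renamed"})

        api.patch_volume.assert_called_once_with("vol-1", {"name": "renamed"})
        self.assertEqual(result["name"], "renamed")


if __name__ == "__main__":
    unittest.main()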
Test case for aws_service_api_volume_types_get
def test_aws_service_api_volume_types_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_get(self):\n pass", "def get_volume_types(self):\n res = self.get('%s/types' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volume_types']\n else:\n LOG.error('Get volume types failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def _volume_types(cls):\n try:\n return cls.volumes.behaviors.get_volume_types()\n except:\n raise DatasetGeneratorError(\n \"Unable to retrieve list of volume types during \"\n \"data-driven-test setup.\")", "def test_aws_service_api_volumes_get(self):\n pass", "def test_volumes_get(self):\n pass", "def test_get_types(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def test_aws_service_api_volumes_post(self):\n pass", "def get_volume_type(self, volume_type_id):\n url = '%s/types/%s' % (self.catalog['volume'], volume_type_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['volume_type']\n else:\n LOG.error('Get volume type failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def test_change_volume_type(self, create_volume, volumes_steps):\n volume_name = generate_ids('volume').next()\n create_volume(volume_name, volume_type=None)\n volumes_steps.change_volume_type(volume_name)", "def test_aws_service_api_volume_delete(self):\n pass", "def volume_types_steps(horizon, login):\n return VolumeTypesSteps(horizon)", "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def volume_type(self):\n return 'UNKNOWN'", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def get_volume_type(self, volume_type):\n aname = \"cinder_v%s.get_volume_type\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volume_types.get(volume_type)", "def list_type_access(self, volume_type):\n return self._impl.list_type_access(volume_type)", "def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n 
ret.append(volume)\n\n return ret", "def volumes(self):", "def test_aws_service_api_flavors_get(self):\n pass", "def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def get_amount_of_file_type_volumes(host_ip, sp_id, sd_id, image_id):\n # Build the path to the Disk's location on the file system\n volume_path = FILE_SD_VOLUME_PATH_IN_FS % (sp_id, sd_id, image_id)\n command = GET_FILE_SD_NUM_DISK_VOLUMES % volume_path\n executor = rhevm_helpers.get_host_executor(\n ip=host_ip, password=config.VDC_ROOT_PASSWORD\n )\n rc, output, err = executor.run_cmd(shlex.split(command))\n\n assert not rc, errors.CommandExecutionError(\"Output: %s\" % output)\n # There are a total of 3 files/volume, the volume metadata (.meta),\n # the volume lease (.lease) and the volume content itself (no\n # extension)\n num_volumes = int(output)/3\n logger.debug(\n \"The number of file type volumes found is '%s'\",num_volumes\n )\n return num_volumes", "def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")", "def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")", "def volume_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"volume_type\")", "def test_create_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider, note that provider_id is hashed\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)", "def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)", "def test_volumes_post(self):\n pass", "def test_list_media_type(self):\n\n # check if documentalist has access to list media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, \"Video\")" ]
[ "0.7527856", "0.7515748", "0.7399525", "0.73993516", "0.70917654", "0.68899465", "0.63437366", "0.6312394", "0.60646117", "0.6017373", "0.59605116", "0.58603066", "0.5806784", "0.5803278", "0.5764818", "0.57422024", "0.5703015", "0.5697612", "0.56767386", "0.5667692", "0.56516546", "0.5650676", "0.5613081", "0.5590484", "0.5590484", "0.5590484", "0.5574165", "0.5561891", "0.5553772", "0.5543958" ]
0.956193
0
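The volume-types record above likewise stores only a stub test. The sketch below assumes a Cinder-style listing endpoint that returns a JSON body under a volume_types key, mirroring the helper shown among the negatives; the api object and its get method are mocked stand-ins, not a real client.

import json
import unittest
from unittest import mock


class TestVolumeTypesGetSketch(unittest.TestCase):
    """Illustrative only: parses a mocked 'volume_types' listing response."""

    def test_aws_service_api_volume_types_get(self):
        fake_body = {"volume_types": [{"id": "t1", "name": "ssd"}]}
        api = mock.Mock(name="volume_api")
        api.get.return_value = {"status": 200, "body": json.dumps(fake_body)}

        res = api.get("/types")
        self.assertEqual(res["status"], 200)
        types = json.loads(res["body"])["volume_types"]
        self.assertEqual(types[0]["name"], "ssd")


if __name__ == "__main__":
    unittest.main()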
Test case for aws_service_api_volumes_get
def test_aws_service_api_volumes_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_volumes_get(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_volumes_post(self):\n pass", "def test_aws_service_api_volume_types_get(self):\n pass", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def test_aws_service_api_volume_patch(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def test_volumes_post(self):\n pass", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret", "def _get_volumes(list_of_volume_ids):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n volumes = ec2_client.get_all_volumes(\n volume_ids=list_of_volume_ids)\n except boto.exception.EC2ResponseError as e:\n if 'InvalidVolume.NotFound' in e:\n all_volumes = ec2_client.get_all_volumes()\n utils.log_available_resources(all_volumes)\n return None\n except boto.exception.BotoServerError as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return volumes", "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def volumes(self):", "def test_aws_service_api_vm_get(self):\n pass", "def get_volumes(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n destroyed=None, # type: bool\n filter=None, # type: str\n ids=None, # type: List[str]\n limit=None, # type: int\n names=None, # type: List[str]\n offset=None, # type: int\n sort=None, # type: List[str]\n total_item_count=None, # type: bool\n total_only=None, # type: bool\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, 
# type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> models.VolumeGetResponse\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n destroyed=destroyed,\n filter=filter,\n ids=ids,\n limit=limit,\n names=names,\n offset=offset,\n sort=sort,\n total_item_count=total_item_count,\n total_only=total_only,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volumes_api.api20_volumes_get_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n return self._call_api(endpoint, kwargs)", "def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, headers=headers, param=param)", "def volume_get(context, volume_id):\n return _volume_get(context, volume_id)", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def test_aws_service_api_snapshots_get(self):\n pass", "def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)", "def test_aws_service_api_vms_get(self):\n pass", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def test_volumes_complex(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /foo: /host/foo\n /bar:\n hostpath: /host/bar\n /snap:\n hostpath: /host/snap\n options: z,ro\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n vols = config.volumes\n assert len(vols) == 3\n\n v = vols[\"/foo\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/foo\"\n assert v.host_path == \"/host/foo\"\n assert v.options == []\n\n v = vols[\"/bar\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/bar\"\n assert v.host_path == \"/host/bar\"\n assert v.options == []\n\n v = vols[\"/snap\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/snap\"\n assert v.host_path == \"/host/snap\"\n assert v.options == [\"z\", \"ro\"]", "def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)", "def get_volume(self, volume_id):\n url = '%s/volumes/%s' % (self.catalog['volume'], volume_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['volume']\n else:\n LOG.error('Get volume failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = 
discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)", "def describe_volumes(self, volume_ids = None, max_results = -1, next_token = \"\", detail = True):\n response = volume.describe_volumes(self.url, self.verb, self.headers, self.version, \n volume_ids, max_results, next_token, detail)\n if response is not None :\n res = DescribeVolumesResponse.DescribeVolumesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None" ]
[ "0.8942529", "0.87519145", "0.80404556", "0.77520996", "0.7608479", "0.7544432", "0.7415526", "0.733975", "0.7072568", "0.6962387", "0.69297963", "0.6905789", "0.68827385", "0.68624336", "0.6763317", "0.67302483", "0.671769", "0.6712588", "0.6687525", "0.6679484", "0.6639061", "0.6589829", "0.65263295", "0.6525666", "0.6488497", "0.6384449", "0.6366261", "0.6357378", "0.63461834", "0.6340503" ]
0.9550234
0
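The volumes_get record above is also a stub. The sketch below mocks a boto3-style EC2 client whose describe_volumes response carries a Volumes list, the same shape used by the boto-based negatives above; no real client, region, or credentials are involved.

import unittest
from unittest import mock


class TestVolumesGetSketch(unittest.TestCase):
    """Illustrative only: lists volumes through a mocked boto3-style EC2 client."""

    def test_aws_service_api_volumes_get(self):
        client = mock.Mock(name="ec2_client")
        client.describe_volumes.return_value = {
            "Volumes": [{"VolumeId": "vol-1", "Size": 8, "State": "available"}]
        }

        volumes = client.describe_volumes()["Volumes"]

        self.assertEqual(len(volumes), 1)
        self.assertEqual(volumes[0]["State"], "available")


if __name__ == "__main__":
    unittest.main()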
Test case for aws_service_api_volumes_post
def test_aws_service_api_volumes_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_volumes_post(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_volumes_get(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def post_volumes_pvcvolume_action(self, body, **kw):\n _body = None\n resp = 202\n assert len(list(body.keys())) == 1\n action = list(body.keys())[0]\n if action == 'os-attach':\n assert sorted(list(body[action])) == ['instance_uuid',\n 'mode',\n 'mountpoint']\n elif action == 'os-detach':\n assert body[action] is None\n elif action == 'os-reserve':\n assert body[action] is None\n elif action == 'os-unreserve':\n assert body[action] is None\n elif action == 'os-initialize_connection':\n assert list(body[action].keys()) == ['connector']\n return (202, {}, {'connection_info': 'foos'})\n elif action == 'os-terminate_connection':\n assert list(body[action].keys()) == ['connector']\n elif action == 'os-begin_detaching':\n assert body[action] is None\n elif action == 'os-roll_detaching':\n assert body[action] is None\n elif action == 'os-reset_status':\n assert 'status' in body[action]\n else:\n raise AssertionError(\"Unexpected action: %s\" % action)\n return (resp, {}, _body)", "def post_volumes(\n self,\n references=None, # type: List[models.ReferenceType]\n volume=None, # type: models.VolumePost\n authorization=None, # type: str\n x_request_id=None, # type: str\n names=None, # type: List[str]\n overwrite=None, # type: bool\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) 
-> models.VolumeResponse\n kwargs = dict(\n volume=volume,\n authorization=authorization,\n x_request_id=x_request_id,\n names=names,\n overwrite=overwrite,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volumes_api.api20_volumes_post_with_http_info\n _process_references(references, ['names'], kwargs)\n return self._call_api(endpoint, kwargs)", "def test_aws_service_api_snapshots_post(self):\n pass", "def test_aws_service_api_vms_post(self):\n pass", "def test_upload_new_vdisk(self, mock_create_file):\n\n # traits are already set to use the REST API upload\n\n # First need to load in the various test responses.\n vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)\n vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)\n\n self.adpt.read.return_value = vg_orig\n self.adpt.update_by_path.return_value = vg_post_crt\n mock_create_file.return_value = self._fake_meta()\n\n n_vdisk, f_wrap = ts.upload_new_vdisk(\n self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50,\n d_size=25, sha_chksum='abc123')\n\n # Ensure the create file was called\n mock_create_file.assert_called_once_with(\n self.adpt, 'test2', vf.FileType.DISK_IMAGE, self.v_uuid,\n f_size=50, tdev_udid='0300f8d6de00004b000000014a54555cd9.3',\n sha_chksum='abc123')\n\n # Ensure cleanup was called after the upload\n self.adpt.delete.assert_called_once_with(\n 'File', service='web',\n root_id='6233b070-31cc-4b57-99bd-37f80e845de9')\n self.assertIsNone(f_wrap)\n self.assertIsNotNone(n_vdisk)\n self.assertIsInstance(n_vdisk, stor.VDisk)", "def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')", "def test_aws_service_api_volume_types_get(self):\n pass", "def test_volumes_volname_stop_post(self):\n pass", "def test_volumes_volname_start_post(self):\n pass", "def test_create_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider, note that provider_id is hashed\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def test_finish_resize_with_volumes(self):\n\n # create instance\n instance = self._create_fake_instance_obj()\n request_spec = objects.RequestSpec()\n\n # create volume\n volume = {'instance_uuid': None,\n 'device_name': None,\n 'id': uuids.volume,\n 'size': 200,\n 'attach_status': 'detached'}\n bdm = objects.BlockDeviceMapping(\n **{'context': self.context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': 
uuids.volume,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n\n # stub out volume attach\n def fake_volume_get(self, context, volume_id, microversion=None):\n return volume\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n\n def fake_volume_check_availability_zone(self, context,\n volume_id, instance):\n pass\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_volume_check_availability_zone)\n\n def fake_get_volume_encryption_metadata(self, context, volume_id):\n return {}\n self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',\n fake_get_volume_encryption_metadata)\n\n orig_connection_data = {\n 'target_discovered': True,\n 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,\n 'target_portal': '127.0.0.0.1:3260',\n 'volume_id': uuids.volume_id,\n }\n connection_info = {\n 'driver_volume_type': 'iscsi',\n 'data': orig_connection_data,\n }\n\n def fake_init_conn(self, context, volume_id, session):\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn)\n\n def fake_attach(self, context, volume_id, instance_uuid, device_name,\n mode='rw'):\n volume['instance_uuid'] = instance_uuid\n volume['device_name'] = device_name\n self.stub_out('nova.volume.cinder.API.attach', fake_attach)\n\n # stub out virt driver attach\n def fake_get_volume_connector(*args, **kwargs):\n return {}\n self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',\n fake_get_volume_connector)\n\n def fake_attach_volume(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.attach_volume',\n fake_attach_volume)\n\n # attach volume to instance\n self.compute.attach_volume(self.context, instance, bdm)\n\n # assert volume attached correctly\n self.assertEqual(volume['device_name'], '/dev/vdc')\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # begin resize\n flavor = self.default_flavor\n instance.task_state = task_states.RESIZE_PREP\n instance.save()\n self.compute.prep_resize(self.context, instance=instance,\n flavor=flavor,\n image={}, request_spec=request_spec,\n filter_properties={}, node=None,\n clean_shutdown=True, migration=None,\n host_list=[])\n\n # fake out detach for prep_resize (and later terminate)\n def fake_terminate_connection(self, context, volume, connector):\n connection_info['data'] = None\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n\n migration = objects.Migration.get_by_instance_and_status(\n self.context.elevated(),\n instance.uuid, 'pre-migrating')\n self.compute.resize_instance(self.context, instance=instance,\n migration=migration, image={},\n # TODO(stephenfin): Why a JSON string?\n flavor=jsonutils.to_primitive(flavor),\n clean_shutdown=True, request_spec=request_spec)\n\n # assert bdm is unchanged\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n cached_connection_info = jsonutils.loads(bdm['connection_info'])\n self.assertEqual(cached_connection_info['data'],\n orig_connection_data)\n # but connection was terminated\n self.assertIsNone(connection_info['data'])\n\n # stub 
out virt driver finish_migration\n def fake(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)\n\n instance.task_state = task_states.RESIZE_MIGRATED\n instance.save()\n\n # new initialize connection\n new_connection_data = dict(orig_connection_data)\n new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id,\n new_connection_data['target_iqn'] = new_iqn\n\n def fake_init_conn_with_data(self, context, volume, session):\n connection_info['data'] = new_connection_data\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn_with_data)\n\n self.compute.finish_resize(self.context,\n migration=migration,\n disk_info={}, image={}, instance=instance,\n request_spec=request_spec)\n\n # assert volume attached correctly\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance['uuid'])\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # stub out detach\n def fake_detach(self, context, volume_uuid):\n volume['device_path'] = None\n volume['instance_uuid'] = None\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n\n # clean up\n self.compute.terminate_instance(self.context, instance, [])", "def setupVolumes(volumes: Volumes) -> None:\n volumesList = readProcessJson(\n [\"podman\", \"volume\", \"ls\", \"--format\", \"json\"])\n existingVolumes: Set[str] = set()\n if volumesList:\n for volume in volumesList:\n existingVolumes.add(volume['name'])\n for volume in volumes.values():\n if volume.name not in existingVolumes:\n log.info(f\"Creating volume {volume.name}\")\n execute([\"podman\", \"volume\", \"create\", volume.name])\n if volume.files:\n for file in volume.files:\n path = Path(\"~/.local/share/containers/storage/volumes/\"\n f\"{volume.name}/_data/{file.name}\").expanduser()\n if not path.exists():\n log.info(f\"Writting {path}\")\n path.write_text(file.content)", "def test_upload_volume_to_image(self, volume, images_steps, volumes_steps):\n image_name = next(generate_ids('image', length=20))\n volumes_steps.upload_volume_to_image(volume.name, image_name)\n\n images_steps.page_images().table_images.row(\n name=image_name).wait_for_presence(30)\n images_steps.delete_image(image_name)", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def test_aws_service_api_vm_command_put(self):\n pass", "def update_volumes():\n print 'do something useful here'", "def test_delete_volumes(self, volumes_count, volumes_steps,\n create_volumes):\n volume_names = list(generate_ids('volume', count=volumes_count))\n create_volumes(volume_names)", "def test_aws_service_api_vm_tag_put(self):\n pass", "def volumes(self):", "def fusion_api_create_storage_volume(self, body, api=None, headers=None):\n return self.volume.create(body=body, api=api, headers=headers)" ]
[ "0.8798903", "0.7797968", "0.72953814", "0.69540304", "0.6889303", "0.6877601", "0.67905885", "0.6789412", "0.6704169", "0.6693733", "0.6646005", "0.6571699", "0.64049333", "0.63365346", "0.62437063", "0.6133804", "0.60914564", "0.6068486", "0.60637164", "0.6053222", "0.6037397", "0.6030986", "0.59316975", "0.5871674", "0.58301854", "0.58205366", "0.58151394", "0.5796122", "0.57383305", "0.5717779" ]
0.94235116
0
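The volumes_post record above again stores only a pass stub. The sketch below shows one hedged way a creation test could be written against a mocked client; the create_volume method and its payload are assumptions for illustration, not an actual API surface.

import unittest
from unittest import mock


class TestVolumesPostSketch(unittest.TestCase):
    """Illustrative only: creates a volume through a mocked client."""

    def test_aws_service_api_volumes_post(self):
        api = mock.Mock(name="volume_api")
        api.create_volume.return_value = {"id": "vol-new", "size": 10, "status": "creating"}

        created = api.create_volume({"name": "data-disk", "size": 10})

        api.create_volume.assert_called_once_with({"name": "data-disk", "size": 10})
        self.assertEqual(created["status"], "creating")


if __name__ == "__main__":
    unittest.main()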
An observation for Deep Xi (noisy-speech STMS and STPS).
def observation(self, x): x = self.normalise(x) x_STMS, x_STPS = self.polar_analysis(x) x_STMS_STPS = tf.concat([x_STMS, x_STPS], axis=-1) return x_STMS_STPS, x_STMS_STPS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def observation(self, x):\n\t\tx = self.normalise(x)\n\t\tx_STDCT = self.stdct_analysis(x)\n\t\treturn x_STDCT, None", "def observation(self, x):\n\t\tx = self.normalise(x)\n\t\tx_STMS, x_STPS = self.polar_analysis(x)\n\t\treturn x_STMS, x_STPS", "def get_observable_description(stix_obj, log):\n desc = \"\"\n if stix_obj[\"type\"] == \"observed-data\":\n #\n # This stix object type is special. We need to dig deeper for\n # the observable we care\n #\n '''\n {\n \"objects\": {\"0\": {\"type\": \"ipv4-addr\", \"value\": \"83.217.8.127\"}},\n \"modified\": \"2018-06-19T15:54:11.000Z\", \n \"x_ibm_security_toxicity\": \"very-high\", \n \"type\": \"observed-data\",\n \"id\": \"observed-data--cd36a717-ac23-409f-a6bc-ab078ab1fcef\", \n \"last_observed\": \"2018-06-12T13:10:09.000Z\"\n }\n '''\n #\n # so far all the observed-data has only one embedded obj\n # if there is more, log the error\n #\n if len(stix_obj[\"objects\"]) > 1:\n log.error(\"Observed-data {} has {} objects!\".format(stix_obj[\"id\"], str(len(stix_obj[\"objects\"]))))\n\n # Only look at the first one\n obj = stix_obj[\"objects\"][\"0\"]\n if obj.get(\"type\", \"\") == \"file\" and \"hashes\" in obj:\n # file type with hashes. Use hash value as description\n try:\n hashes = obj[\"hashes\"]\n desc = hashes.get(\"MD5\", hashes.get(\"SHA-256\", str(hashes)))\n except Exception as e:\n log.error(\"Failed to extract hash value from {}\".format(str(obj)))\n desc = str(hashes)\n elif obj.get(\"name\", \"\") == \"unknown\" and obj.get(\"type\", \"\") == \"file\":\n # If this obj is a file, the name could be unknown. Do something special for it\n desc = \"file with name unknown\"\n else:\n desc = obj.get(\"value\", obj.get(\"name\", obj.get(\"hashes\", \"\")))\n elif stix_obj[\"type\"] == u\"indicator\":\n if stix_obj[u\"name\"] == u\"IpAddress\":\n desc = stix_obj[u\"pattern\"].replace(\"[ipv4-addr:value='\", '').replace(\"']\", '')\n elif stix_obj[u\"name\"] == u\"Url\":\n desc = stix_obj[u\"pattern\"].replace(\"[url:value='\", '').replace(\"']\", '')\n elif stix_obj[u\"name\"] == u\"Malicious URL\":\n desc = stix_obj[u\"pattern\"].replace(\"[url:value = '\", '').replace(\"']\", '')\n elif stix_obj[u\"name\"] == u\"DomainName\":\n desc = stix_obj[u\"pattern\"].replace(\"[domain-name:value='\", '').replace(\"']\", '')\n elif stix_obj[u\"name\"] == u\"Hash\":\n # Desc is obtained by extracting hash value from stix_obj 'pattern'.\n # e.g. 
\"file:hashes.'MD5'='abcd1234effe56786543abcd1234effe\" -> \"abcd1234effe56786543abcd1234effe\"\n desc = re.sub(HASH_INDICATOR_REGEX, '', stix_obj[u\"pattern\"]).replace(\"']\", '')\n else:\n # Don't know how to handle the pattern, just put everything\n desc = str(stix_obj[u\"pattern\"])\n log.debug(\"Not handling {}\".format(str(stix_obj)))\n else:\n desc = stix_obj[\"name\"]\n\n #\n # Make sure it is a string\n #\n desc_str = str(desc)\n\n return desc_str", "def _get_observation(self, observation):", "def observation(state):\n return state[:4]", "def in_situ_tair_snd(sno0, year0=2016, npr_date=-1, ascat_date=-1):\n if npr_date < 0:\n npr_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n if ascat_date < 0:\n ascat_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n snd_name = \"snow\"\n print 'the %d was processing' % sno0\n sno = str(sno0)\n tair_name = \"Air Temperature Observed (degC)\"\n if sno0 in [2065, 2081]:\n if year0 == 2016:\n tair_name = \"Air Temperature Average (degC)\"\n # read measurements\n hr_list = [5, 7, 9, 14, 18, 21]\n t_air_one_year = read_site.in_situ_series(sno, y=year0, hr=hr_list) # [:, :, 0] temperature at 7:00 (local)\n # time_above_zero_0 = data_process.zero_find(t_air_one_year[:, :, 0], w=10, th=-0.1) #\n # time_above_zero_1 = data_process.zero_find(t_air_one_year[:, :, 1], w=10, th=-0.1)\n # time_above_zero_2 = data_process.zero_find(t_air_one_year[:, :, 3], w=10, th=-0.1)\n time_above_zero_list = [data_process.zero_find(t_air_one_year[:, :, i], w=10, th=-0.1)\n for i in range(0, len(hr_list))]\n date_tuple = bxy.time_getlocaltime(time_above_zero_list, ref_time=[2000, 1, 1, 0], t_source='US/Alaska')\n t_value, t_date = read_site.read_measurements\\\n (sno, tair_name, np.arange(1, 365), year0=year0, hr=18, t_unit='sec')\n\n\n tair_zero_day2 = data_process.zero_find(np.array([t_date, -t_value]), w=7, th=0) # in unit of sec\n tair_zero_day1 = data_process.zero_find_gt(np.array([t_date, t_value]), w=7, th=1)\n air_win = 7 # check days during window shown air temperature gt 0 degC\n w, w_valid = data_process.n_convolve3(t_value, air_win)\n air0_index0 = np.where(w>5)\n for ind0 in air0_index0[0]:\n if t_date[ind0] > bxy.get_total_sec('%d0307' % year0):\n tair_zero_day = t_date[ind0] - air_win*24*3600\n break\n # check\n zero_date = bxy.time_getlocaltime([tair_zero_day,tair_zero_day2, npr_date[0], ascat_date[0]],\n ref_time=[2000, 1, 1, 0], t_source=\"US/Alaska\")[-2]\n i_zero = np.where(bxy.time_getlocaltime(t_date, ref_time=[2000, 1, 1, 0],\n t_source=\"US/Alaska\")[-2] == zero_date[0])[0][0]\n t_check = t_value[i_zero - 3: i_zero + 4]\n air_0, air00 = read_site.read_measurements(sno, tair_name, 366+np.arange(50, 70), hr=18)\n a_extend = np.array([-3600*24, 3600*24])\n period0, period1 = np.array(sorted([tair_zero_day, npr_date])) + a_extend, \\\n np.array(sorted([tair_zero_day, ascat_date])) + a_extend\n snow_value, snow_date = read_site.read_measurements\\\n (sno, snd_name, np.arange(1, 365), year0=year0, hr=0, t_unit='sec')\n # get the in situ measurements during a period\n snow2date0 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period0)\n snow2date1 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period1)\n air2date0, air2date1 = data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period0),\\\n data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period1)\n return tair_zero_day, snow2date0, snow2date1, air2date0, air2date1", "def 
get_observable(stix_obj, log):\n res_obj = {}\n\n if stix_obj[u\"type\"] == \"relationship\":\n return None\n\n res_obj[u\"toxicity\"] = stix_obj.get(IBM_TOXICITY, \"\")\n res_obj[u\"relevance\"] = stix_obj.get(IBM_RELEVANCE, \"\")\n res_obj[u\"description\"] = get_observable_description(stix_obj, log)\n res_obj[u\"type\"] = get_observable_type(stix_obj, log)\n\n return res_obj", "def create_observation(self):", "def create_observation(self):", "def export_obs_meta(obs):\n log = toast.utils.Logger.get()\n\n # Construct observation frame and dictionary\n ob = c3g.G3Frame(c3g.G3FrameType.Observation)\n obmeta = dict()\n\n session = obs.session\n log.verbose(f\"Create observation frame for session {session.name}\")\n\n ob[\"telescope_name\"] = c3g.G3String(\n obs.telescope.focalplane.detector_data.meta[\"telescope\"]\n )\n obmeta[\"telescope_name\"] = str(\n obs.telescope.focalplane.detector_data.meta[\"telescope\"]\n )\n ob[\"telescope_uid\"] = c3g.G3Int(obs.telescope.uid)\n obmeta[\"telescope_uid\"] = int(obs.telescope.uid)\n\n ob[\"observing_session\"] = c3g.G3String(session.name)\n obmeta[\"observing_session\"] = str(session.name)\n\n ob[\"observing_session_uid\"] = c3g.G3Int(session.uid)\n obmeta[\"observing_session_uid\"] = int(session.uid)\n\n ob[\"observing_session_start\"] = t3g.to_g3_time(session.start.timestamp())\n obmeta[\"observing_session_start\"] = float(session.start.timestamp())\n\n ob[\"observing_session_end\"] = t3g.to_g3_time(session.end.timestamp())\n obmeta[\"observing_session_end\"] = float(session.end.timestamp())\n\n site = obs.telescope.site\n siteclass = toast.utils.object_fullname(site.__class__)\n ob[\"site_name\"] = c3g.G3String(site.name)\n ob[\"site_class\"] = c3g.G3String(siteclass)\n ob[\"site_uid\"] = c3g.G3Int(site.uid)\n obmeta[\"site_name\"] = str(site.name)\n obmeta[\"site_class\"] = str(siteclass)\n obmeta[\"site_uid\"] = int(site.uid)\n\n if isinstance(site, toast.instrument.GroundSite):\n ob[\"site_lat_deg\"] = c3g.G3Double(site.earthloc.lat.to_value(u.degree))\n ob[\"site_lon_deg\"] = c3g.G3Double(site.earthloc.lon.to_value(u.degree))\n ob[\"site_alt_m\"] = c3g.G3Double(site.earthloc.height.to_value(u.meter))\n obmeta[\"site_lat_deg\"] = float(site.earthloc.lat.to_value(u.degree))\n obmeta[\"site_lon_deg\"] = float(site.earthloc.lon.to_value(u.degree))\n obmeta[\"site_alt_m\"] = float(site.earthloc.height.to_value(u.meter))\n\n if site.weather is not None:\n if hasattr(site.weather, \"name\"):\n # This is a simulated weather object, dump it.\n ob[\"site_weather_name\"] = c3g.G3String(site.weather.name)\n obmeta[\"site_weather_name\"] = str(site.weather.name)\n ob[\"site_weather_realization\"] = c3g.G3Int(site.weather.realization)\n obmeta[\"site_weather_realization\"] = int(site.weather.realization)\n if site.weather.max_pwv is None:\n ob[\"site_weather_max_pwv\"] = c3g.G3String(\"NONE\")\n obmeta[\"site_weather_max_pwv\"] = \"NONE\"\n else:\n ob[\"site_weather_max_pwv\"] = c3g.G3Double(site.weather.max_pwv)\n obmeta[\"site_weather_max_pwv\"] = float(site.weather.max_pwv)\n ob[\"site_weather_time\"] = t3g.to_g3_time(site.weather.time.timestamp())\n obmeta[\"site_weather_time\"] = float(site.weather.time.timestamp())\n ob[\"site_weather_uid\"] = c3g.G3Int(site.weather.site_uid)\n obmeta[\"site_weather_uid\"] = int(site.weather.site_uid)\n ob[\"site_weather_use_median\"] = c3g.G3Bool(site.weather.median_weather)\n obmeta[\"site_weather_use_median\"] = bool(site.weather.median_weather)\n return ob, obmeta", "def get_noisy_samples(X, std):\n # std = 
STDEVS[subset][FLAGS.dataset][FLAGS.attack]\n X_noisy = np.clip(X + rand_gen.normal(loc=0.0, scale=std, size=X.shape), 0, 1)\n return X_noisy", "def observation_spec(self):\r\n pass", "def sampleTrajectoryNonUniform(self, XInit, theta, time, obsNoiseStd=None,\n SNR=None, plotting=None):\n def fODE(x, time):\n return self.f(x, theta)\n x = odeint(fODE, XInit, time, rtol=1e-8, mxstep=5000000) # huge for stiff problems\n noise = np.random.randn(x.shape[0], x.shape[1])\n if obsNoiseStd is None:\n signalStds = np.std(x, axis=0)\n obsNoiseStds = signalStds/np.sqrt(SNR)\n obsNoiseStds = obsNoiseStds.reshape([1, -1])\n obsNoiseStds = np.repeat(obsNoiseStds, x.shape[0], axis=0)\n else:\n obsNoiseStds = np.ones_like(x)*obsNoiseStd\n noise = noise*obsNoiseStds\n y = x + noise\n if plotting is not None:\n if not os.path.exists(plotting):\n os.makedirs(plotting)\n for state in np.arange(x.shape[1]):\n # set ticks\n fig = plt.figure()\n plot = fig.add_subplot(111)\n # set cross thickness\n plot.scatter(time, y[:, state], c='k', marker='.', s=100,\n linewidths=1)\n # set line thickness\n plot.plot(time, x[:, state], 'r', linewidth=2)\n # set label fontsize\n plt.xlabel(\"time\",\n fontsize=20)\n plt.ylabel(\"state {}\".format(state+1),\n fontsize=20)\n plt.setp(plot.get_xticklabels(),\n fontsize=20)\n plt.setp(plot.get_yticklabels(),\n fontsize=20)\n plt.tight_layout()\n plt.savefig(os.path.join(plotting,\n \"state{}.png\".format(state)),\n dpi=300)\n \n plt.close()\n return x, y", "def silicate(self):\n index = self.var_index(6)\n return self.var_data(index)", "def test_univariate_sim_and_obs_lik(self):\n\n d = SepiaData(t_sim=self.univ_data_dict['t_sim'], y_sim=self.univ_data_dict['y_sim'],\n y_obs=self.univ_data_dict['y_obs'])\n print('Testing univariate sim and obs SepiaLogLik...', flush=True)\n print(d, flush=True)\n\n # Do explicit transformation\n d.transform_xt()\n d.standardize_y()\n model = SepiaModel(d)\n\n model.logLik()\n\n for param in model.params.mcmcList:\n for cindex in range(int(np.prod(param.val_shape))):\n model.logLik(cvar=param.name, cindex=cindex)", "def getXS(self, interaction):\n return self.micros[interaction]", "def observation(self):\n # This consists of two things:\n # - the measured bitstrings\n # - the vectorized representation of the optimization problem\n #\n # In particular, the first 10*NUM_SHOTS (i.e. 100) entries are measured\n # qubit values. 
The remaining entries are the weights of the problem\n # graph.\n return np.concatenate([self.bitstrings.flatten(), self._prob_vec])", "def observation_value(self):\n pass", "def get_P_1obs_xi(self, obsname, dataID):\n covmat = self.covmat[obsname]\n\n ##### Get the follow-up observable, obsintr is used for setting up mass range\n if obsname=='Yx':\n obsmeas, obsintr, obserr = self.catalog['Yx_fid'][dataID], self.scaling['Dx'], self.catalog['Yx_err'][dataID]\n elif obsname=='Mgas':\n obsmeas, obsintr, obserr = self.catalog['Mg_fid'][dataID], self.scaling['Dx'], self.catalog['Mg_err'][dataID]\n elif obsname=='WLMegacam':\n LSSnoise = self.WLcalib['Megacam_LSS'][0] + self.scaling['MegacamScatterLSS'] * self.WLcalib['Megacam_LSS'][1]\n obsmeas, obserr, obsintr = .8*self.scaling['bWL_Megacam']*self.obs2mass('zeta', self.xi2zeta(self.catalog['xi'][dataID]), self.catalog['redshift'][dataID]), .3, self.scaling['DWL_Megacam']\n elif obsname=='WLHST':\n LSSnoise = self.WLcalib['HST_LSS'][0] + self.scaling['HSTscatterLSS'] * self.WLcalib['HST_LSS'][1]\n obsmeas, obserr, obsintr = .8*self.scaling['bWL_HST']*self.obs2mass('zeta', self.xi2zeta(self.catalog['xi'][dataID]), self.catalog['redshift'][dataID]), .3, self.scaling['DWL_HST']\n\n ##### Define reasonable mass range\n # xi -> M(xi)\n xi_minmax = np.array([max(2.6,self.catalog['xi'][dataID]-5), self.catalog['xi'][dataID]+3])\n M_xi_minmax = self.obs2mass('zeta', self.xi2zeta(xi_minmax), self.catalog['redshift'][dataID])\n if M_xi_minmax[0]>self.HMF['M_arr'][-1]:\n print \"cluster mass exceeds HMF mass range\", self.catalog['SPT_ID'][dataID],\\\n M_xi_minmax[0], self.HMF['M_arr'][-1]\n return 0\n\n # obs: prediction\n lnobs0 = np.log(self.mass2obs(obsname, self.obs2mass('zeta', self.xi2zeta(self.catalog['xi'][dataID]), self.catalog['redshift'][dataID]), self.catalog['redshift'][dataID]))\n SZscatterobs = self.dlnM_dlnobs('zeta') / self.dlnM_dlnobs(obsname, self.SZmPivot, self.catalog['redshift'][dataID]) * self.scaling['Dsz']\n intrscatter = (SZscatterobs**2 + obsintr**2)**.5\n obsthminmax = np.exp(np.array([lnobs0-5.*intrscatter, lnobs0+3.5*intrscatter]))\n M_obsth_minmax = self.obs2mass(obsname, obsthminmax, self.catalog['redshift'][dataID])\n # obs: measurement\n if obsname in ('Mgas', 'Yx'):\n obsmeasminmax = np.amax((.1, obsmeas-3*obserr)), obsmeas+3*obserr\n else:\n obsmeasminmax = np.exp(np.log(obsmeas)-4*obserr), np.exp(np.log(obsmeas)+3*obserr)\n M_obsmeas_minmax = self.obs2mass(obsname, np.array(obsmeasminmax), self.catalog['redshift'][dataID])\n\n ##### Define grid in mass\n Mmin, Mmax = min(M_xi_minmax[0], M_obsth_minmax[0], M_obsmeas_minmax[0]), max(M_xi_minmax[1], M_obsth_minmax[1], M_obsmeas_minmax[1])\n Mmin, Mmax = max(.5*Mmin, self.HMF['M_arr'][0]), min(Mmax, self.HMF['M_arr'][-1])\n lenObs = 54\n M_obsArr = np.logspace(np.log10(Mmin), np.log10(Mmax), lenObs)\n\n ##### Observable arrays\n lnzeta_arr = np.log(self.mass2obs('zeta', M_obsArr, self.catalog['redshift'][dataID]))\n xi_arr = self.zeta2xi(np.exp(lnzeta_arr))\n obsArr = self.mass2obs(obsname, M_obsArr, self.catalog['redshift'][dataID])\n\n ##### Add radial dependence for X-ray observables\n if obsname in ('Mgas','Yx'):\n # Angular diameter distances in current and reference cosmology [Mpc]\n dA = cosmo.dA(self.catalog['redshift'][dataID], self.cosmology)/self.cosmology['h']\n dAref = cosmo.dA(self.catalog['redshift'][dataID], cosmologyRef)/cosmologyRef['h']\n # R500 [kpc]\n rho_c_z = cosmo.RHOCRIT * cosmo.Ez(self.catalog['redshift'][dataID], self.cosmology)**2\n r500 = 
1000 * (3*M_obsArr/(4*np.pi*500*rho_c_z))**(1/3) / self.cosmology['h']\n # r500 in reference cosmology [kpc]\n r500ref = r500 * dAref/dA\n # Xray observable at fiducial r500...\n obsArr*= (self.catalog['r500'][dataID]/r500ref)**self.scaling['dlnMg_dlnr']\n # ... corrected to reference cosmology\n obsArr*= (dAref/dA)**2.5\n\n lnobsArr = np.log(obsArr)\n\n ##### HMF array for convolution\n M_HMF_arr = M_obsArr\n\n ##### Convert self.HMF to dN/(dlnzeta dlnobs) = dN/dlnM * dlnM/dlnzeta * dlnM/dlnobs\n # This only matter if dlnM/dlnobs is mass-dependent, as for dispersions\n dN_dlnzeta_dlnobs = np.exp(self.HMF_interp(np.log(self.catalog['redshift'][dataID]), np.log(M_HMF_arr)))[0]\n\n ##### HMF on 2D observable grid\n HMF_2d_in = np.zeros((lenObs, lenObs))\n np.fill_diagonal(HMF_2d_in, dN_dlnzeta_dlnobs)\n\n ##### 2D convolution with correlated scatter [lnobs,lnzeta]\n pos = np.empty((lenObs,lenObs,2))\n pos[:,:,0], pos[:,:,1] = np.meshgrid(lnobsArr, lnzeta_arr, indexing='ij')\n kernel = multivariate_normal.pdf(pos, mean=(lnobsArr[27], lnzeta_arr[27]), cov=covmat)\n HMF_2d = signal.fftconvolve(HMF_2d_in, kernel, mode='same')\n\n # set to 0 if zeta<2\n HMF_2d[:,np.where(lnzeta_arr<np.log(2.))] = 0.\n\n # Set small negative values to zero (FFT noise)\n if np.any(HMF_2d<-1e-7):\n if np.abs(np.amin(HMF_2d))/np.amax(HMF_2d)>1e-6:\n print \"HMF_2d has negative entries:\",np.amin(HMF_2d), np.amax(HMF_2d)\n HMF_2d[np.where(HMF_2d<0)] = 0.\n\n # Safety check\n if np.all(HMF_2d==0.):\n print self.catalog['SPT_ID'][dataID],'HMF_2d is zero, det',np.linalg.det(covmat),self.scaling['Dsz'],obsintr,self.scaling['rhoSZX']\n return 0.\n\n ##### dN/(dxi dlnobs) = dN/(dlnzeta dlnobs) * dlnzeta/dxi [lnobs,xi]\n HMF_2d*= self.dlnzeta_dxi(xi_arr)[None,:]\n\n #### Convolve with xi measurement error [lnobs]\n dP_dlnobs = np.trapz(HMF_2d * norm.pdf(self.catalog['xi'][dataID], xi_arr[None,:], 1.), xi_arr, axis=1)\n\n\n ##### Evaluate likelihood\n #dP/dobs = dP/dlnobs * dlnobs/dobs = dP/dlnobs /obs\n dP_dobs = dP_dlnobs/obsArr\n # normalize\n dP_dobs/= np.trapz(dP_dobs, obsArr)\n\n ##### WL\n if obsname in ('WLHST', 'WLMegacam'):\n # Concolve with Gaussian LSS scatter\n if LSSnoise>0.:\n integrand = dP_dobs[None,:] * norm.pdf(obsArr[:,None], obsArr[None,:], LSSnoise)\n dP_dobs = np.trapz(integrand, obsArr, axis=1)\n dP_dobs/= np.trapz(dP_dobs, obsArr)\n # P(Mwl) from data\n Pwl = self.WL.like(self.catalog, dataID, obsArr, self.cosmology, self.MCrel, self.lnM500_to_lnM200)\n # Get likelihood\n likeli = np.trapz(Pwl*dP_dobs, obsArr)\n\n\n ##### X-ray\n else:\n # Get likelihood\n likeli = np.trapz(dP_dobs*norm.pdf(obsmeas, obsArr, obserr), obsArr)\n\n if getpull:\n integrand = dP_dobs[None,:] * norm.pdf(obsArr[:,None], obsArr[None,:], obserr)\n dP_dobs_obs = np.trapz(integrand, obsArr, axis=1)\n dP_dobs_obs/= np.trapz(dP_dobs_obs,obsArr)\n cumtrapz = integrate.cumtrapz(dP_dobs_obs,obsArr)\n perc = np.interp(obsmeas, obsArr[1:], cumtrapz)\n print self.catalog['SPT_ID'][dataID], '%.4f %.4f %.4f %.4e'%(self.catalog['xi'][dataID], self.catalog['redshift'][dataID], obsmeas, 2**.5 * ss.erfinv(2*perc-1))\n\n if ((likeli<0)|(np.isnan(likeli))):\n print self.catalog['SPT_ID'][dataID], obsname, likeli\n #np.savetxt(self.catalog['SPT_ID'][dataID],np.transpose((obsArr, dP_dobs)))\n return 0.\n\n\n return likeli", "def get_stim_onset_times(sessions, metadata_dict):\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session 
info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n if database is not None and session_name in database.index:\n continue\n session_stimuli = {}\n session_stimuli['session_id'] = session_id\n session_stimuli['stimuli'] = {}\n session_stimuli['stimuli']['visual'] = []\n session_stimuli['stimuli']['audio'] = []\n session_stimuli['stimuli']['digital'] = []\n videopaths = []\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n print(videopaths)\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n # Loop over each .tdms file and extract stimuli frames\n print(colored('Loading {}: {}'.format(session_name,os.path.basename(tdmspath)),'yellow'))\n tdms = TdmsFile(tdmspath)\n if metadata_dict[session_name]['software'] == 'behaviour':\n visual_rec_stims, audio_rec_stims, digital_rec_stims = [], [], []\n for group in tdms.groups():\n for obj in tdms.group_channels(group):\n if 'stimulis' in str(obj).lower():\n for idx in obj.as_dataframe().loc[0].index:\n if \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n elif \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n else:\n framen = int(idx.split(\"/'\")[2].split('-')[0])\n if 'visual' in str(obj).lower():\n visual_rec_stims.append(framen)\n elif 'audio' in str(obj).lower():\n audio_rec_stims.append(framen)\n elif 'digital' in str(obj).lower():\n digital_rec_stims.append(framen)\n else:\n print(colored('Couldnt load stim correctly','yellow'))\n # Now use the AI channels to find the *real* stimulus onset times and replace them\n if audio_rec_stims:\n stimulus_on_idx = np.where(tdms.group_channels('AI')[3].data > .55)[0] #in first data sets this is AI 1, later AI 2\n idx_since_last_stimulus_on = np.diff(stimulus_on_idx)\n if stimulus_on_idx.size:\n stimulus_start_idx = stimulus_on_idx[np.append(np.ones(1).astype(bool),idx_since_last_stimulus_on>2*10000)] #usually 10 or 30\n stimulus_start_frame = np.ceil(stimulus_start_idx / 10000 / (33 + 1 / 3) * 1000).astype(int)\n stimulus_start_frame = stimulus_start_frame[stimulus_start_frame > 300]\n else:\n stimulus_start_frame = np.array(audio_rec_stims)\n print('NO STIMULI FOUND!!')\n\n if len(stimulus_start_frame) != len(audio_rec_stims):\n print('audio AI channel does not match number of timestamps by ' + str(len(audio_rec_stims)-len(stimulus_start_frame)) )\n else:\n discrepancy = stimulus_start_frame - audio_rec_stims\n if sum(discrepancy>8):\n print('audio AI channel does not match values of timestamps')\n else:\n print(discrepancy)\n # for conditioning experiment, just use what the tdms says\n # if 'food' in line['Experiment']:\n # stimulus_start_frame = np.array(audio_rec_stims)\n audio_rec_stims = list(stimulus_start_frame)\n\n session_stimuli['stimuli']['visual'].append(visual_rec_stims)\n session_stimuli['stimuli']['audio'].append(audio_rec_stims)\n session_stimuli['stimuli']['digital'].append(digital_rec_stims)\n\n else:\n \"\"\" HERE IS WHERE THE CODE TO GET THE STIM TIMES IN MANTIS WILL HAVE TO BE ADDEDD \"\"\"\n pass\n\n # Add to dictionary (or update entry)\n stimulus_dict[session_name] = session_stimuli\n return stimulus_dict", "def test_multivariate_sim_and_obs_noD_lik(self):\n\n d = SepiaData(t_sim=self.multi_data_dict['t_sim'], y_sim=self.multi_data_dict['y_sim'],\n 
y_ind_sim=self.multi_data_dict['y_ind_sim'], y_obs=self.multi_data_dict['y_obs'],\n y_ind_obs=self.multi_data_dict['y_ind_obs'])\n print('Testing multivariate sim-only SepiaLogLik...', flush=True)\n print(d, flush=True)\n\n d.transform_xt()\n d.standardize_y()\n d.create_K_basis(5)\n model = SepiaModel(d)\n\n model.logLik()\n\n for param in model.params.mcmcList:\n for cindex in range(int(np.prod(param.val_shape))):\n model.logLik(cvar=param.name, cindex=cindex)", "def sample_action(self, obs):\n pass", "def print_obs(self,obs):\n print(obs)", "def my_record_vars(context, data):\n data1 = data.history(context.pair[0], 'close', 15, '1d')\n data2 = data.history(context.pair[1], 'close', 15, '1d')\n data1_s = not_stationary(data1)\n data2_s = not_stationary(data2)\n \n if(data1_s and data2_s):\n \n p_coint = coint(data1, data2)[1]\n if(p_coint < 0.1):\n diff = data1 - data2\n mu = diff.mean()\n sd = diff.std()\n context.pair[2] = True\n context.pair[3] = mu\n context.pair[4] = sd", "def example(self, s, d, s_len, d_len, snr):\n\t\ts, d, x, n_frames = self.mix(s, d, s_len, d_len, snr)\n\t\ts_STDCT = self.stdct_analysis(s)\n\t\td_STDCT = self.stdct_analysis(d)\n\t\tx_STDCT = self.stdct_analysis(x)\n\t\txi = self.xi(s_STDCT, d_STDCT)\n\t\txi_bar = self.xi_map.map(xi)\n\t\tcd = self.cd(s_STDCT, d_STDCT)\n\t\tcd_bar = self.cd_map.map(cd)\n\t\txi_cd_map = tf.concat([xi_bar, cd_bar], axis=-1)\n\t\treturn x_STDCT, xi_cd_map, n_frames", "def observation(self, observation):\n return Simple115StateWrapper.convert_observation(observation, self._fixed_positions)", "def neuron(epsp, last_post_spike_time, tau_m, i, n_action): \n rho0 = 60*10**(-3) # maximum firing rate\n chi = -5 # scales the refractory effect \n theta = 16 # \n delta_u = 2 # randomness of spiking behaviour \n \n refractory = chi*np.exp((-i + last_post_spike_time)/tau_m) \n u = np.sum(epsp, axis=0)[:,np.newaxis] + refractory # membrane potential \n \n # action cell activity \n rho_action = rho0*np.exp((u - theta)/delta_u) # probabilty of emitting a spike \n y = np.random.rand(n_action, 1) <= rho_action # realization of spike train \n \n last_post_spike_time[y] = i \n canc = 1 - y # 0 if postsynaptic neuron spiked, 1 if not \n \n return y, last_post_spike_time, canc.T", "def timeseries_report(self):\n try:\n n = self.n.value\n except AttributeError:\n n = self.n\n results = pd.DataFrame(index=self.variables.index)\n results['ICE Generation (kW)'] = self.variables['ice_gen']\n results['ICE On (y/n)'] = self.variables['on_ice']\n results['ICE P_min (kW)'] = self.p_min\n results['ICE Genset P_max (kW)'] = self.rated_power * n\n return results", "def __getitem__(self, i):\n return eos80.cp(\n self.nc.variables['SSS'].__getitem__(i),\n self.nc.variables['SST'].__getitem__(i),\n self.p)", "def call(self, obs):\n\t\tx = tf.convert_to_tensor(obs)\n\t\thidden_logs = self.hidden1(x)\n\t\thidden_vals = self.hidden2(x)\n\t\treturn self.logits(hidden_logs), self.value(hidden_vals)" ]
[ "0.551871", "0.5380339", "0.5367954", "0.52299434", "0.5219479", "0.50972205", "0.50261205", "0.4998411", "0.4998411", "0.49744838", "0.49730173", "0.49615476", "0.49530286", "0.49497813", "0.49444118", "0.49292263", "0.49218303", "0.49119952", "0.48962384", "0.48684102", "0.48508343", "0.48222297", "0.48096612", "0.48092026", "0.4800477", "0.47858387", "0.47726685", "0.47680464", "0.4766182", "0.4749273" ]
0.5608419
0
Method to expand the neighbor_word_list with synsets, hyponyms and hypernyms. Used in get_neighbor_words_set()
def expand_synset(neighbor_word_list, language): if not expand_synset_activated or not language.__eq__("English"): return neighbor_word_list new_neighbor_word_set = set() for word in neighbor_word_list: new_neighbor_word_set.add(word) synsets = wn.synsets(word) synonyms_list = [item.name().split('.')[0] for item in synsets] # extract: "dog.n.01"->"dog" for new_word in synonyms_list: new_neighbor_word_set.add(new_word) for i in xrange(synonyms_list.__len__()): if synonyms_list[i].__eq__(word): hyponyms = synsets[i].hyponyms() hyponyms_list = [item.name().split('.')[0] for item in hyponyms] # extract: "dog.n.01"->"dog" for new_word1 in hyponyms_list: new_neighbor_word_set.add(new_word1) hypernyms = synsets[i].hypernyms() hypernyms_list = [item.name().split('.')[0] for item in hypernyms] # extract: "dog.n.01"->"dog" for new_word1 in hypernyms_list: new_neighbor_word_set.add(new_word1) return list(new_neighbor_word_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_neighbor_words_list(sentence, language):\n if language.__eq__(\"Spanish\") or language.__eq__(\"Catalan\"):\n sentence = sentence.getElementsByTagName('target')[0]\n\n left_list, right_list = get_left_right_lists(sentence, language)\n neighbor_word_list = []\n\n for item in left_list[-k:]:\n neighbor_word_list.append(item)\n for item in right_list[:k]:\n neighbor_word_list.append(item)\n\n neighbor_word_list = expand_synset(neighbor_word_list, language) # add synsets, hypernyms, hyponyms here\n\n return neighbor_word_list", "def wordnet_expansion(lexicon, n_words=None):\r\n if n_words is None:\r\n n_words = 5\r\n\r\n looker = wordnet.synsets\r\n expanded_lexicon = [\r\n [str(synonym.lemmas()[0].name()) for synonym in looker(word)[:n_words]]\r\n for word in lexicon\r\n ]\r\n\r\n return expanded_lexicon", "def build_word_relations():\n song_urls = lyricsorter.get_song_url_list()\n viablewords = find_viable_words()\n word_list = []\n relation_dict = {}\n for i, link in enumerate(song_urls):\n response = song_table.get_item(\n Key={\n 'id': link\n }\n )\n lyrics = []\n print(\"Working on song# {}\".format(str(i)))\n try:\n lyrics = response['Item']['lyric_array']\n except KeyError:\n pass\n for index, line in enumerate(lyrics):\n for index2, w in enumerate(line):\n if w not in viablewords:\n lyrics[index][index2] = \"\"\n for index, line in enumerate(lyrics):\n for index2, w in enumerate(line):\n __line_parse(index2, line, relation_dict, word_list)\n\n for i, word in enumerate(word_list):\n print(\"Inserting #{} word in wordlist of size {}\".format(str(i), str(len(word_list))))\n Item1 = {\n 'id': str(word + \"_1\"),\n \"words\": relation_dict[word][str(word + \"_1\")]\n }\n Item2 = {\n 'id': str(word + \"_2\"),\n \"words\": relation_dict[word][str(word + \"_2\")]\n }\n Item3 = {\n 'id': str(word + \"_3\"),\n \"words\": relation_dict[word][str(word + \"_3\")]\n }\n word_relation_table.put_item(\n Item=Item1\n )\n word_relation_table.put_item(\n Item=Item2\n )\n word_relation_table.put_item(\n Item=Item3\n )", "def set_subhead_searched_words(self):\n\n searched_words = []\n for i in range(self.subhead.shape[0]):\n if (\n self.subhead[\"searched_pair_word\"][i]\n in self.subhead[\"searched_unique_single_word_synonym\"][i]\n ):\n searched_words.append(\n self.subhead[\"searched_unique_single_word_synonym\"][i]\n )\n else:\n searched_words.append(\n self.subhead[\"searched_unique_single_word_synonym\"][i]\n + self.subhead[\"searched_pair_word\"][i]\n )\n self.subhead[\"searched_words\"] = searched_words", "def build_word_relations_4():\n song_urls = lyricsorter.get_song_url_list()\n viablewords = find_viable_words()\n word_list = []\n relation_dict = {}\n\n for i, link in enumerate(song_urls):\n print(\"parsing through song #{}\".format(str(i)))\n response = song_table.get_item(\n Key={\n 'id': link\n }\n )\n lyrics = []\n try:\n lyrics = response['Item']['lyric_array']\n except KeyError:\n pass\n for index, line in enumerate(lyrics):\n for index2, w in enumerate(line):\n if w not in viablewords:\n lyrics[index][index2] = \"\"\n for index, line in enumerate(lyrics):\n for index2, w in enumerate(line):\n __line_parse_4(index2, line, relation_dict, word_list)\n\n print(len(word_list))\n for i, word in enumerate(word_list):\n print(\"inserting word #{} of {}\".format(str(i), str(len(word_list))))\n Item = {\n 'id': str(word + \"_4\"),\n \"words\": relation_dict[word][str(word + \"_4\")]\n }\n word_relation_table.put_item(\n Item=Item\n )\n print(\"added {}\".format(word))", "def 
update_adj_list(vertice):\n\n if vertice.shortcuts is None:\n return\n\n for passage, shortcut, weight_shortcut in vertice.shortcuts:\n \n vertice.adj_list.append([passage, shortcut, weight_shortcut])", "def get_hypernyms(word):\n syn = wn.synsets(word)\n hnyms = []\n for h in syn[0].hypernyms():\n hnyms.append({\n \"lemmas\": h.lemma_names(),\n \"d\": h.definition(),\n \"pos\": h.pos(),\n \"id\": h.name()\n })\n return hnyms", "def get_hyponyms(word):\n syn = wn.synsets(word)\n hnyms = []\n for h in syn[0].hyponyms():\n print h\n hnyms.append({\n \"lemmas\": h.lemma_names(),\n \"d\": h.definition(),\n \"pos\": h.pos(),\n \"id\": h.name()\n })\n return hnyms", "def merge_nw_nnw(self):\n nw_nnw = {}\n nw_dict = self.ontology.heirs_network_dictionary\n nnw_dict = self.model.networks_w_namednw_dict\n for label, things in nw_dict.items():\n nw_nnw[label] = things\n if label in nnw_dict.keys():\n nw_nnw[label] = set(nnw_dict[label])\n return nw_nnw", "def _set_neighs_general_list(self, key):\n ### WARNING: NOT WORK WITH EMPTY NEIGHS\n if '__len__' not in dir(key):\n self._set_neighs_number(key)\n else:\n if len(key) == 0:\n self._set_neighs_list_only(key)\n elif '__len__' not in dir(key[0]):\n self._set_neighs_list_only(key)\n else:\n if all([len(key[i]) == 0 for i in range(len(key))]):\n self._setted = False\n if self.staticneighs:\n self.idxs = np.array([[]])\n else:\n self.idxs = np.array([[[]]])\n elif '__len__' not in dir(key[0][0]):\n self._set_neighs_list_list(key)\n else:\n self._set_neighs_list_list_list(key)", "def _expand_suggestions(self):\n# global suggestions\n self.tree.item('suggestions', open=False, \\\n values=[self._count_children('suggestions'), ''])", "def expandW(w, n_hidden_units):\n i1 = 784 * n_hidden_units\n i2 = i1 + n_hidden_units\n i3 = i2 + n_hidden_units * 10\n i4 = i3 + 10\n assert i4 == w.size, str(i4) + ' ' + str(w.size)\n W1 = w[0:i1].reshape((n_hidden_units, 784))\n b1 = w[i1:i2]\n W2 = w[i2:i3].reshape((10, n_hidden_units))\n b2 = w[i3:i4]\n return W1, b1, W2, b2", "def hypernym(self, sense=None):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n hyper = s.hypernyms()\n\n results = list()\n for h in hyper:\n results.append(h.lemma_names())\n\n if not sense:\n return results\n\n return results[:sense]", "def _expand_main_list(self):\n\n # Compute how much to extend underlying list by\n new_length = self.resizing_factor * len(self.main_list)\n change_in_length = new_length - len(self.main_list)\n\n # Entend underlying list\n self.main_list.extend([None] * change_in_length)", "def get_synonyms(word):\n synsets = [];\n syns = wn.synsets(word)\n for ss in syns:\n lemmas = []\n for l in ss.lemmas():\n lemma = { \"name\": l.name(), \"related_forms\": [] }\n for x in l.derivationally_related_forms():\n lemma['related_forms'].append(x.name())\n lemmas.append(lemma)\n synsets.append({\n \"lemmas\": lemmas,\n \"d\": ss.definition(),\n \"pos\": ss.pos(),\n \"id\": ss.name()\n })\n return synsets", "def _set_neighs_list_list(self, key):\n if self._constant_neighs:\n key = np.array(key)\n if self.staticneighs:\n self.idxs = key\n self.ks = range(1) if self.ks is None else self.ks\n else:\n self.ks = range(1) if self.ks is None else self.ks\n len_ks = len(self.ks)\n self.idxs = [key for k in range(len_ks)]\n if type(key) == np.ndarray:\n self.idxs = np.array(self.idxs)\n if len(self.iss) != len(key):\n if len(self.iss) != len(key):\n self.iss = range(len(key))\n# if len(self.idxs[0]) > 0:\n# self.iss = list(range(len(self.idxs)))\n self._setted = True", "def 
buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1", "def _make_suggestions(self):\n\n #build concordance based on current approved\n concordance = dict()\n for term in self.tree.get_children('approved'):\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n for word in words:\n# if word == 'ad':\n# messagebox.showwarning(\"word == 'ad'\",\"concordance={}\".format(concordance))\n# pass\n if word not in ['and', 'the', 'a', 'to', 'of'] \\\n and not word.isdigit():\n if word not in concordance:\n concordance[word] = set([term, ])\n else:\n concordance[word].add(term)\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' added?\",\"concordance={}\".format(concordance))\n# pass\n \n \n #so concordance now holds a list of words in approved terms along with\\\n #list of index of terms() they occur in\n \n for term in self.tree.get_children('suggestions'):\n self._look_in_concordance(term, concordance)\n\n for term in self.tree.get_children('unknown'):\n self._look_in_concordance(term, concordance)\n\n self._collapse_all()", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. 
using dymanic programming to reduce computing time\n \n return seen", "def _set_neighs_list_list_list(self, key):\n self.ks = list(range(len(key))) if self.ks is None else self.ks\n if self._constant_neighs:\n self.idxs = np.array(key)\n else:\n self.idxs = key\n if len(self.idxs[0]) != len(self.iss):\n self.iss = list(range(len(self.idxs[0])))\n if self.staticneighs:\n self.idxs = self.idxs[0]\n self._setted = True", "def add_entry(wn, synset, lemma, idx=0, n=-1, change_list=None):\n print(\"Adding %s to synset %s\" % (lemma, synset.id))\n n_entries = len(empty_if_none(wn.members_by_id(synset.id)))\n entry_global = [entry for entry in empty_if_none(wn.entry_by_lemma(lemma))\n if wn.entry_by_id(entry).lemma.part_of_speech == synset.part_of_speech or\n wn.entry_by_id(entry).lemma.part_of_speech == PartOfSpeech.ADJECTIVE and synset.part_of_speech == PartOfSpeech.ADJECTIVE_SATELLITE or\n wn.entry_by_id(entry).lemma.part_of_speech == PartOfSpeech.ADJECTIVE_SATELLITE and synset.part_of_speech == PartOfSpeech.ADJECTIVE]\n\n if len(entry_global) == 1:\n entry_global = wn.entry_by_id(entry_global[0])\n n_senses = len(entry_global.senses)\n else:\n entry_global = None\n n_senses = 0\n\n if idx <= 0:\n idx = n_entries + 1\n elif idx > n_entries + 1:\n raise Exception(\"IDX value specified is higher than number of entries\")\n elif idx == n_entries + 1:\n pass\n else:\n for sense_id in sense_ids_for_synset(wn, synset):\n this_idx = int(sense_id[-2:])\n if this_idx >= idx:\n change_sense_idx(wn, sense_id, this_idx + 1)\n\n if n < 0:\n n = n_senses\n elif n > n_senses:\n raise Exception(\"n value exceeds number of senses for lemma\")\n elif n == n_senses:\n pass\n else:\n sense_n = 0\n for sense in entry_global.senses:\n if sense_n >= n:\n change_sense_n(wn, entry_global, sense.id, sense_n + 1)\n sense_n += 1\n\n wn_synset = wn\n entries = [entry for entry in empty_if_none(wn_synset.entry_by_lemma(\n lemma)) if wn.entry_by_id(entry).lemma.part_of_speech == synset.part_of_speech]\n\n if entries:\n if len(entries) != 1:\n raise Exception(\"More than one entry for part of speech\")\n print(\"Found an entry!\")\n wn_entry = wn.entry_by_id(entries[0])\n entry = wn_synset.entry_by_id(entries[0])\n sense = Sense(\n id=\"oewn-%s-%s-%s-%02d\" %\n (escape_lemma(lemma),\n synset.part_of_speech.value,\n synset_key(\n synset.id),\n idx),\n synset=synset.id,\n n=n,\n sense_key=None)\n\n wn_entry.senses.append(sense)\n entry.senses.append(sense)\n sense.sense_key = get_sense_key(wn, entry, sense, synset.lex_name)\n if sense.synset not in wn.members:\n wn.members[sense.synset] = []\n wn.members[sense.synset].append(wn_entry.lemma.written_form)\n else:\n n = 0\n print(\"Creating new entry\")\n entry = LexicalEntry(\n \"oewn-%s-%s\" % (escape_lemma(lemma), synset.part_of_speech.value))\n entry.set_lemma(Lemma(lemma, synset.part_of_speech))\n sense = Sense(\n id=\"oewn-%s-%s-%s-%02d\" %\n (escape_lemma(lemma),\n synset.part_of_speech.value,\n synset_key(\n synset.id),\n idx),\n synset=synset.id,\n n=n,\n sense_key=None)\n entry.add_sense(sense)\n sense.sense_key = get_sense_key(wn, entry, sense, synset.lex_name)\n wn.add_entry(entry)\n if change_list:\n change_list.change_entry(wn, entry)\n return entry", "def make_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _hexose = \"(%s)\" % ' + '.join(\n map(str, 
map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_neuac = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if i == 0:\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_neuac) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 1, base_neuac + i\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def expand2(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n dist_from_pattern = self.dist[network.getrow(neighb).indices] \n dist_of_added = dist_from_pattern[dist_from_pattern > -1].min() + 1\n if dist_of_added > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((pred, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_of_added\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def addNeighbor(self, neighbor):", "def weed_out_synonyms(word, potential_synonyms):\n real_synonyms = set()\n for synonym in potential_synonyms:\n max_distance = abs(len(word) - len(synonym))\n abbr_len = min(len(word), len(synonym))\n forgiveness = round(1/7 * abbr_len)\n if lev.distance(word, synonym) <= max_distance + forgiveness:\n # Then it's a synonym!\n real_synonyms.add(synonym)\n return real_synonyms", "def set_neighbours(self,knodes):\n self.neighbours = []\n for kn in knodes:\n # Make sure we don't have ourselves as a neighbour:\n if kn.ident == self.ident:\n continue\n # A neighbour has a path length 1:\n self.neighbours.append(\\\n kn._replace(path_len=1))\n\n\n # Update known nodes:\n self.add_known_nodes(0,self.neighbours)", "def _get_all_insertions(synonym, enc_word, ins_word, solution_format=None):\n words = [enc_word[0:i] + ins_word + enc_word[i:] for i in range(1, len(enc_word))]\n if solution_format is not None:\n words = [solution_format.add_spaces(word) for word in words if solution_format.check(word)]\n\n solutions = [(word, SimilaritySolver.solve(synonym, 0, word.replace(\" \", \"_\"))) for word in words]\n return solutions", "def expand(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n preds = list(set(network.getrow(neighb).indices) & self.genes) \n if len(preds)>2:\n 
pass\n dist_seed = self.dist[preds].min() + 1\n if dist_seed > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n next_pattern.edges |= set((pred, neighb) for pred in preds) \n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_seed\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n word.get_definition(definer=self.definer)" ]
[ "0.6233785", "0.54712224", "0.5385365", "0.53399545", "0.5255301", "0.5233051", "0.5192063", "0.51569444", "0.5123623", "0.50904995", "0.5077879", "0.50651765", "0.50616145", "0.5047993", "0.5031947", "0.5022816", "0.4951013", "0.4943876", "0.49117774", "0.49117774", "0.48616078", "0.4846504", "0.48395777", "0.48346737", "0.48318562", "0.48224765", "0.48007676", "0.47968182", "0.47910467", "0.47907305" ]
0.806715
0
Method to read the test set, parse the test data, and map each instance into a vector space
def parse_test_data(test_set, training_output, language): print "Reading test set: " + test_set xmldoc = minidom.parse(test_set) data = {} lex_list = xmldoc.getElementsByTagName('lexelt') for node in lex_list: lexelt = node.getAttribute('item') # item "active.v" data[lexelt] = [] inst_list = node.getElementsByTagName('instance') for inst in inst_list: instance_id = inst.getAttribute('id') # id "activate.v.bnc.00024693" neighbor_word_list = training_output[lexelt]["neighbor_word_list"] _4c_4d_feature = training_output[lexelt]["4c_4d_feature"] x = extract_vector(inst, neighbor_word_list, _4c_4d_feature, language) data[lexelt].append((instance_id, x)) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self, test_data):\n with open(test_data, 'r') as test_data:\n results = {}\n for type in self.label_type_map:\n results[self.label_type_map[type]] = []\n while True:\n tokens = test_data.readline().split()\n pos = test_data.readline().split()\n indices = test_data.readline().split()\n if not tokens or not pos or not indices:\n break\n curr_results = self.viterbi(tokens)\n intervals = self.extract_intervals(curr_results, indices)\n for type in intervals:\n for interval in intervals[type]:\n results[type].append(interval)\n self.write_results(results)", "def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)", "def load_testset(self, fn):\n w = codecs.open(fn, 'r', 'utf-8')\n data = w.read().split('\\n')[:-1]\n w.close()\n\n # split labels and sentences\n data = [i.split(':') for i in data]\n # reverse elements and connect subsentences in case of additional colons\n self.test_set = [(':'.join(z[1:]), z[0]) for z in data]\n return self.test_set", "def test_reading_and_writing_of_vector_point_data(self):\n\n # First test that some error conditions are caught\n filename = unique_filename(suffix='nshoe66u')\n try:\n read_layer(filename)\n except ReadLayerError:\n pass\n else:\n msg = 'Exception for unknown extension should have been raised'\n raise Exception(msg)\n\n filename = unique_filename(suffix='.gml')\n try:\n read_layer(filename)\n except ReadLayerError:\n pass\n else:\n msg = 'Exception for non-existing file should have been raised'\n raise Exception(msg)\n\n # Read and verify test data\n for vectorname in ['test_buildings.shp',\n 'tsunami_building_exposure.shp',\n 'Padang_WGS84.shp',\n ]:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n coords = numpy.array(layer.get_geometry())\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert coords.shape[0] == N\n assert coords.shape[1] == 2\n\n assert FEATURE_COUNTS[vectorname] == N\n\n assert isinstance(layer.get_name(), basestring)\n\n # Check projection\n wkt = layer.get_projection(proj4=False)\n assert wkt.startswith('GEOGCS')\n\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check integrity of each feature\n field_names = None\n for i in range(N):\n # Consistency between of geometry and fields\n\n x1 = coords[i, 0]\n x2 = attributes[i]['LONGITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n x1 = coords[i, 1]\n x2 = attributes[i]['LATITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n # Verify that each feature has the same fields\n if field_names is None:\n field_names = attributes[i].keys()\n else:\n assert len(field_names) == len(attributes[i].keys())\n assert field_names == attributes[i].keys()\n\n # Write data back to file\n # FIXME (Ole): I would like to use gml here, but OGR does not\n # store the spatial reference! 
Ticket #18\n out_filename = unique_filename(suffix='.shp')\n Vector(geometry=coords, data=attributes, projection=wkt,\n geometry_type='point').write_to_file(out_filename)\n\n # Read again and check\n layer = read_layer(out_filename)\n assert layer.is_point_data\n coords = numpy.array(layer.get_geometry())\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert coords.shape[0] == N\n assert coords.shape[1] == 2\n\n # Check projection\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check integrity of each feature\n field_names = None\n for i in range(N):\n\n # Consistency between of geometry and fields\n x1 = coords[i, 0]\n x2 = attributes[i]['LONGITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n x1 = coords[i, 1]\n x2 = attributes[i]['LATITUDE']\n assert x2 is not None\n msg = 'Inconsistent longitudes: %f != %f' % (x1, x2)\n assert numpy.allclose(x1, x2), msg\n\n # Verify that each feature has the same fields\n if field_names is None:\n field_names = attributes[i].keys()\n else:\n assert len(field_names) == len(attributes[i].keys())\n assert field_names == attributes[i].keys()\n\n # Test individual extraction\n lon = layer.get_data(attribute='LONGITUDE')\n assert numpy.allclose(lon, coords[:, 0])", "def test__load_training_set():\n classifier = classifier_module.Classifier(None)\n set = classifier._load_training_set('test')\n for i in range(0, 5):\n signal_list = set[i]\n assert signal_list[0].get_x() == 1.0 + i * 0.028\n assert signal_list[0].get_y() == 1.00 - i * i * 0.20 * 0.30\n\n assert signal_list[1].get_x() == 2.0 - i * 0.011\n assert signal_list[1].get_y() == 2.00 - i * 0.020", "def read_test_tuples():\n lines = read_input(25, True)\n point_sets = list(parse_points(lines))\n expected_counts = [4, 3, 8]\n\n return zip(point_sets, expected_counts)", "def load_data(directory: str, dataset_str: str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"{}/ind.{}.{}\".format(directory, dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"{}/ind.{}.test.index\".format(directory, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = 
np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return graph, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData = actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def train(self, test_vector):\n\t\twith open(self.PATH + '/src/data/train_emma.csv', 'rt') as f:\n\t\t\treader = csv.reader(f)\n\n\t\t\ttrain_data = dict()\n\t\t\ttrain_data_labels = list()\n\t\t\ttrain_data_list = []\n\t\t\ttrain_data_labels_list = []\n\n\t\t\tnext(reader, None)\n\t\t\tfor row in reader:\n\t\t\t\tfor idx in range(len(row)):\n\t\t\t\t\tif idx == 0:\n\t\t\t\t\t\ttrain_data['file'] = row[idx]\n\t\t\t\t\tif idx == 1:\n\t\t\t\t\t\ttrain_data['line'] = int(row[idx])\n\t\t\t\t\tif idx == 2:\n\t\t\t\t\t\ttrain_data['timestamp'] = row[idx]\n\t\t\t\t\tif idx == 3:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\t\t\t\t\tif idx == 4:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\n\t\t\t\ttrain_data_list.append(train_data)\n\t\t\t\ttrain_data_labels_list.append(train_data_labels)\n\t\t\t\ttrain_data = dict()\n\t\t\t\ttrain_data_labels = list()\n\n\t\t\tC = 0.8\n\t\t\tdict_vectorizer = DictVectorizer(sparse=False)\n\t\t\ttrain_data_trasformed = dict_vectorizer.fit_transform(train_data_list)\n\t\t\ttest_vector_transformed = dict_vectorizer.transform(test_vector)\n\n\t\t\t# print(dict_vectorizer.get_feature_names())\n\t\t\t# print(dict_vectorizer.inverse_transform(train_data_trasformed))\n\n\t\t\t# print('Inverse transformation !!!')\n\t\t\t# print(test_vector)\n\t\t\t# inv_trans = dict_vectorizer.inverse_transform(test_vector_transformed)\n\n\t\t\t# fit LinearSVC\n\t\t\t# multi label binarizer to convert iterable of iterables into processing format\n\t\t\tmlb = MultiLabelBinarizer()\n\t\t\ty_enc = mlb.fit_transform(train_data_labels_list)\n\n\t\t\ttrain_vector = OneVsRestClassifier(svm.SVC(probability=True))\n\t\t\tclassifier_rbf = train_vector.fit(train_data_trasformed, y_enc)\n\n\t\t\t# test_vecc = cnt_vectorizer.fit_transform(X[:, 0])\n\t\t\t# # todo use pickle to persist\n\t\t\t# test_vector_reshaped = np.array(test_vector.ravel()).reshape((1, -1))\n\t\t\tprediction = classifier_rbf.predict(test_vector_transformed)\n\n\n\t\t\tprint(\"Predicted usernames: \\n\")\n\t\t\t# print(prediction)\n\t\t\t# print(mlb.inverse_transform(prediction))\n\n\t\t\tusers = self.parse_prediction(mlb.inverse_transform(prediction))\n\t\t\tprint(users)\n\t\t\treturn users", "def load_data(dataset_str):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i 
in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\n \"data/corpus/{}/{}.test.index\".format(dataset_str, dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)\n\n # training nodes are training docs, no initial features\n # print(\"x: \", x)\n # test nodes are training docs, no initial features\n # print(\"tx: \", tx)\n # both labeled and unlabeled training instances are training docs and words\n # print(\"allx: \", allx)\n # training labels are training doc labels\n # print(\"y: \", y)\n # test labels are test doc labels\n # print(\"ty: \", ty)\n # ally are labels for labels for allx, some will not have labels, i.e., all 0\n # print(\"ally: \\n\")\n # for i in ally:\n # if(sum(i) == 0):\n # print(i)\n # graph edge weight is the word co-occurence or doc word frequency\n # no need to build map, directly build csr_matrix\n # print('graph : ', graph)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(\n min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n # print(len(labels))\n\n idx_test = test_idx_range.tolist()\n # print(idx_test)\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def load_data(dataset_str, isnormalize):\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"data/ind.{}.test.index\".format(dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n 
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\t# This normalization is used for cora but not citeseer\n if isnormalize == 1:\n adj = adj+ sp.eye(adj.shape[0])\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n adj = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)\n # end of normalization.\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n \n\t\n return adj, normalize(features).toarray(), y_train, y_val, y_test, train_mask, val_mask, test_mask, labels", "def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def load_data():\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n\r\n # Load the data\r\n\r\n with open(\"clean_real.txt\", 'r') as RealNews:\r\n RealStrAr = RealNews.read().split('\\n')\r\n\r\n with open(\"clean_fake.txt\", 'r') as FakeNews:\r\n FakeStrAr = FakeNews.read().split('\\n')\r\n\r\n # Preprocess it using a vectorizer\r\n\r\n MyCoolVectorizer = CountVectorizer()\r\n X = MyCoolVectorizer.fit_transform(RealStrAr + FakeStrAr)\r\n\r\n RealLabels = np.ones((len(RealStrAr), 1)) # means real\r\n FakeLabels = np.zeros((len(FakeStrAr), 1)) # means fake\r\n AllLabels = np.append(RealLabels, FakeLabels, axis=0)\r\n\r\n FinalTensor = np.append(X.toarray(), AllLabels, axis=1)\r\n\r\n # Randomize it and split it\r\n\r\n np.random.shuffle(FinalTensor)\r\n\r\n # divide and multiply by 2 just to make sure it's even\r\n ROUGHLY70 = 2 * ((FinalTensor.shape[0] * 70 / 100) / 2)\r\n ROUGHLY15 = (FinalTensor.shape[0] - ROUGHLY70) / 
2\r\n\r\n # TEST SET VALIDATION SET TRAINING SET DICTIONARY\r\n return (FinalTensor[:ROUGHLY15], FinalTensor[ROUGHLY15 : 2 * ROUGHLY15], FinalTensor[-ROUGHLY70:], MyCoolVectorizer.get_feature_names())", "def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def parse_data():\n n_train, n_test = 15000, 4996\n n_features = 1355191\n\n print('- parsing train data')\n X_train = sp.lil_matrix((n_train, n_features))\n y_train = np.zeros(n_train)\n with open('/Users/kitazawa/data/news20.train') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_train[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_train[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_train.nnz / (n_train * n_features)))\n\n print('- parsing test data')\n X_test = sp.lil_matrix((n_test, n_features))\n y_test = np.zeros(n_test)\n with open('/Users/kitazawa/data/news20.test') as f:\n lines = map(lambda l: l.rstrip().split(' '), f.readlines())\n for i, line in enumerate(lines):\n y_test[i] = int(line[0])\n\n for fv in line[1:]:\n f, v = fv.split(':')\n X_test[i, (int(f) - 1)] = float(v)\n print('-- density: %f' % (X_test.nnz / (n_test * n_features)))\n\n return X_train, y_train, X_test, y_test", "def test_pipeline_basic(self):\n raw_data = pd.read_csv(\n join(get_test_data_file(), INPUT_DATA_FILENAME), encoding=\"iso-8859-1\"\n )\n\n # preprocessing\n processed_df = preprocess_pipeline(raw_data)\n\n # assumptions about the data\n # check number of rows\n assert processed_df.shape[0] > 0\n\n vectorizer, term_document_matrix, word_list = create_tfidf_term_document_matrix(\n processed_df\n )\n\n assert vectorizer is not None\n assert term_document_matrix is not None\n assert word_list is not None", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def predictSet(self, testData=\"\"):\n rawTestDataDump = self._read_file(testData)\n formattedTestData = [line.split(' ') for line in rawTestDataDump.split('\\n')]\n for test in formattedTestData:\n self._predictions.append(self.predict(test))\n return self._predictions", "def test_reading_and_writing_of_vector_line_data(self):\n\n # 
Read and verify test data\n vectorname = 'indonesia_highway_sample.shp'\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n\n assert len(geometry) == N\n assert len(attributes) == N\n assert len(attributes[0]) == 3\n\n assert FEATURE_COUNTS[vectorname] == N\n assert isinstance(layer.get_name(), basestring)\n\n # Check projection\n wkt = layer.get_projection(proj4=False)\n assert wkt.startswith('GEOGCS')\n\n assert layer.projection == Projection(DEFAULT_PROJECTION)\n\n # Check each line\n for i in range(N):\n geom = geometry[i]\n n = geom.shape[0]\n # A line should have more than one point.\n assert n > 1\n # A point should have two dimensions.\n assert geom.shape[1] == 2\n\n # Check that not all points are the same\n max_dist = 0\n for j in range(n):\n d = numpy.sum((geom[j] - geom[0]) ** 2) / n\n if d > max_dist:\n max_dist = d\n assert max_dist > 0\n\n expected_features = {0: {'LANES': 2,\n 'TYPE': 'primary',\n 'NAME': 'route1'},\n 1: {'LANES': 1,\n 'TYPE': 'secondary',\n 'NAME': 'route2'}}\n\n for i in range(N):\n # Consistency with attributes read manually with qgis\n\n if i in expected_features:\n att = attributes[i]\n exp = expected_features[i]\n\n for key in exp:\n msg = ('Expected attribute %s was not found in feature %i'\n % (key, i))\n assert key in att, msg\n\n a = att[key]\n e = exp[key]\n msg = 'Got %s: \"%s\" but expected \"%s\"' % (key, a, e)\n assert a == e, msg\n\n # Write data back to file\n # FIXME (Ole): I would like to use gml here, but OGR does not\n # store the spatial reference! Ticket #18\n out_filename = unique_filename(suffix='.shp')\n Vector(geometry=geometry, data=attributes, projection=wkt,\n geometry_type='line').write_to_file(out_filename)\n\n # Read again and check\n layer = read_layer(out_filename)\n assert layer.is_line_data\n geometry_new = layer.get_geometry()\n attributes_new = layer.get_data()\n\n N = len(layer)\n assert len(geometry_new) == N\n assert len(attributes_new) == N\n\n for i in range(N):\n assert numpy.allclose(geometry[i],\n geometry_new[i],\n rtol=1.0e-6) # OGR works in single precision\n\n assert len(attributes_new[i]) == 3\n for key in attributes_new[i]:\n assert attributes_new[i][key] == attributes[i][key]", "def testData(self, ):\n count = 0\n while count < len(self.RAD_sequences_test):\n RAD_filename = self.RAD_sequences_test[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"test_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1", "def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in 
data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)", "def load_dataset(self, testPrefix = 'cv9', root = 'datasets', classes = [ 'pos', 'neg' ]):\n\n\t\tfor senti_class in classes:\n\n\t\t\tdirname = os.path.join(root, senti_class)\n\n\t\t\tfor filename in os.listdir(dirname):\n\n\t\t\t\twith open(os.path.join(dirname, filename)) as file:\n\n\t\t\t\t\tcontent = file.read()\n\n\t\t\t\t\tif filename.startswith(testPrefix):\n\t\t\t\t\t\t# Testing data\n\t\t\t\t\t\tself.testing_set.append(content)\n\t\t\t\t\t\tself.testing_labels.append(senti_class)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Training data\n\t\t\t\t\t\tself.training_set.append(content)\n\t\t\t\t\t\tself.training_labels.append(senti_class)\n\n\t\tself._vectorize(self.vectorizer)", "def runTest(self):\n # workaround for Python 2.6\n if self.skip:\n return\n\n f = Dataset(self.file, 'a')\n w = f.variables[\"vl2\"]\n v = f.variables[\"vl\"]\n w[0:3] = np.arange(3, dtype=np.float64)\n v[0] # sometimes crashes\n v[0].tolist() # sometimes crashes\n v[0].size # BOOM!\n f.close()", "def _create_examples(self, data_dir, set_type):\n\t\texamples = []\n\t\tinput_file_data = os.path.join(data_dir, \"data.tsv\")\n\t\twith open(input_file_data, \"r\", encoding=\"utf-8-sig\") as f:\n\t\t\tfor i, inp in enumerate(f):\n\t\t\t\tinps = inp.split('\\t') \n\t\t\t\tguid = \"%s-%s\" % (set_type, i)\n\t\t\t\ttext_inp = inps[1].strip()\n\t\t\t\ttext_out = inps[2].strip()\n\t\t\t\texamples.append(InputExample(guid=guid, text_inp=text_inp, text_out=text_out))\n\t\t\t\t\n\t\t\t# Sort these out before returning\n\t\t\texamples = sorted(examples, key=sort_inp_len)\n\t\t\treturn examples", "def test_save_and_load_svmlight_file(self):\n self.logger.info(\"Testing libsvm dataset loading and saving...\")\n\n test_file = fm.join(fm.abspath(__file__), \"myfile.libsvm\")\n\n # Cleaning test file\n try:\n fm.remove_file(test_file)\n except (OSError, IOError) as e:\n if e.errno != 2:\n raise e\n\n self.logger.info(\"Patterns saved:\\n{:}\".format(self.patterns))\n self.logger.info(\"Labels saved:\\n{:}\".format(self.labels))\n\n CDataLoaderSvmLight.dump(\n CDataset(self.patterns, self.labels), test_file)\n\n new_dataset = CDataLoaderSvmLight().load(test_file)\n\n self.assertFalse((new_dataset.X != self.patterns).any())\n self.assertFalse((new_dataset.Y != self.labels).any())\n\n # load data but now remove all zero features (colums)\n new_dataset = CDataLoaderSvmLight().load(\n test_file, remove_all_zero=True)\n\n self.logger.info(\"Patterns loaded:\\n{:}\".format(new_dataset.X))\n self.logger.info(\"Labels loaded:\\n{:}\".format(new_dataset.Y))\n self.logger.info(\n \"Mapping back:\\n{:}\".format(new_dataset.header.idx_mapping))\n\n self.assertTrue(new_dataset.X.issparse)\n self.assertTrue(new_dataset.Y.isdense)\n self.assertTrue(new_dataset.header.idx_mapping.isdense)\n\n # non-zero elements should be unchanged\n self.assertEqual(self.patterns.nnz, new_dataset.X.nnz)\n new_nnz_data = new_dataset.X.nnz_data\n self.assertFalse((self.patterns.nnz_data != new_nnz_data.sort()).any())\n\n # With idx_mapping we should be able to reconstruct original data\n original = CArray.zeros(self.patterns.shape, sparse=True)\n original[:, new_dataset.header.idx_mapping] = new_dataset.X\n self.assertFalse((self.patterns != original).any())\n\n # Cleaning test file\n try:\n fm.remove_file(test_file)\n except (OSError, 
IOError) as e:\n if e.errno != 2:\n raise e", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def __init__(self, inFileString, testFileString, attLength):\r\n self.nrActions = 2\r\n self.headerList = []\r\n self.numAttributes = 0 # Saves the number of attributes in the input file.\r\n self.numSamples = 0 \r\n self.attributeLength = attLength\r\n self.attributeCombos = 3\r\n self.testFileString = testFileString \r\n self.classPosition = 0\r\n\r\n #Final data objects.\r\n self.fTrainData = self.formatData(inFileString,True)\r\n self.fTestData = self.formatData(testFileString,False)\r\n print len(self.fTrainData)\r\n print len(self.fTestData)" ]
[ "0.66019076", "0.62800115", "0.6211686", "0.6174027", "0.60878825", "0.60251486", "0.60153896", "0.599982", "0.59790295", "0.5974165", "0.5945519", "0.59419864", "0.59393996", "0.5916594", "0.59055924", "0.58963406", "0.587455", "0.5853635", "0.5831062", "0.5817713", "0.58151424", "0.5806295", "0.58053356", "0.57807916", "0.57799935", "0.57779443", "0.5772635", "0.5767003", "0.5757688", "0.5709809" ]
0.66560996
0
This runs a command on the remote host. This returns a pexpect.spawn object. This handles the case when you try to connect to a new host and ssh asks you if you want to accept the public key fingerprint and continue connecting.
def ssh_command (user, host, password, command):
    ssh_newkey = 'Are you sure you want to continue connecting (yes/no)?'
    child = pexpect.spawn('ssh -l %s %s %s'%(user, host, command))
    i = child.expect([ssh_newkey, PASSWORD, pexpect.TIMEOUT])
    if i == 0: # First Time access - send yes to connect.
        child.sendline ('yes')
        child.expect (PASSWORD)
        i = child.expect([PASSWORD,pexpect.TIMEOUT])
        if i == 0: # prompted for password
            child.sendline(password)
        elif i == 1: # Got Timeout
            print 'ERROR!'
            print 'SSH could not login. Here is what SSH said:'
            print child.before, child.after
            print str(child)
            return None
    if i == 1: # Asked for Password - provide it.
        child.sendline(password)
    elif i == 2:
        print 'ERROR!'
        print 'SSH could not login. Here is what SSH said:'
        print child.before, child.after
        print str(child)
        return None
    return child
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_remote(ipaddr, command):\n\n\treturn execute(['ssh', '-f', '-t', '-oPasswordAuthentication=no',\n\t\t'-l', 'alt', ipaddr, command])", "def ssh(pi):\n command = \"ssh {0}\".format(pi)\n subprocess.Popen(command, shell=True)", "def _remote_cmd(self, cmd, block=True):\n s = remote_cmd(host_ip=self.ip, username=self.username,\n password=self.password, command=cmd, block=block)\n\n if s.get('status') == \"Failed\":\n raise AssertionError(s.get('error', \"Error encountered\"))\n\n return s", "def ssh(host, command, fork=False, parallel=False, user=\"root\", debug=False):\n global __parallel_ssh_results\n args = [\"ssh\", \n \"-o\", \"StrictHostKeyChecking=no\", \n \"-o\", \"ConnectTimeout=15\",\n ]\n if KEYFILE:\n args.extend([\"-i\", KEYFILE])\n args.append(host)\n if fork:\n command += \" </dev/null >/dev/null 2>&1 &\"\n args.append(command)\n if debug:\n print 'ssh %s %s' % (host, command)\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n result = p.communicate()\n if parallel:\n __parallel_ssh_results[host] = result\n if debug:\n print host\n print '\\t', 'stdout:', result[0]\n print '\\t', 'stderr:', result[1]\n return (host, result)", "def command(\n cmd,\n hostname=None,\n username=None,\n key_filename=None,\n timeout=None,\n connection_timeout=None,\n port=22,\n background=False,\n) -> Union[None, SSHCommandResult]:\n if hostname is None:\n raise ValueError(\"Can not start SSH client. The 'hostname' argument is missing.\")\n if timeout is None:\n timeout = COMMAND_TIMEOUT\n if connection_timeout is None:\n connection_timeout = CONNECTION_TIMEOUT\n if background:\n with get_channel(\n hostname=hostname, username=username, key_filename=key_filename, timeout=timeout, port=port\n ) as channel:\n channel.exec_command(cmd)\n else:\n with get_connection(\n hostname=hostname, username=username, key_filename=key_filename, timeout=connection_timeout, port=port\n ) as connection:\n return execute_command(cmd, connection, timeout, connection_timeout)", "def issue_command(username, password, host, command):\n\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Try the SSH but log to our running log when there's a problem\n\n try:\n # http://yenonn.blogspot.co.uk/2013/10/python-in-action-paramiko-handling-ssh.html\n remote_conn_pre.connect(host, username=username, password=password, allow_agent=False)\n except paramiko.AuthenticationException, e:\n ssh_error = (host + \", Authentication Error: \" + str(e) + \"\\n\")\n remote_conn_pre.close()\n return [1, \"\", \"\", ssh_error]\n except paramiko.SSHException, e:\n ssh_error = (host + \", SSH Error: \" + str(e) + \"\\n\")\n remote_conn_pre.close()\n return [1, \"\", \"\", ssh_error]\n except paramiko.BadHostKeyException, e:\n ssh_error = (host + \", BadHostKey: \" + str(e) + \"\\n\")\n remote_conn_pre.close()\n return [1, \"\", \"\", ssh_error]\n except socket.error, e:\n ssh_error = (host + \", Connection Failed: \" + str(e) + \"\\n\")\n return [1, \"\", \"\", ssh_error]\n\n \n transport = remote_conn_pre.get_transport()\n pause = 1 \n ssh_error = \"\"\n chan = transport.open_session()\n chan.exec_command(command)\n pause = 1\n buff_size = 1024\n stdout = \"\"\n stderr = \"\"\n\n while not chan.exit_status_ready():\n time.sleep(pause)\n if chan.recv_ready():\n stdout += chan.recv(buff_size)\n\n if chan.recv_stderr_ready():\n stderr += chan.recv_stderr(buff_size)\n\n exit_status = chan.recv_exit_status()\n # Need to gobble up any remaining 
output after program terminates...\n while chan.recv_ready():\n stdout += chan.recv(buff_size)\n\n while chan.recv_stderr_ready():\n stderr += chan.recv_stderr(buff_size)\n\n return [exit_status, stdout, stderr, ssh_error]", "def remote_command(task: Task, command: str) -> Result:\n client = task.host.get_connection(\"paramiko\", task.nornir.config)\n connection_state = task.host.get_connection_state(\"paramiko\")\n\n chan = client.get_transport().open_session()\n\n if connection_state[\"ssh_forward_agent\"]:\n AgentRequestHandler(chan)\n\n chan.exec_command(command)\n\n with chan.makefile() as f:\n stdout = f.read().decode()\n with chan.makefile_stderr() as f:\n stderr = f.read().decode()\n\n exit_status_code = chan.recv_exit_status()\n\n if exit_status_code:\n raise CommandError(command, exit_status_code, stdout, stderr)\n\n result = stderr if stderr else stdout\n return Result(result=result, host=task.host, stderr=stderr, stdout=stdout)", "def run_ssh_cmd(host, command, work_dir=None, username=None,\n key_filename=None, _connection=None):\n # If no connection passed in create our own\n if _connection is None:\n ssh = ssh_conn.connect(host, username, key_filename)\n else:\n ssh = _connection\n\n # Handle Working Directory\n if work_dir is not None:\n command = \"cd %s && %s\" % (work_dir, command)\n\n # Run Command\n stdin, stdout, stderr = ssh.exec_command(command)\n\n while True:\n out = stdout.readline()\n # Stderr can block waiting so check to see if its ready\n if stderr.channel.recv_stderr_ready():\n out = out + stderr.readline()\n # If\n if out != \"\":\n yield out\n else:\n break\n\n return_code = stdout.channel.recv_exit_status()\n # Throw exception if return code is not 0\n if return_code:\n ssh.close() # Tidy Up\n exc = \"COMMAND:%s\\nRET_CODE:%i\" % (command, return_code)\n raise ReturnCodeError(exc, return_code)\n\n if _connection is None:\n ssh.close()", "def ssh(remoteAddress, remoteCommand, outputPrefix=\"ssh> \"):\n command = [\"ssh\", remoteAddress, \"-t\", \"-o\", \"StrictHostKeyChecking=no\", remoteCommand]\n\n proc = ProcessRunner(command)\n proc.mapLines(WriteOut(sys.stdout, outputPrefix=outputPrefix), procPipeName=\"stdout\")\n proc.mapLines(WriteOut(sys.stderr, outputPrefix=outputPrefix), procPipeName=\"stderr\")\n proc.wait()\n returnCode = proc.poll()\n\n # proc.terminate()\n # proc.shutdown()\n\n return returnCode", "def ssh_execute_command(guestaddr, sshprivkey, command, timeout=10, user='root', prefix=None):\r\n # ServerAliveInterval protects against NAT firewall timeouts\r\n # on long-running commands with no output\r\n #\r\n # PasswordAuthentication=no prevents us from falling back to\r\n # keyboard-interactive password prompting\r\n #\r\n # -F /dev/null makes sure that we don't use the global or per-user\r\n # configuration files\r\n #\r\n # -t -t ensures we have a pseudo tty for sudo\r\n\r\n cmd = [\"ssh\", \"-i\", sshprivkey,\r\n \"-F\", \"/dev/null\",\r\n \"-o\", \"ServerAliveInterval=30\",\r\n \"-o\", \"StrictHostKeyChecking=no\",\r\n \"-o\", \"ConnectTimeout=\" + str(timeout),\r\n \"-o\", \"UserKnownHostsFile=/dev/null\",\r\n \"-t\", \"-t\",\r\n \"-o\", \"PasswordAuthentication=no\"]\r\n\r\n if prefix:\r\n command = prefix + \" \" + command\r\n\r\n cmd.extend([\"%s@%s\" % (user, guestaddr), command])\r\n\r\n if(prefix == 'sudo'):\r\n return subprocess_check_output_pty(cmd)\r\n else:\r\n return subprocess_check_output(cmd)", "def execute_command_async(self, command):\n username = server_setup.get_server_user()\n cmd_ssh = [\"ssh\", username + \"@\" + 
server_setup.SERVER_ADDRESS]\n full_command = cmd_ssh + command\n process = Popen(full_command)\n return process", "def call_ssh(cmd, host, user=None, timeout=None, cwd=None):\n if user:\n host = \"%s@%s\" % (user, host)\n full_cmd = ['ssh', host, '-oBatchMode=yes', '--']\n if cwd:\n full_cmd.append(\"cd %s;\" % cwd)\n full_cmd.extend(quote(i) for i in cmd)\n return check_output(full_cmd, timeout=timeout)", "def execute_remote_cmd(ip, user, cmd, timeout=10, suppress_output=False):\n cmd = \"ssh -o StrictHostKeyChecking=no %s@%s \\\"%s\\\"\" % (user, ip, cmd)\n l.info(\"Executing remote command [%s] on ip[%s], user[%s]\", cmd, ip, user)\n pg_cmd = PySysCommand(cmd)\n pg_cmd.run(timeout=timeout)\n output = pg_cmd.stdout + pg_cmd.stderr\n if not suppress_output:\n l.info(\"Result: %s\", output)\n return output", "def _spawn_ssh_tunnel(local_port: int, remote_port: int, remote_ip: str,\n server: str, port: int, key: Optional[str] = None):\n if sys.platform == 'win32':\n ssh_server = server + \":\" + str(port)\n return tunnel.paramiko_tunnel(local_port, remote_port, ssh_server, remote_ip, key)\n else:\n ssh = \"ssh -p %s -o ServerAliveInterval=%i\" % (port, max_keep_alive_interval)\n cmd = \"%s -S none -L 127.0.0.1:%i:%s:%i %s\" % (ssh, local_port, remote_ip, remote_port, server)\n return pexpect.spawn(cmd, env=os.environ.copy().pop('SSH_ASKPASS', None))", "def _ssh(self, command, use_pwd=True, use_tty=False, forward_x=False, verbose=False):\n if use_pwd:\n cd_cmd = 'cd cluster_test_%d; ' % self.address[1]\n else:\n cd_cmd = ''\n ssh = ['ssh',\n '-o', 'UserKnownHostsFile=/dev/null',\n '-o', 'StrictHostKeyChecking=no',\n '-o', 'IdentitiesOnly=yes']\n if self.key_file:\n ssh.extend(['-i', self.key_file])\n if use_tty:\n ssh.extend(['-t'])\n \n if forward_x:\n ssh.extend(['-Y'])\n \n ssh.extend([self.user_name + '@' + self.address[0], cd_cmd + command])\n \n if verbose: print(\" \".join(ssh))\n \n # Check whether ssh runs successfully.\n if subprocess.call(ssh) == 0:\n return True\n else:\n return False", "def run(self, command):\n (stdin, stdout, stderr) = paramiko.exec_command(command)\n stdin.close()\n return stdin, stdout, stderr", "def execute_over_ssh(cmd, ssh, cwd=None, shell='bash'):\n port = None\n parts = ssh.split(':', 1)\n if len(parts) > 1:\n port = parts[1]\n quoted_cmd = ' '.join([x.replace(\"'\", \"\"\"'\"'\"'\"\"\") for x in cmd.split(' ')])\n remote_cmd = ' '.join([\n ' '.join(get_shell(shell)), # /usr/bin/env bash\n ' '.join([EXECUTE_SHELL_PARAM, \"'\", ' '.join((['cd', cwd, ';'] if cwd else []) + [quoted_cmd]), \"'\"])],\n )\n return ['ssh', parts[0]] + (['-p', port] if port else []) + ['-C'] + [remote_cmd]", "def ssh_call ( server, identity, cmd ) :\n print \"Running SSH command on server \" + server + \": \" + cmd\n return subprocess.call( [ \"ssh\",\n ssh_opt,\n \"-tt\",\n \"-i\",\n identity,\n \"ec2-user@\" + server,\n cmd ] )", "def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)", "def quick_execute(command,ssh_host=None,username=None,password=None,interactive=False,stdin=None,stdout=sys.stdout,stderr=sys.stderr,ignore_password=False):\n sce = SSHCommandExecutor()\n sce.ssh_host(ssh_host)\n sce.username(username)\n sce.password(password)\n sce.prompt_for_missing(ignore_password=ignore_password)\n sce.quick_execute(command,interactive=interactive,stdin=stdin,stdout=stdout,stderr=stderr)\n return sce", "def ssh_cmd(ip=None, port=2222, username=os.environ['USER'], password=None, cmd='id'):\n # define client instance and set host key to autoadd - 
YOLO!\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # connect\n client.connect(hostname=ip, port=port, username=username, password=password, passphrase='')\n # get a session from our connection\n ssh_session = client.get_transport().open_session()\n\n if ssh_session.active:\n ssh_session.send(cmd)\n print(ssh_session.recv(4096).decode(\"utf-8\"))\n\n while ssh_session.active:\n cmd = ssh_session.recv(4096)\n # get the command from \"ssh server\"\n try:\n cmd_out = subprocess.check_output(cmd, shell=True)\n ssh_session.send(cmd_out)\n except Exception as ex:\n ssh_session.send(str(ex))\n\n client.close()\n\n return", "def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)", "def run_putty(host, port, sshcmd, command):\n if not host or not port:\n return -2\n\n # Trick putty into storing ssh key automatically.\n plink = os.path.join(os.path.dirname(sshcmd), 'plink.exe')\n store_key_cmd = [plink, '-P', port,\n '%s@%s' % (os.environ['USERNAME'], host), 'exit']\n\n _LOGGER.debug('Importing host key: %s', store_key_cmd)\n store_key_proc = subprocess.Popen(store_key_cmd,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = store_key_proc.communicate(input='y\\n\\n\\n\\n\\n\\n\\n\\n\\n')\n\n _LOGGER.debug('plink STDOUT: %s', out)\n _LOGGER.debug('plink STDERR: %s', err)\n\n if command:\n sshcmd = plink\n\n ssh = [sshcmd, '-P', port, '%s@%s' % (os.environ['USERNAME'], host)]\n if command:\n ssh.extend(command)\n\n _LOGGER.debug('Starting ssh: %s', ssh)\n try:\n if os.path.basename(sshcmd).tolower() == 'putty.exe':\n os.execvp(ssh[0], ssh)\n else:\n subprocess.call(ssh)\n except KeyboardInterrupt:\n sys.exit(0)", "def SSH(COMMAND):\n\n # Host\n HOST = \"[email protected]\"\n\n # Process\n ssh = subprocess.Popen([\"ssh\", \"%s\" % HOST, COMMAND],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n result = ssh.stdout.readlines()\n\n # if error\n if result == []:\n error = ssh.stderr.readlines()\n print(sys.stderr, \"ERROR: %s\" % error)\n # if success\n else:\n for i in range(len(result)):\n print(result[i].decode().replace(\"\\n\",''))", "def command():\n server = get_server()\n port = get_port()\n \n click.echo(f'{server.get(\"hostname\")}:{port} -> localhost:{port}')\n click.echo('CTRL+C for quit')\n bash('ssh -N -L {port}:localhost:{port} -i {ssh_key_path} {username}@{hostname}'.format(\n ssh_key_path=server.get('ssh_key_path'),\n username=server.get('username'),\n hostname=server.get('hostname'),\n port=port\n ))", "def _ssh_master_cmd(addr, user, command, local_key=None):\n ssh_call = ['ssh', '-qNfL%d:127.0.0.1:12042' % find_port(addr, user),\n '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % find_port(addr, user),\n '-O', command,\n '%s@%s' % (user, addr,)\n ]\n\n if local_key:\n ssh_call.insert(1, local_key)\n ssh_call.insert(1, '-i')\n \n return subprocess.call(ssh_call)", "def execute_command(self, command):\n return self.ssh.exec_command(command)", "def connectSsh(self):\n connect_handle = pexpect.spawn(\"ssh -q -o StrictHostKeyChecking=no root@%s\" % self.ip)\n 
connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n #connect_handle.logfile_send = sys.stdout\n i = 0\n ssh_newkey = r'(?i)Are you sure you want to continue connecting'\n remote_key_changed = r\"REMOTE HOST IDENTIFICATION HAS CHANGED\"\n\n perm_denied = r\"(?i)Permission denied\"\n while True:\n i = connect_handle.expect([ssh_newkey, 'assword:',self.promptshell,\n pexpect.EOF, pexpect.TIMEOUT,\n remote_key_changed, perm_denied])\n if i==0:\n connect_handle.sendline('yes')\n continue\n elif i==1:\n logger.info(\"Password supplied\")\n connect_handle.sendline(self.password)\n continue\n\t elif i==2:\n self._mode = CLI_MODES.shell\n self._prompt = self.promptshell\n break\n elif i==3:\n logger.info(\"Connection closed: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Connection Closed: %s\" % self)\n elif i==4:\n logger.warning(\"Timeout while waiting for connection\")\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Unable to establish connection %s\" % self)\n elif i==5:\n logger.warn(\"Removing offending key from .known_hosts..\")\n known_hosts_file = os.path.expanduser(\"~/.ssh/known_hosts\")\n\n if \"darwin\" in sys.platform.lower():\n # MAC OS\n utils.run_cmd(\"sed -i 1 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n elif \"linux\" in sys.platform.lower():\n # Linux\n utils.run_cmd(\"sed -i 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n\n connect_handle = pexpect.spawn(\"ssh root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n\n continue\n elif i==6:\n logger.warning(\"Permission denied: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Permission denied: %s.\" % self)\n return connect_handle", "def connect(self, hostip, username, password, port, command):\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostip, username = username, password = password, port=port)\n (stdin, stdout, stderr) = client.exec_command(command)\n stdin.close()\n return stdin, stdout, stderr", "def login (self,server,username,password='',terminal_type='ansi',original_prompts=r\"][#$]|~[#$]|bash.*?[#$]|[#$] \",login_timeout=10):\r\n cmd = \"ssh -l %s %s\" % (username, server)\r\n spawn.__init__(self, cmd, timeout=login_timeout)\r\n #, \"(?i)no route to host\"])\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT, \"(?i)connection closed by remote host\"])\r\n if i==0: # New certificate -- always accept it. This is what you if SSH does not have the remote host's public key stored in the cache.\r\n self.sendline(\"yes\")\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==2: # password\r\n self.sendline(password)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n if i==4:\r\n self.sendline(terminal_type)\r\n i = self.expect([\"(?i)are you sure you want to continue connecting\", original_prompts, \"(?i)password\", \"(?i)permission denied\", \"(?i)terminal type\", TIMEOUT])\r\n\r\n if i==0:\r\n # This is weird. 
This should not happen twice in a row.\r\n self.close()\r\n return False\r\n elif i==1: # can occur if you have a public key pair set to authenticate. \r\n ### TODO: May NOT be OK if expect() matched a false prompt.\r\n pass\r\n elif i==2: # password prompt again\r\n # For incorrect passwords, some ssh servers will\r\n # ask for the password again, others return 'denied' right away.\r\n # If we get the password prompt again then this means\r\n # we didn't get the password right the first time. \r\n self.close()\r\n return False\r\n elif i==3: # permission denied -- password was bad.\r\n self.close()\r\n return False\r\n elif i==4: # terminal type again? WTF?\r\n self.close()\r\n return False\r\n elif i==5: # Timeout\r\n # This is tricky... presume that we are at the command-line prompt.\r\n # It may be that the prompt was so weird that we couldn't match it.\r\n pass\r\n elif i==6: # Connection closed by remote host\r\n self.close()\r\n return False\r\n else: # Unexpected \r\n self.close()\r\n return False\r\n # We appear to be in -- reset prompt to something more unique.\r\n if not self.set_unique_prompt():\r\n self.close()\r\n return False\r\n return True" ]
[ "0.69595283", "0.6643719", "0.6521727", "0.6504823", "0.6419268", "0.6395431", "0.6382922", "0.6355576", "0.63307595", "0.63192797", "0.63166046", "0.62774366", "0.6273903", "0.62710875", "0.6231371", "0.6216641", "0.62067455", "0.6205677", "0.6203712", "0.61976033", "0.6197345", "0.61765707", "0.61409587", "0.61375165", "0.61338633", "0.61157644", "0.61140203", "0.6104888", "0.60677105", "0.6035797" ]
0.74644256
0
The constructor for reviewsMenuClass. Attributes
def __init__(self):
    super().__init__()
    self.status = True
    self.token = tokensClass()
    self.layout = layout.reviewsLayoutClass(self)
    self.title = "LMS Reviews GUI"
    self.location = (50, 125)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, menu_name):\n self.menu_name = menu_name", "def __init__(self, parent_menu, label):\n self._parent_menu = parent_menu\n self._options = {}\n self._label = label\n self._menu = Menu(parent_menu)\n self._key_underline = 0", "def __init__(self, name, title, icon=None, desc=None, prop=None, style=None, attr=None,\n menu_clicked_callback=None, app=None, css_cls=None):\n MenuItem.__init__(self, name, title, icon=icon, desc=desc, prop=prop, style=style, attr=attr,\n menu_clicked_callback=menu_clicked_callback, app=app, css_cls=css_cls)", "def __init__(self):\n self.movie_reviews = []", "def __init__(self, reviewer_name, review_comments, is_recommended):\n \n self.reviewer = reviewer_name \n self.comments = review_comments \n self.recommend = is_recommended", "def __init__(self, name, title, icon=None, desc=None, prop=None, style=None, attr=None,\n menu_clicked_callback=None, app=None, css_cls=None, disabled=None):\n Widget.__init__(self, name, desc=desc, prop=prop, style=style, attr=attr, css_cls=css_cls)\n self._title = title\n self._icon = icon\n self._menu_clicked_callback = menu_clicked_callback\n self._app = app\n self._disabled = disabled\n if disabled:\n self.add_property('class', 'ui-state-disabled')\n self._attach_onclick()", "def __init__(self, a_description, a_menu_screen, a_content, a_display):\n super(AbstractMenuItemScreen, self).__init__(a_content, a_display)\n self._menu_item_description = a_description\n self._enclosing_menu = a_menu_screen", "def __init__(self, name, menu_type=None, desc=None, prop=None, style=None, attr=None,\n app=None, css_cls=None):\n Widget.__init__(self, name, desc=desc, prop=prop, style=style, attr=attr,\n css_cls=css_cls)\n self._app = app\n if menu_type is None:\n self._menu_type = MenuTypes.VERTICAL\n else:\n self._menu_type = menu_type", "def _init(self):\n self.wx_menu = wx.Menu()", "def __init__(self, menu_manager):\n\n\t\tself.menu_manager = menu_manager\n\t\tself.uiCoordinator = menu_manager.uiCoordinator\n\t\tself.highscore = menu_manager.user.highscore\n\t\tself.name = menu_manager.user.username\n\t\tself.high_scores = menu_manager.con.getHighscores(\"DESC\")\n\t\tself.level = menu_manager.level\n\t\tself.open_window = False\n\n\t\tself.setupWindow()\n\t\tself.setupInfo()\n\t\tself.setupBinds()", "def __init__(self, parent):\n QtGui.QMenu.__init__(self, parent)\n self.parent = parent", "def create_menus( self ):", "def __init__(self, parent=None):\n super().__init__(parent)\n # print(self.__dict__.keys())\n\n # print(self.__dict__.keys(), '\\n\\n')\n #\n # print(self.__dict__['menu'].__dict__.keys())\n # print(self.__dict__['menu']['leftMenu'])\n # self._viewbox.fftCheck.setObjectName(\"fftCheck\")\n\n # self.viewAll = QtGui.QRadioButton(\"Vue d\\'ensemble\")\n # self.viewAll.triggered.connect(self.autoRange)\n # self.menu.addAction(self.viewAll)\n # print(self.menu.__dict__['leftMenu'].__dict__)", "def __init__(self, title='', parent=None):\n super(MenuView, self).__init__(title, parent)\n self.text_column = 0\n \"\"\"The column for the action text. Default 0\"\"\"\n self.icon_column = 0\n \"\"\"The column for the action icon. Default 0\"\"\"\n self.icontext_column = -1\n \"\"\"The column for the action icon text. Default -1\"\"\"\n self.tooltip_column = 0\n \"\"\"The column for the tooltip data. Default 0\"\"\"\n self.checked_column = 0\n \"\"\"The column for the checked data. Has to be checkable. Default 0\"\"\"\n self.whatsthis_column = 0\n \"\"\"The column for the whatsThis text. 
Default 0\"\"\"\n self.statustip_column = 0\n \"\"\"The column for the statustip text. Default 0\"\"\"\n self._model = None\n\n Qt = QtCore.Qt\n args = [SetDataArgs('setText', 'text_column', Qt.DisplayRole, str),\n SetDataArgs('setIcon', 'icon_column', Qt.DecorationRole, self._process_icondata),\n SetDataArgs('setIconText', 'icontext_column', Qt.DisplayRole, str),\n SetDataArgs('setToolTip', 'tooltip_column', Qt.ToolTipRole, str),\n SetDataArgs('setChecked', 'checked_column', Qt.CheckStateRole, self._checkconvertfunc),\n SetDataArgs('setWhatsThis', 'whatsthis_column', Qt.WhatsThisRole, str),\n SetDataArgs('setStatusTip', 'statustip_column', Qt.StatusTipRole, str)]\n self.setdataargs = args\n \"\"\"A list of :class:`SetDataArgs` containers. Defines how the\n data from the model is applied to the action\"\"\"", "def __init__(self, *args, **kwargs):\n self.organisation = kwargs.pop('organisation', None)\n self.cobrand = kwargs.pop('cobrand', None)\n super(ReviewTable, self).__init__(*args, **kwargs)", "def __init__(self, name, price_range, cuisine_list):\n # Assume this method body has been correctly implemented.\n self.name = name \n self.price_range = price_range \n self.cuisine_list = cuisine_list\n self.reviews = []", "def __init__(self, id_movie, title, tags, ratings):\n\n self.id_movie = id_movie\n self.title = title\n self.tags = tags\n self.ratings = ratings", "def __init__(self, parent_item=None, menuitems=[], x=0, y=0, horiz=True,\r\n position='right', visible=True):\r\n self.visible = visible\r\n self.parent_item = parent_item\r\n if parent_item:\r\n parent_item.child_menu = self\r\n if position == 'right':\r\n self.x = parent_item.bounds[2] + 5\r\n self.y = parent_item.bounds[3]\r\n else:\r\n self.x = parent_item.bounds[0]\r\n self.y = parent_item.bounds[1]\r\n else:\r\n self.x = x\r\n self.y = y\r\n i_x = self.x\r\n i_y = self.y\r\n for item in menuitems:\r\n item.own_menu = self\r\n item.relocate(i_x, i_y)\r\n item.visible = visible\r\n if horiz:\r\n i_x = item.bounds[2] + 5\r\n else:\r\n i_y = item.bounds[1]\r\n self.menuitems = menuitems\r\n if parent_item != None:\r\n self.hide()", "def __init__(self, title, isbn):\n self.title = title\n self.isbn = isbn\n self.ratings = []", "def __init__(self, _name, _drink=menu.water, _food=menu.bread):\n self.name = _name\n self.drinks = []\n self.food = []\n self.drinks.append(_drink)\n self.food.append(_food)", "def __init__(self, name, title, items, show_icon=True, desc=None, prop=None, style=None, attr=None,\n onclick_callback=None, app=None, css_cls=None):\n Widget.__init__(self, name, desc=desc, prop=prop, style=style, attr=attr,\n css_cls=css_cls)\n self._title = title\n self._items = items\n self._show_icon = show_icon\n self._app = app\n self._onclick_callback = onclick_callback\n self._disabled_buttons = {}", "def __init__(self,\n *,\n attributes: List['Attribute'] = None) -> None:\n self.attributes = attributes", "def __init__(self):\n self.key = None\n self.name = None\n self.menu = None\n self.ictype = None\n self.icind1 = None\n self.icind2 = None\n self.iexist = None\n self.icname = None", "def setUp(self):\n self.new_review = Review(title = \"\")", "def __init__(self, MENUWIDTH, MENUHEIGHT):\n \n self._menu_items = []\n self.menu_Text = []\n #Background\n self.menuRect = pygame.Rect((0, 0), (MENUWIDTH, MENUHEIGHT))", "def __init__(\n self,\n author,\n book_title,\n publisher,\n edition,\n category,\n copies,\n user_id):\n self.author = author\n self.book_title = book_title\n self.publisher = publisher\n self.edition = 
edition\n self.category = category\n self.copies = copies\n self.creator_id = user_id", "def __init__(self):\n # This table is used to inform HTML of items to be placed in main menu\n # -- data provider requirements are \"title\" and \"key\"\n # ---- \"title\" is displayed in dropdown\n # ---- \"key\" is used in building dynamic URL (https://www.tutorialspoint.com/flask/flask_variable_rules.htm)\n self._menus = [\n {\"title\": 'CSA: Java', 'key': 'java', 'url': 'https://csa.nighthawkcodingsociety.com/'},\n {\"title\": 'CSP: Python', 'key': 'python', 'url': 'https://csp.nighthawkcodingsociety.com/'},\n pi_details(),\n git_details(),\n pbl_details()\n ]\n\n \"\"\"dictionary that goes with menu selection\"\"\"\n # This dictionary is used to obtain data associated with a dynamic URL\n # -- The key looked up in the dictionary returns a list that has two elements\n # ---- [0] the title associated to key, used for display on landing page\n # ---- [1] the projects/choices associated to key, used to populate choices on landing page selector widget\n self.TITLE = 0\n self.PROJECTS = 1\n self._select_2_proj = {\n pi_details()['key']: [pi_details()['title'], pi_projects()],\n git_details()['key']: [git_details()['title'], git_projects()],\n pbl_details()['key']: [pbl_details()['title'], pbl_projects()]\n }\n\n \"\"\"dictionary that maps key (route) with value (data) for project page\"\"\"\n self._lessons_dict = {\n pi_webserver()['route']: pi_webserver(),\n pi_deploy()['route']: pi_deploy(),\n pi_portforward()['route']: pi_portforward(),\n pi_realvnc()['route']: pi_realvnc(),\n pi_vncsetup()['route']: pi_vncsetup(),\n pi_ssh()['route']: pi_ssh(),\n git_concepts()['route']: git_concepts(),\n git_replto()['route']: git_replto(),\n pbl_overview()['route']: pbl_overview(),\n pbl_scrum()['route']: pbl_scrum(),\n }", "def __init__(name, title=\"\", description=\"\"):", "def __init__(self, **attrs):\n \n self.type = None\n \n self.collection_id = None\n self.theme_id = None\n self.name = None\n \n self.designer_name = None\n self.designer_url = None\n \n self.year = None\n self.pieces = None\n \n self.url = None\n self.img_url = None\n \n self.count = None\n \n super().__init__(**attrs)", "def __init__(self):\n\t\tself.relevances = None" ]
[ "0.7024792", "0.6597017", "0.6345977", "0.6256314", "0.62525284", "0.62365556", "0.614118", "0.60794574", "0.60352033", "0.59544903", "0.59349453", "0.59194565", "0.5849597", "0.5827087", "0.58053714", "0.57678133", "0.57440007", "0.57258856", "0.5698869", "0.56835824", "0.5676005", "0.56605583", "0.56532204", "0.56413144", "0.5626103", "0.56195104", "0.56191117", "0.559165", "0.5587777", "0.5550595" ]
0.68060464
1
Encodes a dictionary (asterix) in the EUROCONTROL ASTERIX category.
def encode(asterix):
    assert type(asterix) is dict

    asterix_record = 0

    #priority_asterix_cat = [21, 34]
    for k, v in asterix.iteritems():
    #for k in priority_asterix_cat:
        v = asterix[k]
        record = 0
        n_octets_data_record = 0
        cat = 0

        ctf = load_asterix_category_format(k)

        if ctf is None:
            continue

        if verbose >= 1:
            print 'encoding cat', k

        cat = k

        for cat_tree in ctf.getElementsByTagName('Category'):

            if k != int(cat_tree.getAttribute('id')):
                continue

            for data_record in v:
                ll_db, db = encode_category(k, data_record, cat_tree)

                #TODO: use maximum datablock size
                record <<= ll_db * 8
                record += db
                n_octets_data_record += ll_db

                if verbose >= 1:
                    print "Tamanho do bloco de dados ", ll_db

            break

        # Record header ( CAT + LEN )
        record += (cat << (n_octets_data_record * 8 + 16))
        record += ((1 + 2 + n_octets_data_record) << ((n_octets_data_record) * 8))

        asterix_record <<= (1 + 2 + n_octets_data_record) * 8
        asterix_record += record

    return asterix_record
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(category_main : ):", "def latin1_to_ascii(self, unicrap):\n xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n 0xc6: 'Ae', 0xc7: 'C',\n 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',\n 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n 0xd0: 'Th', 0xd1: 'N',\n 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n 0xe6: 'ae', 0xe7: 'c',\n 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',\n 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n 0xf0: 'th', 0xf1: 'n',\n 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n 0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: \"'\",\n 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n 0xd7: '*', 0xf7: '/'\n }\n\n r = ''\n for i in unicrap:\n if xlate.has_key(ord(i)):\n r += xlate[ord(i)]\n elif ord(i) >= 0x80:\n pass\n else:\n r += str(i)\n return r", "def _utf8_encode(self, d):\n \n\n #***Edit by H. Loho: Got rid of .lower(), because that messes up the Lucene modifiers OR, AND ********************\n\n\n\n for k, v in d.items():\n if isinstance(v, str):\n d[k] = v.encode('utf8')\n if isinstance(v, list):\n for index,item in enumerate(v):\n item = item.encode('utf8')\n v[index] = item\n if isinstance(v, dict):\n d[k] = self._utf8_encode(v)\n \n return d", "def encode(self, value):\r\n pass", "def beautify_json(self) -> None:\n for letter in self.data:\n for category in self.data[letter]:\n self.data[letter][category] = str(self.data[letter][category.lower()])\n self.save()\n with open(dict_path, encoding='utf-8') as x:\n data = x.read()\n with open(dict_path, 'w', encoding='utf-8') as y:\n data2 = data.replace('\"[', '[').replace(']\"', ']').replace(\"'\", '\"')\n y.write(data2)", "def setMyIndustryData(self, prefix):\n abr = string.upper(prefix + self.key)\n for id, myIndustryData in self.myParent.industrydata.iteritems():\n if myIndustryData.abr == abr:\n self.myIndustryData = myIndustryData\n return", "def __str__(self):\n return 'encoded value {} for {}\\nvalue = {}'.format(self.i_category, self.harmonized_trait, self.i_value)", "def encode(self, decoded):", "def recode_value_dict(dictionary):\n somedict = {k:v.encode('cp1251') for k, v in dictionary.items()}\n return somedict", "def encode(self):\n\n # Start from occupancy\n encoding = self.occupancy.copy();\n\n # Add goals\n for g in self.goals:\n if g in self.discovered_goals:\n encoding[self.tocellcoord[g]] += 10\n else:\n encoding[self.tocellcoord[g]] += 100\n\n # Add agents\n for pos in self.currstate:\n encoding[self.tocellcoord[pos]] += 2\n\n return encoding", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def succinic_acid():\n return {\"name\": \"Suc\", \"config\": Config.UNDEF, \"isomer\": Enantiomer.U, \"lactole\": Lactole.OPEN,\n \"smiles\": \"OC[C@H](O)CCO\", \"c1_find\": lambda x: c1_finder(x, \"OC[C@H](O)CCO\")}", "def caesar_encode(self, text, key):\n result_list = []\n for 
char in text:\n if char.isalpha():\n if char.islower():\n offset = ASCII_LOWER_OFFSET\n else:\n offset = ASCII_UPPER_OFFSET\n char = chr((ord(char) - offset + key) % ALPHABET_SIZE + offset)\n result_list.append(char)\n return ''.join(result_list)", "def unicode2ascii(_unicrap):\n xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',\n 0xc6:'Ae', 0xc7:'C',\n 0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',\n 0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',\n 0xd0:'Th', 0xd1:'N',\n 0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',\n 0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',\n 0xdd:'Y', 0xde:'th', 0xdf:'ss',\n 0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',\n 0xe6:'ae', 0xe7:'c',\n 0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',\n 0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',\n 0xf0:'th', 0xf1:'n',\n 0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',\n 0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',\n 0xfd:'y', 0xfe:'th', 0xff:'y',\n 0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',\n 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',\n 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',\n 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',\n 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:\"'\",\n 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',\n 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',\n 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',\n 0xd7:'*', 0xf7:'/'\n }\n\n s = \"\"\n for i in _unicrap:\n ordi = ord(i)\n if ordi in xlate:\n s += xlate[ordi]\n elif ordi >= 0x80:\n pass\n else:\n s += str(i)\n return s", "def encode(self):\n\n ret = {}\n ret[DC] = ''.join(encode_huffman(v, self.layer_type)\n for v in self.diff_dc)\n ret[AC] = ''.join(encode_huffman(v, self.layer_type)\n for v in self.run_length_ac)\n return ret", "def encode(self, value):\n raise NotImplementedError()", "def encode_metadata_dict(metadict):\n return _json.dumps(metadict, separators=(',', ':')).encode('ascii')", "def __encode_ordinal(self):\n for key, value in self.ord_dict.items():\n if key in self.train_df.columns:\n if self.test_df is not None:\n self.test_df[key + str(\"Encoded\")] = self.test_df[key].map(\n value\n )\n self.test_df[key + str(\"Encoded\")] = self.test_df[\n key + str(\"Encoded\")\n ].astype(\"category\")\n\n self.train_df[key + str(\"Encoded\")] = self.train_df[key].map(\n value\n )\n self.train_df[key + str(\"Encoded\")] = self.train_df[\n key + str(\"Encoded\")\n ].astype(\"category\")\n self.ord_cols.append(key + str(\"Encoded\"))", "def encode(self, value):\r\n return value", "def output_aa_string(residues):\n # Dictionary of 3 letter to 1 letter AA conversion\n aa_dict = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',\n 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',\n 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',\n 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}\n\n s = ''\n for res in residues:\n s = s + aa_dict.get(res.type)\n return s", "def encode_data(self, data):\n if self.unit == \"char\":\n data = self.char_encoding(data)\n elif self.unit == \"char-ngram\":\n data = self.ngram_encoding(data)\n elif self.unit == \"morpheme\" or self.unit == \"oracle\":\n data = self.morpheme_encoding(data)\n else:\n data = self.data_to_word_ids(data, False)\n return data", "def test_write_a(self):\n self._test_write(self.encoding_a, self.certificate_type_a,\n self.certificate_value_a)", "def _apply_encoder(self, frame, prop, encoder, encoder_type=\"category\"):\n pass", "def addInfo(self, **data):\n for key, value in 
viewitems(data):\n # assumption: value is not iterable (list, dict, tuple, ...)\n # using unicode sandwich pattern\n key = decodeBytesToUnicode(key, \"ignore\")\n value = decodeBytesToUnicode(value, \"ignore\")\n self.data[key] = value\n return", "def _encode_supplement(self):", "def _encode_supplement(self):", "def rivine_binary_encode(self, encoder):\n pass", "def encode(self,b):\n raise NotImplementedError('subclasses must override encode()!')", "def uCSIsArmenian(code):\n ret = libxml2mod.xmlUCSIsArmenian(code)\n return ret", "def encode(self, strs):" ]
[ "0.5903913", "0.49887666", "0.49819857", "0.49383077", "0.49203125", "0.4909551", "0.4891353", "0.47965437", "0.47933576", "0.4793037", "0.47612906", "0.47022176", "0.4689794", "0.46841627", "0.46771052", "0.46750674", "0.4664644", "0.4662446", "0.4597144", "0.4587141", "0.4581841", "0.45794678", "0.45794082", "0.45433965", "0.452631", "0.452631", "0.45226067", "0.4517574", "0.45142865", "0.45075426" ]
0.6899768
0
Encodes the record from the given category (cat).
def encode_category(cat, did, tree):
    if did == {}:
        return 0, 0

    mdi = {}

    for c in tree.getElementsByTagName('DataItem'):
        di = c.getAttribute('id')
        if di.isdigit():
            di = int(di)
        rule = c.getAttribute('rule')

        if di in did:
            if verbose >= 1:
                print 'encoding dataitem', di
            l, v = encode_dataitem(did[di], c)
            mdi[di] = l, v
        else:
            if rule == 'mandatory' and verbose >= 1:
                print 'absent mandatory dataitem', di

    data_record = 0L
    n_octets_data_record = 0
    sorted_mdi_keys = sorted(mdi.keys())

    fspec_bits = []
    uap_tree = tree.getElementsByTagName('UAP')[0]
    for cn in uap_tree.childNodes:
        if cn.nodeName != 'UAPItem':
            continue

        uapi_value = cn.firstChild.nodeValue
        if uapi_value.isdigit():
            uapi_value = int(uapi_value)

        if uapi_value in sorted_mdi_keys:
            fspec_bits.append(int(cn.getAttribute('bit')))
            l, v = mdi[uapi_value]
            data_record <<= l * 8
            data_record += v
            n_octets_data_record += l

    if fspec_bits == []:
        print 'no dataitems identified'
        return 0, 0

    # FSPEC for data record
    max_bit = max(fspec_bits)
    n_octets_fspec = max_bit / 8 + 1

    # Fn
    fspec = 0
    for i in fspec_bits:
        fspec += (1 << (n_octets_fspec * 8 - 1 - i))

    # FX
    for i in range(n_octets_fspec - 1):
        fspec += (1 << ((n_octets_fspec - 1 - i) * 8))

    data_record += (fspec << (n_octets_data_record * 8))
    n_octets_data_record += n_octets_fspec

    return n_octets_data_record, data_record
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(category_main : ):", "def add_category(self, category: str) -> None:\n for letter in self.data:\n if not self.data[letter].get(category):\n self.data[letter][category] = []\n print(f'Categoria: {category} adicionada ao dicionário.')\n self.save()\n self.beautify_json()", "def add_category(self, cid: str, cat: str):\n self.logging.info(f\"adding category: {cat} with it {cid}\")\n if self.sess.query(exists().where(Category.category_id == cid or Category.category == cat)).scalar():\n return\n genre = Genre(cid=uuid4().hex,\n categorey_id=cid,\n category=cat)\n self.sess.add(genre)\n self.sess.commit()", "def encode(asterix):\n assert type(asterix) is dict\n\n asterix_record = 0\n\n #priority_asterix_cat = [21, 34]\n for k, v in asterix.iteritems():\n #for k in priority_asterix_cat:\n v = asterix[k]\n record = 0\n n_octets_data_record = 0\n cat = 0\n\n ctf = load_asterix_category_format(k)\n\n if ctf is None:\n continue\n\n if verbose >= 1:\n print 'encoding cat', k\n\n cat = k\n\n for cat_tree in ctf.getElementsByTagName('Category'):\n\n if k != int(cat_tree.getAttribute('id')):\n continue\n\n for data_record in v:\n ll_db, db = encode_category(k, data_record, cat_tree)\n\n #TODO: use maximum datablock size\n record <<= ll_db * 8\n record += db\n n_octets_data_record += ll_db\n\n if verbose >= 1:\n print \"Tamanho do bloco de dados \", ll_db\n\n break\n\n # Record header ( CAT + LEN )\n record += (cat << (n_octets_data_record * 8 + 16))\n record += ((1 + 2 + n_octets_data_record) << ((n_octets_data_record) * 8))\n\n asterix_record <<= (1 + 2 + n_octets_data_record) * 8\n asterix_record += record\n\n return asterix_record", "def save(self, category):\n self.db.query(f\"\"\"\n INSERT INTO {self.table} (id, name)\n VALUES (:id, :name)\n ON DUPLICATE KEY UPDATE name = :name\n \"\"\", **vars(category))\n return category", "def FormatRecord(self, record, categories):\n catsmodified=False\n res={'id': 0} # zero means create new record\n for i in record.get(\"serials\", []):\n if i['sourcetype']=='egroupware':\n res['id']=i['id']\n break\n res['n_given'],res['n_middle'],res['n_family']=nameparser.getparts(record.get(\"names\", [{}])[0])\n for nf in 'n_given', 'n_middle', 'n_family':\n if res[nf] is None:\n res[nf]=\"\" # set None fields to blank string\n res['fn']=nameparser.formatsimplename(record.get(\"names\", [{}])[0])\n for t,prefix in (\"business\", \"adr_one\"), (\"home\", \"adr_two\"):\n a={}\n adr=record.get(\"addresses\", [])\n for i in adr:\n if i['type']==t:\n for p2,k in (\"_street\", \"street\"), (\"_locality\", \"city\"), (\"_region\", \"state\"), \\\n (\"_postalcode\", \"postalcode\"), (\"_countryname\", \"country\"):\n res[prefix+p2]=i.get(k, \"\")\n if t==\"business\":\n res['org_name']=i.get(\"company\",\"\")\n break\n if \"emails\" in record:\n for t,k in (\"business\", \"email\"), (\"home\", \"email_home\"):\n for i in record[\"emails\"]:\n if i.get(\"type\",None)==t:\n res[k]=i.get(\"email\")\n res[k+\"_type\"]=\"INTERNET\"\n break\n cats={}\n for cat in record.get(\"categories\", []):\n c=cat['category']\n v=categories.get(c, None)\n if v is None:\n catsmodified=True\n for i in xrange(0,-999999,-1):\n if `i` not in cats:\n break\n else:\n i=`v`\n cats[i]=str(c)\n res['cat_id']=cats\n for t,k in (\"home\", \"tel_home\"), (\"cell\", \"tel_cell\"), ('fax','tel_fax'), \\\n ('pager', 'tel_pager'), ('office', 'tel_work'):\n if \"numbers\" in record:\n v=\"\"\n for i in record['numbers']:\n if i['type']==t:\n v=i['number']\n break\n res[k]=phonenumber.format(v)\n if 
\"memos\" in record:\n memos=record.get(\"memos\", [])\n memos+=[{}]\n res['note']=memos[0].get(\"memo\",\"\")\n if \"urls\" in record:\n urls=record.get(\"urls\", [])\n u=\"\"\n for url in urls:\n if url.get(\"type\", None)==\"business\":\n u=url[\"url\"]\n break\n if len(u)==0:\n urls+=[{'url':\"\"}]\n u=urls[0][\"url\"]\n res['url']=u\n return catsmodified,res", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "def save(self, cat):\n with open(self.filename_csv, 'w', newline='') as csvfile:\n csv_file = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_file.writerow(['name', 'id', 'parent_id'])\n for row in cat:\n csv_file.writerow(row)", "def _categorize(self, slug, category):\n key = self._category_key(category)\n self.r.sadd(key, slug)\n\n # Store all category names in a Redis set, for easy retrieval\n self.r.sadd(self._categories_key, category)", "def encode(record: Tuple[MeasureInput, MeasureResult]) -> str:\n return dump_record_to_string(*record)", "def __encode_categorical_util(self):\n cat = []\n # cat = self.cat_cols\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df[col + str(\"Encoded\")] = pd.factorize(\n self.test_df[col]\n )[0]\n self.test_df[col + str(\"Encoded\")] = self.test_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n self.train_df[col + str(\"Encoded\")] = pd.factorize(\n self.train_df[col]\n )[0]\n self.train_df[col + str(\"Encoded\")] = self.train_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n cat.append(str(col + str(\"Encoded\")))\n self.cat_cols += cat", "def save_cat():\n\n #if cat is already there then flash cat already saved to favs\n #else commit c", "def decode_cat(self, cat: bytes) -> CAT.CAT:\n catdk = CAT.CAT()\n try:\n pointer_field = cat[0]\n pos = 1 + pointer_field\n catdk.table_id = cat[pos]\n b12 = struct.unpack('>H', cat[pos+1:pos+3])[0]\n section_length = b12 & 4095\n pos_crc = pos + 3 + section_length - 4 # - CRC\n b = cat[pos+5] # skip 2 bytes from reserved\n catdk.ver_num = (b & 62) >> 1\n catdk.cur_next_ind = b & 1\n catdk.sec_num = cat[pos+6]\n catdk.last_sec_num = cat[pos+7]\n pos += 8\n if pos < pos_crc:\n catdk.descriptors = DescriptorParser.decode_descriptors(cat[pos:pos_crc])\n try:\n catdk.crc32 = (struct.unpack('>L', cat[pos_crc:pos_crc + 4]))[0]\n crc_check = self.crc32mpeg2(cat[1+pointer_field:pos_crc])\n if catdk.crc32 != crc_check:\n catdk.crc32_ok = False\n except Exception as err:\n catdk.crc32_ok = False\n logging.warning('CAT CRC check error:' + str(err))\n return catdk\n except Exception as err:\n logging.warning('CAT parsing error:' + str(err))\n return None", "def encode(self, desc):\n raise NotImplementedError", "def inc_category_count(self, cat):\n count = self.category_count(cat)\n if count == 0:\n self.con.execute(\"insert into cc values ('%s',1)\" % (cat))\n else:\n self.con.execute(\"update cc set count=%d where category='%s'\" \n % (count+1, cat))\n self.con.commit()", 
"def to_json(self, category):\r\n return \"{{\\\"id\\\": {0}, \" \\\r\n \"\\\"name\\\": \\\"{1}\\\", \" \\\r\n \"\\\"abbreviation\\\": \\\"{2}\\\", \" \\\r\n \"\\\"category\\\": {3}, \" \\\r\n \"\\\"rank\\\": {4}}}\".format(self.id, self.name, self.abbreviation, category.id, self.rank)", "def add(self, record, categories): # record is in the form 'category item money'\n\n t = record.split() # t is a temporary list in form of ['category', 'item', 'money']\n if len(t) != 3:\n sys.stderr.write('The format of a record should be like this: meal breakfast -50.\\n'\\\n 'Fail to add a record.\\n')\n else:\n r = Record(t[0], t[1], t[2]) # r is an object created by the class Record\n p = categories.is_category_valid(r._category)\n if p != True:\n sys.stderr.write('The specified category is not in the category list.\\n'\\\n 'You can check the category list by command \"view categories\".\\n'\\\n 'Fail to add a record.\\n')\n else:\n try:\n t[2] = int(t[2])\n except ValueError:\n sys.stderr.write('Invalid value for money. Should be an integer.\\nFail to add a record.\\n')\n else:\n self._records.append(record)", "def encode_record(record):\n return json.dumps(record)", "async def set_category(self, category: str, data: dict) -> None:\n with self.session.begin():\n # remove old data\n stmt = delete(self.model)\n if self.category_field:\n stmt = stmt.where(getattr(self.model, self.category_field) == category)\n self.session.execute(stmt)\n\n for key_name, key_data in data.items():\n data = key_data\n if self.category_field:\n data[self.category_field] = category\n if self.key_field:\n data[self.key_field] = key_name\n\n record = self.model(**data)\n self.session.add(record)", "def _write_assoc(self, cat, xname, yname, imgname):\n\t\t\n\t\t#if assoc_xname not in assoc_cat.colnames or assoc_yname not in assoc_cat.colnames:\n\t\t#\traise RuntimeError(\"I don't have columns %s or %s\" % (assoc_xname, assoc_yname))\n\t\t\n\t\tif os.path.exists(self._get_assoc_filepath(imgname)):\t\n\t\t\tlogger.warning(\"ASSOC file already exists, I will overwrite it\")\n\n\t\tlines = []\n\t\tfor (number, row) in enumerate(cat):\n\t\t\t# Seems safe(r) to not use row.index but our own number.\n\t\t\tlines.append(\"%.3f\\t%.3f\\t%i\\n\" % (row[xname], row[yname], number))\n\n\t\tlines = \"\".join(lines)\n\t\tf = open(self._get_assoc_filepath(imgname), \"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\t\tlogger.debug(\"Wrote ASSOC file %s...\" % (self._get_assoc_filepath(imgname)))", "def encode(self, seq):", "async def save(self, category, key, value=None):\n await super(MemoryKVCache, self).save(category, key, value)\n\n if self.in_transaction:\n self.dirty_categories.add(category)", "def _convert_category(category_field):\n\n return category_field # TODO", "def encode_category(df, enc, col, method):\n \n if method == 'label':\n # lb = LabelEncoder()\n # lb.fit(df[col])\n df[col] = enc.transform(df[col].astype(str))\n \n elif method == 'onehot':\n # ohe = OneHotEncoder(handle_unknown='ignore')\n # ohe.fit(df[[col]])\n tempdf = enc.transform(df[[col]]).toarray()\n newdf = pd.DataFrame(tempdf, columns = np.array(ohe.categories_).ravel())\n df = pd.concat([df, newdf], axis=1)\n df.drop(columns=[col], inplace = True)\n \n return df", "def encode(self, decoded):", "def increment_cat(self, category):\r\n self.category_count.setdefault(category, 0)\r\n self.category_count[category] += 1", "def create_category(self, category):\n\n super().new_entry()\n\n return Categories.objects.create(\n name=category['id'].split(':')[1],\n 
name_fr=category['name'],\n url=category['url']\n )", "async def add(self, category, key, value=None):\n if value is None:\n data = {}\n elif self.default_value_field is None:\n data = value\n else:\n data = {self.default_value_field: value}\n\n if self.category_field:\n data[self.category_field] = category\n\n if self.key_field:\n data[self.key_field] = key\n\n record = self.model(**data)\n self.session.add(record)\n self.session.flush()", "def encode_categorical(df):\n cat_cols = df.select_dtypes(\"category\").columns\n for col in cat_cols:\n df[col] = df[col].cat.codes + 1\n unique_no = len(df[col].unique())\n if unique_no < 50:\n df[col] = df[col].astype(\"uint8\")\n elif unique_no < 16000:\n df[col] = df[col].astype(\"int16\")\n else:\n df[col] = df[col].astype(\"int32\")\n return df", "def store_asset(self, asset, type_, layers, uid_prefix):\n logger.debug('Saving: %s' % asset)\n ci = cdb.CI()\n ci.uid = '%s-%s' % (uid_prefix, asset.id)\n ci.content_object = asset\n ci.type_id = type_\n try:\n # new CI\n ci.save()\n ci.layers = layers\n except IntegrityError:\n # Integrity error - existing CI Already in database.\n # Get CI by uid, and use it for saving data.\n ci = cdb.CI.get_by_content_object(asset)\n ci.name = '%s' % asset.name or unicode(asset)\n if 'barcode' in asset.__dict__.keys():\n ci.barcode = asset.barcode\n if isinstance(asset, db.Device):\n active = not asset.deleted\n else:\n active = True\n ci.state = (\n cdb.CI_STATE_TYPES.ACTIVE if active\n else cdb.CI_STATE_TYPES.INACTIVE\n )\n ci.save()\n return ci" ]
[ "0.67229545", "0.561765", "0.5484989", "0.5475443", "0.5324302", "0.5320905", "0.53020734", "0.524053", "0.51852304", "0.50320554", "0.50262564", "0.5022167", "0.49936855", "0.49604708", "0.4958038", "0.4957628", "0.49234056", "0.49139455", "0.48985356", "0.48847762", "0.48765716", "0.48656026", "0.48510313", "0.48301384", "0.48300618", "0.47931105", "0.47817457", "0.47758955", "0.47528148", "0.47302535" ]
0.6056459
1
Returns the encoded Data Item. Encodes the Data Item in the data field of the record according to the rules defined in the XML file.
def encode_dataitem(dfd, tree): assert type(dfd) is dict or type(dfd) is list for c in tree.getElementsByTagName('DataItemFormat'): for d in c.childNodes: if d.nodeName == 'Fixed': return encode_fixed(dfd, d) else: if d.nodeName == 'Variable': return encode_variable(dfd, d) else: if d.nodeName == 'Repetitive': return encode_repetitive(dfd, d) else: if d.nodeName == 'Compound': return encode_compound(dfd, d)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prep_posting_data(cls, items: List[dict]) -> bytes:\n\n def my_item_func(x):\n if x == \"contributors\":\n return \"contributor\"\n elif x == \"records\":\n return \"record\"\n\n xml = dicttoxml(\n items, custom_root=\"records\", attr_type=False, item_func=my_item_func\n )\n records_xml = parseString(xml)\n items = records_xml.getElementsByTagName(\"item\")\n for item in items:\n records_xml.renameNode(item, \"\", item.parentNode.nodeName[:-1])\n for item in records_xml.getElementsByTagName(\"contributor\"):\n item.setAttribute(\"contributorType\", \"Researcher\")\n return records_xml.toxml().encode(\"utf-8\")", "def _get_data(self, record, encoder):\n try:\n return encoder.transform(record)\n except AttributeError:\n return encoder(record)", "def encode_data(self, data):\n if self.unit == \"char\":\n data = self.char_encoding(data)\n elif self.unit == \"char-ngram\":\n data = self.ngram_encoding(data)\n elif self.unit == \"morpheme\" or self.unit == \"oracle\":\n data = self.morpheme_encoding(data)\n else:\n data = self.data_to_word_ids(data, False)\n return data", "def encode_data(data, item_to_encoding):\n for line in data:\n for i, itemset in enumerate(line[1:]):\n encoded_itemset = set()\n for item in itemset:\n encoded_itemset.add(item_to_encoding[item])\n line[i + 1] = encoded_itemset\n\n return data", "def to_bytes(self):\n pref = Utf8String(self.prefix)\n data = super(ElementRecord, self).to_bytes()\n type = data[0]\n return (type + pref.to_bytes() + data[1:])", "def enc(self, data):\n return data", "def decode(data): #@NoSelf", "def serialize(self, data):\n return data", "def _data_tag_element(dataarray, encoding, datatype, ordering):\n import zlib\n ord = array_index_order_codes.npcode[ordering]\n enclabel = gifti_encoding_codes.label[encoding]\n if enclabel == 'ASCII':\n da = _arr2txt(dataarray, datatype)\n elif enclabel in ('B64BIN', 'B64GZ'):\n out = dataarray.tostring(ord)\n if enclabel == 'B64GZ':\n out = zlib.compress(out)\n da = base64.b64encode(out).decode()\n elif enclabel == 'External':\n raise NotImplementedError(\"In what format are the external files?\")\n else:\n da = ''\n\n data = xml.Element('Data')\n data.text = da\n return data", "def __repr__(self):\n # For unknown rdata just default to hex\n return binascii.hexlify(self.data).decode()", "def init_data_item(self, data):\n construct_str = 'nodeitem.NodeItem('\n attrs = list()\n if data is not None:\n for attr, value in data.iteritems():\n if attr != 'data':\n attrs.append('%s=%s' % (attr, value))\n construct_str += str(attrs).strip('[]').replace('u\\'', '').replace('\\'', '')\n construct_str += ')'\n return eval(construct_str)", "def _encode_supplement(self):", "def _encode_supplement(self):", "def encode(self) :\n\t\tbitmap = ISO8583Bitmap()\n\t\ttexts=[]\n\t\tfor i in range(2,129) :\n\t\t\tid = 'f%03d' % i\n\t\t\tif hasattr(self,id) :\n\t\t\t\tv = getattr(self,id)\n\t\t\t\ttyp = self.desc_dict[id]['type']\n\t\t\t\tbitmap.setBitmap(i)\n\t\t\t\t# logit(\"%s:%s\" % (id,v))\n\t\t\t\ttxt = dataAttachTo8583(v,typ)\n\t\t\t\ttexts.append(txt)\n\t\treturn (bitmap,''.join(texts))", "def _encode(self, data):\n raise NotImplementedError(\"_encode needs to be implemented in {} subclass\".format(type(self).__name__))", "def packed_data_from_item(self, item):\n\n # Pack the header parts into a struct with the order:\n # (inUse, previousBlock, length, nextBlock)\n header_struct = struct.Struct(self.HEADER_STRUCT_FORMAT_STR)\n packed_data = header_struct.pack(item.inUse,\n item.previousBlock,\n item.length,\n 
item.nextBlock)\n # Pad the string to store with enough null bytes to fill the block\n padded_name = self.pad_string(item.string)\n\n return packed_data + bytes(padded_name)", "def get_data_item(self):\n raise exceptions.NotImplemented", "def convertData(data):\n\n return data", "def encode_data_readable(cls, obj, typedef):\n return cls.encode_data(obj, typedef)", "def serialize(self, data):", "def decode(data):\n raise NotImplementedError", "def decode(self, data):\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')", "def encode(self):\r\n # Create dict from attributes. Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))", "def encode_category(cat, did, tree):\n if did == {}:\n return 0, 0\n\n mdi = {}\n for c in tree.getElementsByTagName('DataItem'):\n di = c.getAttribute('id')\n if di.isdigit():\n di = int(di)\n rule = c.getAttribute('rule')\n if di in did:\n if verbose >= 1:\n print 'encoding dataitem', di\n l, v = encode_dataitem(did[di], c)\n mdi[di] = l, v\n else:\n if rule == 'mandatory' and verbose >= 1:\n print 'absent mandatory dataitem', di\n\n data_record = 0L\n n_octets_data_record = 0\n sorted_mdi_keys = sorted(mdi.keys())\n\n fspec_bits = []\n uap_tree = tree.getElementsByTagName('UAP')[0]\n for cn in uap_tree.childNodes:\n if cn.nodeName != 'UAPItem':\n continue\n\n uapi_value = cn.firstChild.nodeValue\n\n if uapi_value.isdigit():\n uapi_value = int(uapi_value)\n\n if uapi_value in sorted_mdi_keys:\n fspec_bits.append(int(cn.getAttribute('bit')))\n l, v = mdi[uapi_value]\n data_record <<= l * 8\n data_record += v\n n_octets_data_record += l\n\n if fspec_bits == []:\n print 'no dataitems identified'\n return 0, 0\n\n # FSPEC for data record\n max_bit = max(fspec_bits)\n n_octets_fspec = max_bit / 8 + 1\n\n # Fn\n fspec = 0\n for i in fspec_bits:\n fspec += (1 << (n_octets_fspec * 8 - 1 - i))\n\n # FX\n for i in range(n_octets_fspec - 1):\n fspec += (1 << ((n_octets_fspec - 1 - i) * 8))\n\n data_record += (fspec << (n_octets_data_record * 8))\n n_octets_data_record += n_octets_fspec\n\n return n_octets_data_record, data_record", "def decode(self, x):\n return x", "def getData(self):\n return utf8decoder(self.data)[0]", "def decode(self, encoded):", "def __bytes__(self):\n return bytes([self.type * 2, len(self.value)]) + bytes(self.value, 'utf-8')", "def __bytes__(self):\n return bytes([self.type * 2, len(self.value)]) + bytes(self.value, 'utf-8')" ]
[ "0.5869862", "0.5869558", "0.5837552", "0.5814655", "0.56730103", "0.5657856", "0.56101656", "0.5610163", "0.55314076", "0.544302", "0.5434582", "0.54142344", "0.54142344", "0.53586537", "0.53413665", "0.53378576", "0.5311251", "0.53084904", "0.529813", "0.52737993", "0.5266528", "0.52628624", "0.52621835", "0.5250218", "0.5231699", "0.522709", "0.5222802", "0.5214609", "0.52030593", "0.52030593" ]
0.5936579
0
Board Path Cloner This function is used to clone the BoardPath object.
def clone(self): # Run the constructor. other = BoardPath() # Copy the object variables other._current_cost = self._current_cost other._path = self._path[:] other._current_loc = self._current_loc return other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\n joined_function = lambda: dot_joiner(self.path, self.path_type)\n return self.__class__(self.path, self.configuration, self.converters, self.ignore_converters, joined_function=joined_function)", "def clone(self) -> 'BoardGraph':\n return self.__class__(self.board_class)", "def copy(self):\r\n\t\tnewBoard = BoardClass()\r\n\r\n\t\tfor row in self.board:\r\n\t\t\tnewBoard.board.append(row[:])\r\n\t\tnewBoard.x = self.x\r\n\t\tnewBoard.y = self.y\r\n\t\tnewBoard.heuristic = self.heuristic\r\n\t\tnewBoard.n = self.n\r\n\t\tnewBoard.hType = self.hType\r\n\t\tnewBoard.steps = self.steps\r\n\r\n\t\treturn newBoard", "def clone(self):\n copy = Board(self.game)\n for old_piece in self.game_pieces:\n copy.game_pieces.append(old_piece.clone())\n \n return copy", "def copy(self):\n return Node(deepcopy(self.board), self.location, self.stack_size, self.target_location, path=deepcopy(self.path))", "def deepCopy(self):\n clone = Connect_N_Board(self.getWidth(), self.getHeight())\n import copy\n clone.cell = copy.deepcopy(self.cell)\n return clone", "def clone(self):", "def copy(self):\n return PathPoint(self.species.new_species(), deepcopy(self.constraints))", "def clone(self):\n sc=copy.copy(self)\n sc.farms=list()\n for f in self.farms:\n sc.farms.append(f.clone(f.name, f.size))\n sc.airborne=list()\n for a in self.airborne:\n sc.airborne.append(a.clone(a.farma, a.farmb, a.distance))\n return sc", "def clone(self):\n \n return TTTBoard(self.dim, self.reverse, self.board)", "def clone(self):\n raise NotImplementedError", "def copy(self):\n return type(self)(self.game_board.copy(), self.current_piece)", "def __deepcopy__(self, memodict={}) -> 'Board':\r\n squares: Dict[Pos2D, Square] = deepcopy(self.squares)\r\n round_num: int = self.round_num\r\n phase: GamePhase = self.phase\r\n winner: PlayerColor = self.winner\r\n\r\n return Board(squares, round_num, phase, winner)", "def get_board_copy(self):\n board_copy = Board()\n board_copy._current_side_color = self._current_side_color\n board_copy._other_side_color = self._other_side_color\n board_copy._rubrics = copy.deepcopy(self._rubrics)\n\n # populate the dict with the copies of the objects:\n for x in range(8):\n for y in range(8):\n piece = board_copy._rubrics[x][y]\n if piece.piece_type != PieceType.PLACEHOLDER:\n board_copy._pieces[piece.color][piece.name] = piece\n\n return board_copy", "def clone(self):\n return TTTBoard(self._dim, self._reverse, self._board)", "def clone(self):\n st = Connect4Env(width=self.width, height=self.height)\n st.current_player = self.current_player\n st.winner = self.winner\n st.board = np.array([self.board[col][:] for col in range(self.width)])\n return st", "def copy(self):\r\n board = []\r\n for row in self.board:\r\n board.append([x for x in row])\r\n return Puzzle(board)", "def clone(self):\n copy = GamePiece((self.x, self.y), self.player)\n return copy", "def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy", "def __deepcopy__(self, memodict={}):\n dp = Board()\n dp.board = copy.deepcopy(self.board)\n dp.moves = copy.deepcopy(self.moves)\n dp.num_white_pieces = copy.deepcopy(self.num_white_pieces)\n dp.num_black_pieces = copy.deepcopy(self.num_black_pieces)\n dp.num_white_kings = copy.deepcopy(self.num_white_kings)\n dp.num_black_kings = copy.deepcopy(self.num_black_kings)\n return dp", "def copy(self):\r\n copy_board = Board(self._squareCount, 
self._pebbleCount)\r\n copy_board.squares = [list(row) for row in self.squares]\r\n return copy_board", "def clone(self):\n # copy an instance of the class\n clone = empty_copy(self)\n\n for k in self.__dict__.keys():\n if k not in [\"move_stack\", \"_stack\"]:\n setattr(clone, k, self.__dict__[k])\n else:\n setattr(clone, k, [])\n\n clone.occupied_co = deepcopy(self.occupied_co)\n\n return clone", "def clone(self):\n out, err, code = self.command( [\"git\", \"clone\", self.repo] )\n\n # find the directory into which the\n self.directory = self.path\n for path in os.listdir(self.path):\n self.directory = os.path.join(self.path,path)\n break", "def clone(self):\n return None", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def copy(self) -> 'Line':\n new = Line([cell.copy() for cell in self.cells], self.player)\n new.player_1, new.player_2 = self.player_1, self.player_2\n return new", "def clone(self):\n new_puzzle = Puzzle(self._height, self._width, self._grid)\n return new_puzzle", "def clone(self):\n new_puzzle = Puzzle(self._height, self._width, self._grid)\n return new_puzzle" ]
[ "0.69156903", "0.6680933", "0.6565345", "0.64692", "0.63603795", "0.63189757", "0.6306653", "0.62988687", "0.6227066", "0.6202716", "0.6115038", "0.6103501", "0.6099532", "0.60984504", "0.6085155", "0.6061067", "0.60572535", "0.6047523", "0.5992621", "0.59245497", "0.59100866", "0.5888408", "0.5879134", "0.58722574", "0.5812042", "0.5812042", "0.5812042", "0.5786727", "0.57847136", "0.57847136" ]
0.81588864
0
Current Location Accessor Function to get the current location for this path.
def get_current_location(self): return self._current_loc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_location(self):\n return self.enu_2_local()", "def get_current_location():\n global current_location\n return current_location", "def get_location(self):\r\n return self.__location", "def get_location(self):\n\t\treturn self.location", "def get_location(self):\n return self.location", "def getLocation(self):\n return self._Location", "def _get_current_location(self):\n return self.get_queryset().filter(status=self.model.CURRENT).first()", "def location(self):\n return self._location", "def location(self):\n return self._location", "def get_location(self) -> Union[str, None]:\n return self._get_location()", "def location(self) -> object:\n return self._location", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")" ]
[ "0.82560873", "0.8195256", "0.8011756", "0.79347134", "0.7920117", "0.78825885", "0.7742611", "0.7653603", "0.7653603", "0.7628534", "0.7536863", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.75195265", "0.74887115", "0.74887115", "0.74887115", "0.74887115", "0.74887115", "0.74887115" ]
0.8912243
0
Distance Calculator Flexible function for calculating the distance. Depending on the specified heuristic (either explicit in the call or implicit with the class), different distances can be returned for the same function.
def get_distance(self, heuristic=""): # If no heuristic is specified, used the default if(heuristic == ""): heuristic = BoardPath._heuristic if(heuristic == "manhattan"): return self.calculate_manhattan_dist() elif(heuristic == "euclidean"): return self.calculate_euclidean_dist() elif(heuristic == "made_up"): return self.calculate_made_up_dist() else: sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heuristic(current, goal):\r\n distance = getDistance(current, goal)\r\n return distance", "def test_distance(self):\n\n def f(a, b):\n if a == b:\n return 0\n if (a in \"UC\" and b in \"UC\") or (a in \"AG\" and b in \"AG\"):\n return 1\n else:\n return 10\n\n # uses identity function by default\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"U\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"UCCCCCUC\"), 3)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").distance(\"CCCCC\"), 5)\n # should use function if supplied\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"\", f), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"U\", f), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"C\", f), 1)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"G\", f), 10)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"UCCCCCUC\", f), 21)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").distance(\"CCCCC\", f), 50)", "def compute_distance(self, data):\n logger.info('Computing Distances')\n self.num_points = len(data)\n # TODO: Maybe make this into a single mapping function and remove elif\n # ladder\n if self.distance == 'mahalanobis':\n data = np.exp(-1 * data / data.std())\n self.conden_dist_mat = pdist(data, self.distance)\n elif self.distance in ['euclidean', 'cosine']:\n self.conden_dist_mat = pdist(data, self.distance)\n elif self.distance == 'chisqr':\n self.conden_dist_mat = pdist(data, chisqr)\n elif self.distance == 'intersection':\n self.conden_dist_mat = pdist(data, intersection)\n else:\n raise ValueError(\"distance type not supported\")\n self._cleanse_dist_mat()", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def _get_dist_func(distance_method):\n if distance_method == \"euclidean\":\n return metrics.euclidean_distance\n elif distance_method == \"cosine\":\n # Inverse of cosine similarity function return\n return metrics.cosine_distance\n elif distance_method == 'hik':\n return metrics.histogram_intersection_distance_fast\n else:\n # TODO: Support scipy/scikit-learn distance methods\n raise ValueError(\"Invalid distance method label. 
Must be one of \"\n \"['euclidean' | 'cosine' | 'hik']\")", "def heuristic(current, goal):\r\n # First tried manhattan distance but wasn't good enough so did direct distance which makes sense since the robot came move diagonally \r\n #return abs(current[0]-goal[0])+abs(current[1]-goal[1])\r\n return math.sqrt((current[0]-goal[0])**2+(current[1]-goal[1])**2)", "def _make_simple_distances():\n distances = {}\n def sym(desired, supported, strength):\n \"Define a symmetric distance between languages.\"\n desired_t = tuple(desired.split('-'))\n supported_t = tuple(supported.split('-'))\n distances[desired_t, supported_t] = strength\n distances[supported_t, desired_t] = strength\n\n def one(desired, supported, strength):\n \"Define a one-way distance between languages.\"\n desired_t = tuple(desired.split('-'))\n supported_t = tuple(supported.split('-'))\n distances[desired_t, supported_t] = strength\n\n def ok(desired, supported):\n \"Define the most common type of link: a one-way distance of 10.\"\n one(desired, supported, 10)\n\n sym('no', 'nb', 1)\n sym('hr', 'bs', 4)\n sym('sh', 'bs', 4)\n sym('sr', 'bs', 4)\n sym('sh', 'hr', 4)\n sym('sr', 'hr', 4)\n sym('sh', 'sr', 4)\n sym('ssy', 'aa', 4)\n one('gsw', 'de', 4)\n one('lb', 'de', 4)\n sym('da', 'no', 8)\n sym('da', 'nb', 8)\n ok('ab', 'ru')\n ok('ach', 'en')\n ok('af', 'nl')\n ok('ak', 'en')\n ok('ay', 'es')\n ok('az', 'ru')\n ok('az-Latn', 'ru-Cyrl')\n ok('be', 'ru')\n ok('bem', 'en')\n ok('bh', 'hi')\n ok('bn', 'en')\n ok('bn-Beng', 'en-Latn')\n ok('br', 'fr')\n ok('ceb', 'fil')\n ok('chr', 'en')\n ok('ckb', 'ar')\n ok('co', 'fr')\n ok('crs', 'fr')\n ok('cy', 'en')\n ok('ee', 'en')\n ok('eo', 'en')\n ok('et', 'fi')\n ok('eu', 'es')\n ok('fo', 'da')\n ok('fy', 'nl')\n ok('ga', 'en')\n ok('gaa', 'en')\n ok('gd', 'en')\n ok('gl', 'es')\n ok('gn', 'es')\n ok('gu', 'hi')\n ok('ha', 'en')\n ok('haw', 'en')\n ok('ht', 'fr')\n ok('hy', 'ru')\n ok('hy-Armn', 'ru-Cyrl')\n ok('ia', 'en')\n ok('ig', 'en')\n ok('is', 'en')\n ok('jv', 'id')\n ok('ka-Geor', 'en-Latn')\n ok('ka', 'en')\n ok('kg', 'fr')\n ok('kk', 'ru')\n ok('km', 'en')\n ok('km-Khmr', 'en-Latn')\n ok('kn', 'en')\n ok('kn-Knda', 'en-Latn')\n ok('kri', 'en')\n ok('ku', 'tr')\n ok('ky', 'ru')\n ok('la', 'it')\n ok('lg', 'en')\n ok('ln', 'fr')\n ok('lo', 'en')\n ok('lo-Laoo', 'en-Latn')\n ok('loz', 'en')\n ok('lua', 'fr')\n ok('mfe', 'en')\n ok('mg', 'fr')\n ok('mi', 'en')\n ok('mk', 'bg')\n ok('ml', 'en')\n ok('ml-Mlym', 'en-Latn')\n ok('mn', 'ru')\n ok('mr', 'hi')\n ok('ms', 'id')\n ok('mt', 'en')\n ok('my', 'en')\n ok('my-Mymr', 'en-Latn')\n ok('ne', 'en')\n ok('ne-Deva', 'en-Latn')\n sym('nn', 'nb', 10)\n ok('nn', 'no')\n ok('nso', 'en')\n ok('ny', 'en')\n ok('nyn', 'en')\n ok('oc', 'fr')\n ok('om', 'en')\n ok('or', 'en')\n ok('or-Orya', 'en-Latn')\n ok('pa', 'en')\n ok('pa-Guru', 'en-Latn')\n ok('pcm', 'en')\n ok('ps', 'en')\n ok('ps-Arab', 'en-Latn')\n ok('qu', 'es')\n ok('rm', 'de')\n ok('rn', 'en')\n ok('rw', 'fr')\n ok('sa', 'hi')\n ok('sd', 'en')\n ok('sd-Arab', 'en-Latn')\n ok('si', 'en')\n ok('si-Sinh', 'en-Latn')\n ok('sn', 'en')\n ok('so', 'en')\n ok('sq', 'en')\n ok('st', 'en')\n ok('su', 'id')\n ok('sw', 'en')\n ok('ta', 'en')\n ok('ta-Taml', 'en-Latn')\n ok('te', 'en')\n ok('te-Telu', 'en-Latn')\n ok('tg', 'ru')\n ok('ti', 'en')\n ok('ti-Ethi', 'en-Latn')\n ok('tk', 'ru')\n ok('tk-Latn', 'ru-Cyrl')\n ok('tlh', 'en')\n ok('tn', 'en')\n ok('to', 'en')\n ok('tt', 'ru')\n ok('tum', 'en')\n ok('ug', 'zh')\n ok('ur', 'en')\n ok('ur-Arab', 'en-Latn')\n ok('uz', 'ru')\n 
ok('uz-Latn', 'ru-Cyrl')\n ok('wo', 'fr')\n ok('xh', 'en')\n ok('yi', 'en')\n ok('yi-Hebr', 'en-Latn')\n ok('yo', 'en')\n ok('zu', 'en')\n sym('sr-Latn', 'sr-Cyrl', 5)\n one('zh-Hans', 'zh-Hant', 15)\n one('zh-Hant', 'zh-Hans', 19)\n sym('zh-Hant-HK', 'zh-Hant-MO', 3)\n\n return distances", "def _distance(self, X, X2=None):\n distance = self.distance\n distance_params = self.distance_params\n if distance_params is None:\n distance_params = {}\n\n if isinstance(distance, str):\n return pairwise_distance(X, X2, distance, **distance_params)\n else:\n if X2 is not None:\n return distance(X, X2, **distance_params)\n # if X2 is None, check if distance allows None X2 to mean \"X2=X\"\n else:\n sig = signature(distance).parameters\n X2_sig = sig[list(sig.keys())[1]]\n if X2_sig.default is not None:\n return distance(X, X2, **distance_params)\n else:\n return distance(X, **distance_params)", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def compute_distance(df):\n pass", "def calculate_all_distances(self):\n self.close_distance = self.calculate_distance(self.close_distance_factor)\n self.medium_distance = self.calculate_distance(self.medium_distance_factor)\n self.far_distance = self.calculate_distance(self.far_distance_factor)", "def find_distance_in_same_type(self):\n pass", "def heuristic(current, goal):\r\n # return 1\r\n i = current[0] - goal[0]\r\n j = current[1] - goal[1]\r\n return math.sqrt(math.pow(i,2) + math.pow(j,2)) # Your code here\r\n # return math.fabs(current[0] - goal[0]) + math.fabs(current[1] - goal[1])\r", "def calculate_fitness(info):\n return info['distance']", "def test_distance_function(self):\n if connection.ops.oracle:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n elif connection.ops.spatialite:\n if connection.ops.spatial_version < (5,):\n # SpatiaLite < 5 returns non-zero distance for polygons and points\n # covered by that polygon.\n ref_dists = [326.61, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n else:\n ref_dists = [0, 4891.20, 8071.64, 9123.95]\n htown = City.objects.get(name=\"Houston\")\n qs = Zipcode.objects.annotate(\n distance=Distance(\"poly\", htown.point),\n distance2=Distance(htown.point, \"poly\"),\n )\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance.m, ref, 2)\n\n if connection.ops.postgis:\n # PostGIS casts geography to geometry when distance2 is calculated.\n ref_dists = [0, 4899.68, 8081.30, 9115.15]\n for z, ref in zip(qs, ref_dists):\n self.assertAlmostEqual(z.distance2.m, ref, 2)\n\n if not connection.ops.spatialite:\n # Distance function combined with a lookup.\n hzip = Zipcode.objects.get(code=\"77002\")\n self.assertEqual(qs.get(distance__lte=0), hzip)", "def get_distance_metrics():\n\n return [HausdorffDistance(),\n AverageDistance(),\n MahalanobisDistance(),\n VariationOfInformation(),\n GlobalConsistencyError(),\n ProbabilisticDistance()]", "def calculateDistances(df):\n return", "def heuristic_2(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return np.sum(distance)", "def heuristic(p1, p2):\n x1, y1 = p1\n x2, y2 = 
p2\n heuristic = abs(x1 - x2) + abs(y1 - y2)\n return heuristic", "def distance_layer(self, output1, output2, distance):\n \n # Check definition in http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n # Redefined with L1 as per http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf\n if self._loss == 'contrastive loss':\n distance = 'l1'\n \n try:\n assert distance in ['cos','l1','l2']\n except:\n print('Similarity metric must be cosine, L1 or L2 distances')\n \n if distance == 'cos':\n distance = F.cosine_similarity(output1, output2, dim = -1, eps = self._eps)\n elif distance == 'l1':\n distance = self.dist_fc(torch.abs(output1 - output2)).squeeze(1)\n elif distance == 'l2':\n distance = self.dist_fc(torch.abs(output1 - output2) ** 2).squeeze(1)\n \n if self._loss != 'contrastive loss':\n # Passing the distance vector through a similarity function to squish it between 0 and 1\n if self._similarity_fn == 'sigmoid':\n distances = torch.sigmoid(distance)\n elif self._similarity_fn == 'exp':\n distances = torch.exp(-torch.abs(distance))\n elif self._similarity_fn == 'clamp':\n distances = torch.clamp(distance, min = 0.0)\n elif self._similarity_fn is None:\n distances = distance\n \n return distances", "def heuristic(current, goal):\r\n\r\n return Vector2.fromCell(current).distanceTo(Vector2.fromCell(goal))", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def get(cls, approach):\n return approach.distance", "def distance(self,x,y,**kwargs):\n pass", "def distances(self, distance_short: str = 'TORA') -> typing.Union[None, typing.List[int]]:\n distance_shorts = ['TORA', 'TODA', 'ASDA', 'LDA']\n distance_col = distance_shorts.index(distance_short) + 1\n distances = [re.search(r'(\\d+) M', row[distance_col])\n for row in self.airfield.data['2.13']['data'][2:]\n if row[0] == self.designation]\n distances = [int(distance.group(1)) for distance in distances if distance is not None]\n distances.sort(reverse=True)\n return distances or None", "def distance(p1, p2):\n return None", "def distances(self):", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in 
[\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def computeNearestNeighbor(username, users, distance_algorithm='euclidean'):\n distances = []\n for user in users:\n if user != username:\n if distance_algorithm == 'manhatten':\n distance = manhattan_distance(users[user], users[username])\n elif distance_algorithm == 'euclidean':\n distance = euclidean_distance(users[user], users[username])\n elif distance_algorithm == 'minkowski':\n distance = minkowski_distance(users[user], users[username], 5)\n distances.append((distance, user))\n\n # sort based on distance -- closest first!\n distances.sort()\n return distances" ]
[ "0.639866", "0.6343634", "0.61924297", "0.59297323", "0.59297323", "0.5853613", "0.5760782", "0.57106924", "0.56957674", "0.56786555", "0.56701374", "0.565588", "0.5652829", "0.56316733", "0.56216407", "0.5561049", "0.5556251", "0.55511826", "0.5511873", "0.550787", "0.5496774", "0.5494751", "0.5488978", "0.5482376", "0.54693925", "0.546848", "0.5462048", "0.5427062", "0.5424858", "0.5410035" ]
0.7659271
0
Manhattan Distance Calculator Calculates difference between current location and the goal location using Manhattan distance.
def calculate_manhattan_dist(self): return self._current_cost + abs(self._current_loc.get_row() - self._goal_loc.get_row()) +\ abs(self._current_loc.get_column() - self._goal_loc.get_column())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __manhattan(self, x_state, y_state, x_goal, y_goal):\n distance = (abs(x_state - x_goal) + abs(y_state - y_goal))\n return distance", "def manhattan_distance(self):\n return calculate_manhattan_distance(self.location, self.target_location)", "def manhattan_distance(self):\n x, y = self.start\n other_x, other_y = self.x, self.y\n print(abs(x - other_x) + abs(y - other_y))", "def manhattan_distance(x, y):\n return abs(x) + abs(y)", "def manhattan_heuristic(state, problem=None):\n return util.manhattanDistance(state[0], problem.goal)", "def manhattan_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n abs_x = abs((co_ords[index])[0] - (co_ords[goal.index(value)])[0])\r\n abs_y = abs((co_ords[index])[1] - (co_ords[goal.index(value)])[1])\r\n hval += abs_x + abs_y\r\n return hval", "def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)", "def manhattan_distance(origin, destination):\n return abs(destination.row - origin.row) + \\\n abs(destination.column - origin.column)", "def manhattanDistance(self):\n\n # Loop trough batteries and gridpoints calculate\n # manhattan distance between them\n for battery in self.batteries:\n for gridPoint in self.gridPoints:\n distance = (abs(gridPoint.xLocation - battery.xLocation)\n + abs(gridPoint.yLocation - battery.yLocation))\n gridPoint.manhattanDistance.append(distance)\n\n # If house on gridPoint, append distance to house\n for house in self.houses:\n if (house.xLocation == gridPoint.xLocation and\n house.yLocation == gridPoint.yLocation):\n house.manhattanDistance.append(distance)", "def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return(sum(tuple(abs(i-j) for i,j in zip(loc1,loc2))))\n # END_YOUR_CODE", "def heuristic_manhattan_distance(self):\n distance = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n i1, j1 = self._get_coordinates(self.position[i][j], self.PUZZLE_END_POSITION)\n distance += abs(i - i1) + abs(j - j1)\n\n return distance", "def return_manhattan_distance(coord1, coord2):\n x1, y1 = coord1\n x2, y2 = coord2\n\n return float(abs(x2-x1) + abs(y2-y1))", "def manhattan_distance(self):\n dist = 0\n for target, tile in zip(self.winCdt[:-1], self.tiles[:-1]):\n dist += abs(target[0] - tile[0]) + abs(target[1] - tile[1])\n return dist", "def manhattan_distance_between(start, destination):\n return abs(destination.x - start.x) + abs(destination.y - start.y)", "def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return sum([abs(loc2[i]-l1) for i, l1 in enumerate(loc1)])\n # END_YOUR_ANSWER", "def distManhattan(p1,p2):\n (x1,y1)=p1\n (x2,y2)=p2\n return abs(x1-x2)+abs(y1-y2)", "def manhattan_heuristic(pos, problem):\n return abs(pos[0] - problem.goal_pos[0]) + abs(pos[1] - problem.goal_pos[1])", "def manhattanDistance(Ship):\n\n return abs(Ship.n - Ship.s) + abs(Ship.e - Ship.w)", "def calculate_manhattan_dist(state):", "def ManhattanDistance(point1, point2):\n\n x1 = point1.x\n x2 = point2.x\n y1 = point1.y\n y2 = point2.y\n\n manhattandistance = np.abs(x1 - x2) + np.abs(y1 - y2)\n\n return manhattandistance", "def manhattan(self):\n distance = 0\n for i in range(3):\n for j in range(3):\n if self.plateau[i][j] != 0:\n x, y = divmod(self.plateau[i][j]-1, 3)\n 
distance += abs(x - i) + abs(y - j)\n return distance", "def _manhattan(pos1, pos2):\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))", "def manhattan_distance(x, y):\n return sum(abs(a - b) for a, b in zip(x, y))", "def calculate_manhattan(node_a, node_b):\n return (abs(node_a.x - node_b.x) + abs(node_a.y - node_b.y))", "def manhattan(x1, y1, x2, y2):\n return abs(x1 - x2) + abs(y1 - y2)", "def manhattan_dist(c1, c2):\n return abs(c1[0] - c2[0]) + abs(c1[1] - c2[1]) + abs(c1[2] - c2[2])", "def calc_manhattan(self, p_object):\n total = sum([self.manhattan(self[num], p_object[num]) for num in sorted(self.num_to_pos)[1:]])\n return total", "def manhattan(a, b):\n return abs(a[0] - b[0]) + abs(a[1] - b[1])", "def manhattan_distance(a: ArrayLike, b: ArrayLike) -> NDArrayFloat:\n\n return as_float(\n np.sum(np.abs(as_float_array(a) - as_float_array(b)), axis=-1)\n )", "def get_manhattan_distance(node):\n result = 0\n\n for idx, val in enumerate(node):\n if idx != val:\n result += abs(idx - val)\n\n return result" ]
[ "0.8165803", "0.8088598", "0.7407255", "0.73463696", "0.73238844", "0.7292226", "0.7271559", "0.7247168", "0.7209934", "0.71817213", "0.7162157", "0.71465975", "0.7138987", "0.71370137", "0.70733595", "0.7065889", "0.70469683", "0.70367557", "0.69594985", "0.69328445", "0.6893045", "0.6837461", "0.681442", "0.6744752", "0.6689913", "0.66768545", "0.6661329", "0.6641539", "0.66339475", "0.661586" ]
0.816172
1
Euclidean Distance Calculator Calculates difference between current location and the goal location using Euclidean distance.
def calculate_euclidean_dist(self): x_dist = self._current_loc.get_column() - self._goal_loc.get_column() y_dist = self._current_loc.get_row() - self._goal_loc.get_row() # Note ** is power operator in Python return self._current_cost + sqrt(x_dist**2 + y_dist**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euclidean_distance(self):\n return sqrt(pow((self.goal_pose.x - self.ground_truth_pose.x), 2) +\n pow((self.goal_pose.y - self.ground_truth_pose.y), 2))", "def getEuclideanDistance():\r\n global euclideanDistance\r\n return euclideanDistance", "def euclideanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return math.sqrt((loc1[1]-loc2[1])**2+(loc1[0]-loc2[0])**2)\n # END_YOUR_CODE", "def euclideanDistance(loc1, loc2):\n return math.sqrt(sum([(a - b) ** 2 for a, b in zip(loc1, loc2)]))", "def euclid_dist(location1, location2):\n return np.sqrt((location1[0] - location2[0]) ** 2 + (location1[1] - location2[1]) ** 2)", "def euclidean_distance(self, goal_pose):\n\t\treturn sqrt(pow((goal_pose.x - self.pose.x), 2) +\n\t\t\t\t pow((goal_pose.y - self.pose.y), 2))", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr", "def euclidean_distance(self, goal_pose):\n return sqrt(pow((goal_pose.x - self.pose.x), 2) +\n pow((goal_pose.y - self.pose.y), 2))", "def heuristic(current, goal):\r\n # First tried manhattan distance but wasn't good enough so did direct distance which makes sense since the robot came move diagonally \r\n #return abs(current[0]-goal[0])+abs(current[1]-goal[1])\r\n return math.sqrt((current[0]-goal[0])**2+(current[1]-goal[1])**2)", "def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm", "def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm", "def eucledian_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n sqr_x = ((co_ords[index])[0] - (co_ords[goal.index(value)])[0])**2\r\n sqr_y = ((co_ords[index])[1] - (co_ords[goal.index(value)])[1])**2\r\n hval += math.sqrt(sqr_x + sqr_y)\r\n return hval", "def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )", "def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))", "def change_in_bodypart_euclidean_distance(\n self,\n location_1: np.ndarray,\n location_2: np.ndarray,\n fps: int,\n px_per_mm: float,\n time_windows: np.ndarray = np.array([0.2, 0.4, 0.8, 1.6]),\n ) -> np.ndarray:\n distances = self.framewise_euclidean_distance(\n location_1=location_1, location_2=location_2, px_per_mm=px_per_mm\n )\n return self._relative_distances(\n distances=distances, fps=fps, 
time_windows=time_windows\n )", "def euclidean_distance(start, end):\n\n value = np.sqrt(np.sum(np.square(np.subtract(start, end)), axis=-1))\n return value", "def euclideanDistance(x1,y1,x2,y2):\n distance = math.sqrt(abs(math.pow((x2-x1),2)) + abs(math.pow((y2-y1),2)))\n return distance", "def eucl_dist(x_0, y_0, x_1, y_1):\n return sqrt((x_1 - x_0)**2 + (y_1 - y_0)**2)", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def get_euclidean_distance(self, x_coord_1, x_coord_2, y_coord_1, y_coord_2):\r\n\r\n return math.sqrt(((x_coord_1 - x_coord_2) ** 2) + \\\r\n ((y_coord_1 - y_coord_2) ** 2))", "def euclidean_heuristic_cost(curr, end):\n curr_x, curr_y = curr\n end_x, end_y = end\n return sqrt((curr_x-end_x)**2 + (curr_y-end_y)**2)", "def _calc_distance(self, checkpoint_loc):\n return N.sqrt((self.current_location[1] - checkpoint_loc[1])**2 \\\n + (self.current_location[0] - checkpoint_loc[0])**2)", "def euclidean_heuristic(pos, problem):\n return ((pos[0] - problem.goal_pos[0]) ** 2 + (pos[1] - problem.goal_pos[1]) ** 2) ** 0.5", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def euclidean_distance(x1, y1, x2, y2):\n distance = math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))\n return distance", "def calculate_distance(srcLong, srcLat, dstLong, dstLat):\n return math.sqrt( (srcLong-dstLong) ** 2 + (srcLat - dstLat) ** 2)", "def euclidean(self, other):\n return linalg.norm([self.x - other.x, self.y - other.y])", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)" ]
[ "0.7016859", "0.696656", "0.6942588", "0.6918102", "0.6652748", "0.66375005", "0.65335757", "0.64920557", "0.6466708", "0.6416474", "0.63655174", "0.63655174", "0.63360476", "0.6332252", "0.6328849", "0.632643", "0.63199997", "0.63198346", "0.63112885", "0.62728566", "0.62400174", "0.6238428", "0.6230635", "0.62029696", "0.6163992", "0.61617875", "0.6152477", "0.61478615", "0.6145427", "0.6141303" ]
0.78475267
0
Direct Blocked Path Checker This function checks whether all direct next moves from the current location are blocked by an impassable object or the edge of the board. This can be used to determine a penalty factor when calculating the heuristic. This function is used in the made_up heuristic function.
def _is_all_direct_next_moves_blocked(self, reference_board=None): # Use untraversed board if none is specified if reference_board is None: reference_board = BoardPath._untraversed_board # Case #1 - Goal and Current Location in the Same Row if self._current_loc.get_row() == self._goal_loc.get_row(): # Case 1A - Need to move left but path is blocked if self._current_loc.get_column() > self._goal_loc.get_column() and\ not self.is_move_valid("l", reference_board): return True # Case 1B - Need to move left but path is blocked elif self._current_loc.get_column() < self._goal_loc.get_column() and\ not self.is_move_valid("r", reference_board): return True else: return False # Case #2 - Goal and Current Location in the Same Row if self._current_loc.get_column() == self._goal_loc.get_column(): # Case 2A - Need to move left but path is blocked if self._current_loc.get_row() > self._goal_loc.get_row() and\ not self.is_move_valid("u", reference_board): return True # Case 1B - Need to move left but path is blocked elif self._current_loc.get_row() < self._goal_loc.get_row() and\ not self.is_move_valid("d", reference_board): return True else: return False # Case #3 - Goal and current location are diagonal from one another else: number_invalid_conditions = 0 # Case 3A - Check if need to move down but it is blocked if self._current_loc.get_row() < self._goal_loc.get_row() \ and not self.is_move_valid("d", reference_board): number_invalid_conditions += 1 # Case 3B - Check if need to move up but it is blocked if self._current_loc.get_row() > self._goal_loc.get_row() \ and not self.is_move_valid("u", reference_board): number_invalid_conditions += 1 # Case 3C - Check if need to move right but it is blocked if self._current_loc.get_column() < self._goal_loc.get_column() \ and not self.is_move_valid("r", reference_board): number_invalid_conditions += 1 # Case 3D - Check if need to move left but it is blocked if self._current_loc.get_column() > self._goal_loc.get_column() \ and not self.is_move_valid("l", reference_board): number_invalid_conditions += 1 # Only two direct moves when need to move diagonal. If invalid # count equals two, then return true as condition met. if number_invalid_conditions == 2: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_move_knight_legally_blocked(self):\n for piece in [('N', True), ('N', False)]:\n self.c.board = \\\n [[('K', piece[1]) for i in range(8)] for i in range(8)]\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n for dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def Breakwalls(self):\n \n \n if len(self.VisitedCoord)==self.TotalCells: #Base case for the recursive call.\n \n return self.VisitedCoord #When base case is hit, returns the list of all the visited cells. [[x,y],[x,y],[x,y],[x,y]]\n xval=self.CurrentCell[0] #Breaks Current Cell up, xval is the x value \n yval=self.CurrentCell[1] #yval is the y value\n \n \n if (yval+1==self.N+1 or [xval,yval+1] in self.VisitedCoord) and (yval-1==0 or [xval,yval-1] in self.VisitedCoord) \\\n and (xval+1==self.N+1 or [xval+1,yval] in self.VisitedCoord) and (xval-1==0 or [xval-1,yval] in self.VisitedCoord): #If the Cell is surrounded\n #and can't move \n self.CurrentCell=self.track.pop(self.CellStack) #Pop the last coord from the cell stack and make that current cell.\n #print(\"Current: \", self.CurrentCell)\n return self.Breakwalls() #Recursive call to Breakwalls \n \n self.track.push(self.CurrentCell,self.CellStack) #If cell not surrounded push the current cell onto the cellstack and begin looking for a neighbour \n while True: #Remember Cell stack is where you out your foot down.\n Directions=[\"North\",\"South\",\"East\",\"West\"]\n randir=randrange(0,len(Directions))\n dir=Directions[randir] #Choose a random direction \n #print(dir,yval+1,self.CurrentCell,self.VisitedCoord)\n \n if dir== \"North\" and yval+1<self.N+1 and [xval,yval+1] not in self.VisitedCoord: #if direction and not out of bounds. Self.N+ is the border.\n self.North[xval][yval]=self.South[xval][yval+1] = False #if less than that, you are within the border \n yval+=1;break \n elif dir ==\"South\" and yval-1>0 and [xval,yval-1] not in self.VisitedCoord: #in the southern part, 0 is the border.if >0, within actual maze.\n self.South[xval][yval]=self.North[xval][yval-1] = False \n yval-=1;break \n elif dir ==\"East\" and xval+1 <self.N+1 and [xval+1,yval] not in self.VisitedCoord:\n self.East[xval][yval]=self.West[xval+1][yval] = False\n xval+=1;break \n elif dir ==\"West\" and xval-1 > 0 and [xval-1,yval] not in self.VisitedCoord:\n self.West[xval][yval]=self.East[xval-1][yval] =False\n xval-=1;break\n\n #Above chooses a random direction and if condition checks out, breaks the wall by setting it to false and increments/decrements the respective value\n #to reflect N/S/E/W.\n self.CurrentCell=[xval,yval] #xval/yval was incremented so the new value remains, all thats left is to make the current cell that new coord.\n \n self.track.push(self.CurrentCell,self.VisitedCoord) #The new current cell is now pushed onto the visited coordinates stack \n \n return self.Breakwalls() ##Recursive call on the current cell. Everything happens again on that new coordinate. 
", "def check_directions(next_door, current_node, goal_node, chip, crossroad, travelled_path, colide): \n if next_door[2] < 0 or next_door[2] > 7:\n return crossroad\n\n # Check if the node is off the grid\n if next_door[0] < 0 or next_door[0] > chip.width - 1 or next_door[1] < 0 or next_door[1] > chip.height - 1:\n return crossroad\n\n (x, y, z) = current_node.position\n\n # Check whether a connection is already being used\n if chip.coordinates[z][y][x].connections[next_door].used:\n return crossroad\n\n next_node = chip.coordinates[next_door[2]][next_door[1]][next_door[0]]\n\n neighbour = nd.Node(next_door, current_node, next_node.cost, next_node.cost + next_node.distance_to_goal)\n\n if neighbour != goal_node and chip.coordinates[next_door[2]][next_door[1]][next_door[0]].gate is not None:\n return crossroad\n\n # Check whether the coordinate is already in the current path.\n if neighbour in travelled_path:\n return crossroad\n\n # Check whether neighbor is in open list and if it has a lower cost value\n if add_to_crossroad(neighbour, crossroad, colide):\n crossroad.append(neighbour)\n\n return crossroad", "def propogate(self):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n for DIR in [[1,0], [-1,0], [0,1], [0,-1]]:\r\n target_x, target_y = self.block_loc[0]+DIR[0], self.block_loc[1]+DIR[1]\r\n if 0 <= target_x < X and 0 <= target_y < Y: #if inbounds:\r\n target_block = grid[target_y][target_x]\r\n if not target_block.collapsed: #only ping uncollapsed blocks\r\n self.send_update(target_block,DIR)\r\n return", "def bfs(game, game_coords):\n # *** main queue to record steps and corresponding costs ***\n queue_moves = [[game.player.row, game.player.col]]\n cost_moves = [0]\n\n # record cost and illegal moves\n cost = 1\n declined_moves = []\n\n # record the moves in the previous turn(iteration)\n last_steps = [[game.player.row, game.player.col]]\n\n # ***** Step 1: Marking game board using cost *****\n while True:\n\n # struggled in a location, loss\n if not last_steps:\n return 0, 0, 0\n\n # collect all potential moves: left, down, right, up, teleport(if possible)\n potential_steps = []\n for step in last_steps:\n potential_steps.append(left(step))\n potential_steps.append(down(step))\n potential_steps.append(right(step))\n potential_steps.append(up(step))\n\n if search_coords(game_coords, step) in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n potential_steps.append(step)\n\n current_steps = []\n for step in potential_steps:\n if step in declined_moves:\n continue\n elif step in queue_moves:\n # the step existed in main queue, replace it if cost is lower, otherwise skip\n if cost >= cost_moves[queue_moves.index(step)]:\n if step != queue_moves[-1]:\n continue\n\n # check if move is legal\n will_move = step\n item = search_coords(game_coords, will_move)\n\n if item == '*' or item == -1:\n declined_moves.append(will_move)\n continue\n\n elif item == 'W':\n game.player.num_water_buckets += 1\n\n for i in range(len(game_coords['W'])):\n # water picked up, set current display from 'W' to ' ' in game_coords\n if game_coords['W'][i] == will_move:\n game_coords['W'].pop(i)\n game_coords[' '].append(will_move)\n break\n\n elif item == 'F':\n if game.player.num_water_buckets < 1:\n # cannot put out fire, refuse this move :(\n declined_moves.append(will_move)\n continue\n\n game.player.num_water_buckets -= 1\n elif item in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n for coords in game_coords[item]:\n if coords != will_move:\n will_move = coords\n break\n\n current_steps.append(will_move)\n\n # 
append to main queue\n queue_moves.append(will_move)\n cost_moves.append(cost)\n\n cost += 1\n\n # reach end point\n if game_coords['Y'][0] in current_steps:\n break\n\n # last_steps <- current_steps\n last_steps = []\n last_steps.extend(current_steps)\n\n cost -= 1\n\n # ***** Step 2: recall through main queue to generate a path *****\n # *** Queue: last in first out ***\n recall_moves = queue_moves[::-1]\n recall_cost = cost_moves[::-1]\n cursor = recall_moves[0]\n\n # generated path\n route = []\n\n # 'action to cmd' translator\n action_map = {(1, 0): 'w', (-1, 0): 's', (0, 1): 'a', (0, -1): 'd'}\n\n for i in range(len(recall_moves)):\n if recall_cost[i] == cost - 1:\n x, y = coords_sub(recall_moves[i], cursor)\n\n # simple move: left, down, right, up\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport move\n elif teleport_pair(cursor, game_coords) != -1:\n pair = teleport_pair(cursor, game_coords)\n x, y = coords_sub(recall_moves[i], pair)\n\n # teleport after simple move\n if abs(x) + abs(y) == 1:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, action_map[(x, y)])\n\n # teleport after no move ('e')\n elif abs(x) + abs(y) == 0:\n cursor = recall_moves[i]\n cost -= 1\n route.insert(0, 'e')\n\n # convert list of paths to string\n trace = ''\n for action in route:\n trace += action + ', '\n\n return 1, cost_moves[-1], trace", "def safe(self): \n wall_far = self.distmin > self.distmax*0.6\n # Check which way to go\n if wall_far:\n self.at_wall()\n return wall_far", "def isBlocked(mapObj, gameStateObj, x, y):\n\n if isWall(mapObj, x, y):\n return True\n\n elif x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return True # x and y aren't actually on the map.\n\n elif (x, y) in gameStateObj['stars']:\n return True # a star is blocking\n\n return False", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n # Make sure the position you're going into isn't your own piece\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n # Checking diagonals in the palace\n if cur_pos and new_pos in self._special:\n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking all possible diagonals\n elif new_row == cur_row + 1 and new_col == cur_col + 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row - 1 and new_col == cur_col - 1 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_row + 2 and new_col == cur_col + 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n elif new_row == cur_col - 2 and new_row == cur_col - 2 and self.check_path(cur_pos, new_pos, board, state) is True:\n return True \n # Checking if the movement is in the same column\n if new_col == cur_col and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n # Checking if the movement is in the same row\n elif new_row == cur_row and self.check_path(cur_pos, new_pos, board, state) is True:\n return True\n else:\n return False\n 
else:\n return False", "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def check_path(self, cur_pos, new_pos, board, state):\n\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n cannon_pieces = [Cannon('BLUE'), Cannon('RED')]\n \n # Ensures the range is always in the right order\n if new_row > cur_row: \n ran_r = range(cur_row + 1, new_row, 1)\n elif cur_row > new_row:\n ran_r = range(cur_row - 1, new_row, -1)\n \n elif new_col > cur_col:\n ran_c = range(cur_col + 1, new_col, 1)\n elif cur_col > new_col:\n ran_c = range(cur_col - 1, new_col, -1)\n else:\n return False\n \n # Checking if the movement is left or right is legal\n if new_row == cur_row:\n print(\"it's in the new_row == cur_row\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n print(counter)\n for col_spot in ran_c:\n if board[cur_row][col_spot] is not None:\n counter += 1\n\n if counter == 0: \n print(\"jump!\")\n return True\n \n # Checking if the movement vertical is legal\n if new_col == cur_col:\n print(\"it's in the new_col == cur_col\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n for row_spot in ran_r:\n if board[row_spot][cur_col] is not None:\n counter += 1\n print(board[row_spot][cur_col])\n print(counter)\n if counter == 0:\n print(\"jump!\")\n return True", "def astarSearchWithoutAdmissible(self):\n\n self.frontier = PriorityQueueImpl(self.priorityByHeuristicFunction)\n self.explored = set()\n\n if self.__isTileGoalState(self.startingPoint):\n print(\"Path is found: \" + str(self.startingPoint) + \" with 0 cost\")\n return\n\n self.frontier.enqueue(TileWithHeuristic(self.startingPoint, [], 0, not self.isAllTileAdmissible))\n\n while self.frontier.isEmpty() != True:\n tile = self.frontier.dequeue()\n tileCoordinate = tile.coordinate\n tileCost = tile.cost\n\n if self.__isTileGoalState(tileCoordinate):\n self.__printThePath(tile)\n return\n\n self.explored.add(tile)\n\n adjacentList = self.__findAdjacentsToThisPoint(tileCoordinate)\n for eachPoint in adjacentList:\n if not self.__isTileWall(eachPoint):\n eachTile = TileWithHeuristic(eachPoint, tile.pathToTile, self.__getElementFromPairs(eachPoint),\n not self.isAllTileAdmissible)\n if self.__isTileGoalState(eachTile.coordinate):\n eachTile.heuristic = 0\n if not self.__isTileInExplored(eachTile):\n eachTile.cost = self.__getElementFromPairs(eachPoint) + tileCost + 1\n eachTile.heuristicFunction = eachTile.cost + eachTile.heuristic\n self.frontier.enqueue(eachTile)", "def check_neighbours(matrix, cur_pos, visited):\n visited[cur_pos[0]][cur_pos[1]] = True\n\n for i in range(num_of_neighbours):\n cur_neighbour = (cur_pos[0]+neighbours_positions[i][0], cur_pos[1]+neighbours_positions[i][1])\n if is_safe(matrix, cur_neighbour, visited):\n check_neighbours(matrix, cur_neighbour, visited)", "def test_move_knight_illegally(self):\n self.c.board = [[(0, 0) for i in range(8)] for i in range(8)]\n for piece in [('N', True), ('N', False)]:\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n dests = [col + row for col in 'abcdefgh' for row in 
'12345678']\n for dest in dests:\n if dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n continue\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def _get_heuristic(self, game):\r\n board = game._get_bord()\r\n player = game._current_player\r\n size = game._size\r\n\r\n # [1] The more pawns one has compared to the number of pawns\r\n # the opponent has, the better.\r\n\r\n count_delta = self._get_fields_delta(board, player)\r\n\r\n # [2] The further advanced a pawn, the better.\r\n # Free paths are great.\r\n\r\n adv_board = deepcopy(board)\r\n usr_now_blocked = [False] * size\r\n opp_now_blocked = [False] * size\r\n\r\n # Traversal of board backwards for performance reasons.\r\n # (free paths flags)\r\n # Of course this could also be done by flipping calculation of\r\n # the row indices. But that seems counterintuitive.\r\n for r in range(size - 1, -1, -1):\r\n for c in range(size):\r\n # Row indices for both perspectives.\r\n # We will be travelling the board from both ends\r\n # at the same time.\r\n r_opp = r\r\n r_usr = size - 1 - r\r\n\r\n # Perspective of Player.USER.\r\n if board[r_usr][c] == Player.OPP:\r\n # If this field is occupied by the Player.OPP\r\n # and since we are travelling the board from the final row\r\n # a pawn of the Player.USER can reach,\r\n # we can set a flag to remember, that this col is now\r\n # blocked for all Player.USER's pawns less advanced.\r\n usr_now_blocked[c] = True\r\n elif board[r_usr][c] == Player.USER:\r\n # Evaluate the position of the Player.USER's pawn:\r\n # - the further advanced (given as value in r_usr),\r\n # the better.\r\n # - if the column ahead is free from Player.OPP's pawns,\r\n # gets a bonus.\r\n # To prevent each pawn from taking 2 fields as a first\r\n # step, subtracted 1 from value.\r\n adv_board[r_usr][c] *= (r_usr - 1) * (r_usr - 1)\r\n\r\n if not usr_now_blocked[c]:\r\n adv_board[r_usr][c] *= 10 # TODO: choose best weight\r\n\r\n # Perspective of Player.OPP.\r\n if board[r_opp][c] == Player.USER:\r\n # If this field is occupied by the Player.USER\r\n # and since we are travelling the board from the final row\r\n # a pawn of the Player.OPP can reach,\r\n # we can set a flag to remember, that this col is now\r\n # blocked for all Player.OPP's pawns less advanced.\r\n opp_now_blocked[c] = True\r\n elif board[r_opp][c] == Player.OPP:\r\n # Evaluate the position of the Player.USER's pawn:\r\n # - the further advanced (given as value in r_usr),\r\n # the better.\r\n # - if the column ahead is free from Player.OPP's pawns,\r\n # gets a bonus.\r\n # To prevent each pawn from taking 2 fields as a first\r\n # step, subtracted 1 from value.\r\n adv_board[r_opp][c] *= (r_opp - 1) * (r_opp - 1)\r\n\r\n if not opp_now_blocked[c]:\r\n adv_board[r_opp][c] *= 10 # TODO: choose best weight\r\n\r\n adv_delta = self._get_fields_delta(adv_board, player)\r\n\r\n # We refrain from adjusting weights of both aspects. 
Could be\r\n # optimized by collecting data.\r\n return adv_delta + count_delta", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def hill_climbing(self, variant='sideway', board=None, limit_sideway=100):\n if board and variant == 'sideway':\n current_board = board\n no_local_steps = 0\n while current_board.hcost != 0:\n print(current_board)\n best_neighbor, _ = self.get_best_neighbor(current_board, allow_sideway=False)\n if best_neighbor.hcost < current_board.hcost:\n current_board = best_neighbor\n no_local_steps += 1\n else:\n counter_sideway = 0\n updated = True\n while (best_neighbor.hcost >= current_board.hcost) and counter_sideway <=limit_sideway:\n print(current_board)\n current_board = best_neighbor\n no_local_steps += 1\n counter_sideway +=1\n best_neighbor, updated = self.get_best_neighbor(current_board, allow_sideway=True)\n if not updated:\n break\n if counter_sideway > limit_sideway or not updated:\n break\n current_board = best_neighbor\n print(current_board)\n if current_board.hcost != 0:\n print('SOLUTION NOT FOUND!!!')\n self.no_total_steps += no_local_steps\n else:\n print ('SOLUTION FOUND!!!')\n self.no_success += 1\n self.no_success_steps += no_local_steps\n self.no_total_steps += no_local_steps", "def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def is_path_correct(x, y, path, board):\n endX, endY = x, y\n for d in path:\n if board[endY][endX] == 1:\n return False\n if d == 'U':\n endY -= 1\n elif d == 'D':\n endY += 1\n elif d == 'R':\n endX += 1\n else:\n endX -= 1\n if len(board) - 1 > endY and len(board[0]) - 1 > endX:\n neighbours = get_neighbours(endX, endY, board)\n if 8 in neighbours:\n return append_step(path, neighbours)", "def astarSearchWithAdmissible(self):\n\n self.frontier = PriorityQueueImpl(self.priorityByHeuristicFunction)\n self.explored = set()\n\n if self.__isTileGoalState(self.startingPoint):\n print(\"Path is found: \" + str(self.startingPoint) + \" with 0 cost\")\n return\n\n self.frontier.enqueue(TileWithHeuristic(self.startingPoint, [], 0, self.isAllTileAdmissible))\n\n while self.frontier.isEmpty() != True:\n tile = self.frontier.dequeue()\n tileCoordinate = tile.coordinate\n tileCost = tile.cost\n\n if self.__isTileGoalState(tileCoordinate):\n self.__printThePath(tile)\n return\n\n self.explored.add(tile)\n\n adjacentList = self.__findAdjacentsToThisPoint(tileCoordinate)\n for eachPoint in adjacentList:\n if not self.__isTileWall(eachPoint):\n eachTile = TileWithHeuristic(eachPoint, 
tile.pathToTile, self.__getElementFromPairs(eachPoint), self.isAllTileAdmissible)\n if self.__isTileGoalState(eachTile.coordinate):\n eachTile.heuristic = 0\n if not self.__isTileInExplored(eachTile):\n eachTile.cost = self.__getElementFromPairs(eachPoint) + tileCost + 1\n eachTile.heuristicFunction = eachTile.cost + eachTile.heuristic\n self.frontier.enqueue(eachTile)", "def verify_legal_move(self, direction):\n for b_x, b_y in self.get_block_positions(self.active_piece.FIGURE):\n\n if direction == \"LEFT\":\n b_x -= 1\n elif direction == \"RIGHT\":\n b_x += 1\n elif direction == \"DOWN\":\n b_y += 1\n else:\n raise ValueError\n\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def special_open_neighbours(self, y, x):\n if self.table_state[y][x] != \"-\" and self.table_state[y][x] == self.flags_nearby(y, x):\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y: # do not open out of bounds\n continue\n # if it is a bomb but not flagged\n if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:\n self.show_answer_board([ye, xe])\n print \"KABOOM!\"\n 
return Minesweeper.IS_A_BOMB\n self.open_neighbours(y, x)\n self.print_table(self.table_state)\n return Minesweeper.NOT_A_BOMB", "def check_move(blocking):\n funcs = {\n \"up\": up,\n \"down\": down,\n \"left\": left,\n \"right\": right,\n \"attack\": attack,\n \"back\": back\n }\n passback = False\n for i in ACTIONS:\n if ACTIONS[i] and i not in blocking:\n funcs[i]()\n passback = True\n return passback", "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def _update_valid_directions(self, valid_directions, velocity):\n # If not preventing backtracking, all open directions are valid\n if not self._prevent_backtracking:\n return\n axis = np.argmax(np.abs(velocity))\n direction = np.sign(velocity[axis])\n\n # If velocity is zero, all open directions are valid\n if direction == 0:\n return\n \n # If hit a wall and allow wall backtracking, all open directions are\n # valid\n can_continue = valid_directions[axis, int(0.5 * (1 + direction))]\n if not can_continue and self._allow_wall_backtracking:\n return\n # If not hit a wall and only turn at wall, then continue\n if can_continue and self._only_turn_at_wall:\n valid_directions.fill(0)\n valid_directions[axis, int(0.5 * (1 + direction))] = 1\n return\n\n # If none of the above conditions are true, prevent backtracking\n valid_directions[axis, int(0.5 * (1 - direction))] = False", "def _get_easy_move(self):\n # Two lists keep track of regular moves and paths of possible jumps\n moves = []\n required_jumps = []\n for row in range(self._checkers.dimension):\n for col in range(self._checkers.dimension):\n # Check same color pieces as player to see if they can jump.\n if self._checkers.get(row, col) == self._player:\n path_made = False\n found_jumps = []\n # i represents an option of direction to check\n for i in range(3):\n current_path = []\n # Builds a path of jumps by checking for a jump each\n # move\n while (not path_made):\n jumps = self.check_for_jump(self._player, row, col)\n if jumps == []:\n path_made = True\n break\n current_path += (jumps)\n incrow = 0\n inccol = 0\n # South west and east\n if i == 0:\n incrow = 2\n inccol = 2\n elif i == 1:\n incrow = 2\n incrow = -2\n # North west and east\n elif i == 2:\n incrow = -2\n inccol = -2\n elif i == 3:\n incrow = -2\n incrow = 2\n row += incrow\n col += inccol\n found_jumps.append(current_path)\n if len(found_jumps) > 0:\n # If there is at least one path then we append it\n # to the list of jumps\n required_jumps += found_jumps\n else:\n # Checks if a move can be made in each direction\n 
north_west = self._checkers.get(row - 1, col - 1)\n north_east = self._checkers.get(row - 1, col + 1)\n south_west = self._checkers.get(row + 1, col + 1)\n south_east = self._checkers.get(row + 1, col - 1)\n if north_west == CheckersBoard.empty:\n moves.append(Move(row, col, -1, -1))\n if north_east == CheckersBoard.empty:\n moves.append(Move(row, col, -1, 1))\n if south_west == CheckersBoard.empty:\n moves.append(Move(row, col, 1, 1))\n if south_east == CheckersBoard.empty:\n moves.append(Move(row, col, 1, -1))\n # A random move is calculated for the lists of moves\n # If a move can be made we prioritize the list with possible moves\n random_index = 0\n if len(required_jumps) != 0:\n random_index = random.randint(0, len(required_jumps))\n move_path = required_jumps[random_index]\n return move_path\n else:\n random_index = random.randint(0, len(moves))\n move = moves[random_index]\n return [move]", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i + 1, j + 1):\n self.exchange(i, j)\n improve = True", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def passable(self, point):\n return point not in self.obstacles" ]
[ "0.61759573", "0.61158156", "0.5978466", "0.5958256", "0.59319913", "0.5661901", "0.56494457", "0.56460166", "0.5641585", "0.5622185", "0.5597397", "0.5537868", "0.54507625", "0.5450352", "0.5431192", "0.54221904", "0.5419678", "0.5415468", "0.5415228", "0.541518", "0.53948724", "0.53940666", "0.5375859", "0.5372002", "0.5371236", "0.5362984", "0.536168", "0.53573835", "0.5340144", "0.53053534" ]
0.7079593
0
Move Location Calculator Calculates the new location for a move in the specified direction.
def _calculate_move_location(self, direction):
    current_row = self._current_loc.get_row()
    current_column = self._current_loc.get_column()
    # Calculate the new location for a left move
    if (direction == "l"):
        return Location(current_row, current_column - 1)
    # Calculate the new location for an up move
    elif (direction == "u"):
        return Location(current_row - 1, current_column)
    # Calculate the new location for a right move
    elif (direction == "r"):
        return Location(current_row, current_column + 1)
    # Calculate the new location for a down move
    elif (direction == "d"):
        return Location(current_row + 1, current_column)
    return Location()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNewLocation(self, currentLocation, directionalMovement):\n x = currentLocation[0] + directionalMovement[0]\n y = currentLocation[1] + directionalMovement[1]\n return (x, y)", "def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation", "def move(self, deltaX, deltaY):\n\t\treturn Location(self.x + deltaX, self.y + deltaY)", "def calMove(playerLocation, nextLocation):\n move_vector = tuple(np.subtract(nextLocation, playerLocation))\n for MOVE in DIRECTION_TO_CALCULATION:\n if move_vector == DIRECTION_TO_CALCULATION[MOVE]:\n return MOVE\n return \"Not right\"", "def move(self, direction):\n # Ensure the move is valid\n assert self.is_move_valid(direction), \"Tried to make an invalid move\"\n # Calculate the move location.\n self._current_loc = self._calculate_move_location(direction)\n # Update the path.\n self._path.append(self._current_loc)\n # Increment the move cost.\n self._current_cost = self._current_cost + 1", "def move(self, deltaX, deltaY):\n return Location(self.x + deltaX, self.y + deltaY)", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == 
initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def move(self, delta_x, delta_y):\n return Location(self._x + delta_x, self._y + delta_y)", "def move(coord, direction):\n vInc, hInc = dirToIncrement(direction)\n return (coord[0]+vInc, coord[1]+hInc)", "def move(self, position, direction):\n i, j = position\n direction %= 360\n if direction == 0:\n return (i - 1, j)\n if direction == 90:\n return (i, j + 1)\n if direction == 180:\n return (i + 1, j)\n if direction == 270:\n return (i, j - 1)\n raise ValueError(f\"Maze.move called with bad angle = {direction}\")", "def move(self):\n c = self.get_position()\n\n f = c['f']\n if f == 'NORTH':\n c['y'] += 1\n elif f == 'EAST':\n c['x'] += 1\n elif f == 'SOUTH':\n c['y'] -= 1\n elif f == 'WEST':\n c['x'] -= 1\n\n if self.valid_position(c):\n self.update_position(c)\n else:\n raise ValueError('InvalidPosition')", "def get_final_location(*, from_location, with_orientation, with_plan_actions):\n location = from_location\n orientation = with_orientation\n plan_actions = with_plan_actions\n\n agent_location = location\n agent_orientation = orientation\n\n for action in plan_actions:\n if action == Hunter.Actions.MOVE:\n agent_location = agent_location + agent_orientation\n elif action == Hunter.Actions.LEFT:\n agent_orientation = -agent_orientation.get_perpendicular_vector_clockwise()\n elif action == Hunter.Actions.RIGHT:\n agent_orientation = agent_orientation.get_perpendicular_vector_clockwise()\n\n return (agent_location, len(plan_actions) + 9 if Hunter.Actions.SHOOT in plan_actions \n else len(plan_actions))", "def get_new_coordinate(x_y_coordinate: dict, move_direction: str) -> tuple:\n direction_dict = {'n': (0, -1), 's': (0, 1), 'w': (-1, 0), 'e': (1, 0)}\n x = x_y_coordinate['x'] + direction_dict[move_direction][0]\n y = x_y_coordinate['y'] + direction_dict[move_direction][1]\n return x, y", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def move(self, direction):\n try:\n\n if self.in_thing:\n print(\"You have to get out of the \" + str(*self.in_thing[-1]) +\n \" first\")\n return self\n if direction == 'north':\n if self.finished_places == 12:\n self.finished_places += 1\n return North(self.items, self.finished_places)\n if direction == 'up':\n if self.finished_places == 4:\n self.finished_places += 1\n return Up(self.items, self.finished_places)\n if direction == 'east':\n if self.finished_places == 2:\n self.finished_places += 1\n return East(self.items, 
self.finished_places)\n except AttributeError:\n self.items = []\n return self.move(direction)\n print(' you didn\\'t listen to my very subtle hints, i know it was hard'\n ' your lost now. if you remember the commands i told you you can'\n ' go back to where you left off and continue, just type \"QUIT\"')\n return Place(self.items, self.finished_places)\n\n # implement\n # return new instance on class", "def move_toward(state, location):\n return move_relative(state, location, True)", "def update_location(self):\n if self.simulation:\n return (self.y, self.x)\n else:\n raise NotImplementedError\n\n self.y = new_y\n self.x = new_x\n\n return (new_y, new_x)", "def move(self, direction: Direction) -> \"TilePosition\":\r\n return TilePosition(self.tile_x + direction.dx, self.tile_y + direction.dy)", "def moved(self, loc: Location) -> \"Mate\":\n\n def move(origin: Vector, vec: Vector, loc: Location) -> Tuple[Vector, Vector]:\n reloc = cast(Edge, Edge.makeLine(origin, origin + vec).moved(loc))\n v1, v2 = reloc.startPoint(), reloc.endPoint()\n return v1, v2 - v1\n\n origin, x_dir = move(self.origin, self.x_dir, loc)\n _, z_dir = move(self.origin, self.z_dir, loc)\n return Mate(origin, x_dir, z_dir)", "def new_location (x, y):\n North, South, West, East = walls(x,y)\n while True:\n direction = input('Direction: ').upper()\n\n if direction == 'N' and North:\n y += 1\n break\n elif direction == 'S' and South:\n y -= 1\n break\n elif direction == 'E' and East:\n x += 1\n break\n elif direction == 'W' and West:\n x -=1\n break\n else:\n print('Not a valid direction!')\n return x, y", "def move_relative(state, location, towards):\n move_options = util.move_options_to_list(state['move_options'])\n\n move_options = [m for m in move_options if m['type'] == 'move']\n\n if len(move_options) == 0:\n return None\n\n my_location_ = my_location(state)\n move_targets = np.array([m['target'] for m in move_options])\n target_locations = move_targets + my_location_\n\n distances_ = distances(location, target_locations)\n\n if towards:\n target_index = np.argmin(distances_)\n else:\n target_index = np.argmax(distances_)\n\n return move_options[target_index]", "def get_new_position(cls, position_x, position_y, direction):\n new_position_x = cls.calculate_position_x(position_x, direction)\n new_position_y = cls.calculate_position_y(position_y, direction)\n return new_position_x, new_position_y", "def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)", "def move(self, direction):\n path = self.location.get_path(direction)\n if path is not None:\n if not path.blocked:\n # exit current location\n self.location.exit()\n\n # enter new location\n self.location = path.destination\n self.location.enter()\n\n self.on_move.trigger()\n else:\n self.echo(self.text(\"PATH_BLOCKED\", direction=direction))\n else:\n self.echo(self.text(\"NO_PATH\", direction=direction))", "def _coordinate_after_moving(self, direction, coordinate):\n\n if direction == 'N':\n new_coordinate = Coordinate(coordinate.x, coordinate.y + 1)\n elif direction == 'S':\n new_coordinate = Coordinate(coordinate.x, coordinate.y - 1)\n elif direction == 'W':\n new_coordinate = Coordinate(coordinate.x - 1, coordinate.y)\n else:\n new_coordinate = Coordinate(coordinate.x + 1, coordinate.y)\n\n if not self._is_coordinate_in_the_grid(new_coordinate):\n raise 
RoverException(ExceptionMessages.OFF_GRID)\n\n if self._is_coordinate_occupied(new_coordinate):\n raise RoverException(ExceptionMessages.ROVER_COLLISION)\n\n return new_coordinate", "def get_move(self, direction):\n pos = self._state.index(0)\n row = pos // self._size\n col = pos % self._size\n moves = get_moves(self._size, col, row)\n new_state = self._state\n if direction in moves:\n if moves[direction]['is_movable']:\n new_state = move(self._state, pos, moves[direction]['rel_pos'])\n return Node(new_state, heuristic=self._heuristic,\n g_score=self._g_score+self._cost(self._state, new_state))", "def get_new_origin(self, direction=None):\n y, x = 1, 0\n direction_coords = {'origin': (0, 0), 'right': (0, 1), 'left': (0, -1)}\n if direction and direction in direction_coords:\n y, x = direction_coords[direction]\n return (self.origin[0] + y, self.origin[1] + x)", "def moveDirection(direction):\r\n global location\r\n\r\n if direction in worldRooms[location]:\r\n print('You move to the %s.' % direction)\r\n location = worldRooms[location][direction]\r\n displayLocation(location, default)\r\n else:\r\n print('You cannot move in that direction')", "def update_pos(self, move):\n change = Maze.moves[move]\n self.current_pos[0] += change[0]\n self.current_pos[1] += change[1]", "def move(self, direction: str) -> int:\n # O(1) per move\n\n cur_pos = self.positions[-1]\n\n move = self.moves[direction]\n new_pos = cur_pos[0] + move[0], cur_pos[1] + move[1]\n\n if new_pos[0] == self.height or new_pos[0] == -1 or new_pos[1] == self.width or new_pos[1] == -1 or (new_pos in self.positions_set and new_pos != self.positions[0]):\n return -1\n\n self.positions.append(new_pos)\n self.positions_set.add(new_pos)\n\n if self.eaten < len(self.food) and new_pos == self.food[self.eaten]:\n self.eaten += 1\n else:\n tail = self.positions.popleft()\n\n if tail != self.positions[-1]:\n self.positions_set.remove(tail)\n\n return self.eaten" ]
[ "0.6761584", "0.65838546", "0.6551763", "0.6501945", "0.6485612", "0.6441349", "0.6333201", "0.623619", "0.62274575", "0.6121013", "0.6087272", "0.60715115", "0.60713834", "0.6050827", "0.6002279", "0.59576184", "0.5944363", "0.59366095", "0.5923641", "0.59176326", "0.5860694", "0.58500755", "0.5840367", "0.5816014", "0.58086395", "0.5790771", "0.577743", "0.577234", "0.5763323", "0.57553667" ]
0.8537776
0
Goal Setter This function sets the goal for the board.
def set_goal(goal_loc):
    BoardPath._goal_loc = goal_loc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_goal(self, goal: GoalType) -> None:\n self.goal = goal", "def goal(self, goal):\n\n self._goal = goal", "def set_goal(self, goal):\r\n self.goal = goal\r\n self.start_time = self.get_current_time()", "def set_goal(self, **kwargs):\n return self.env.set_goal(**kwargs)", "def set_goal(self, x):\n self.controllers[0].set_goal(x)\n self.controllers[1].set_goal(x)", "def set_goal(self, goal):\n self._pid_lock.acquire() # Acquire Lock\n self._goal = goal\n self._pid_lock.release() # Release Lock", "def update_goal(self):\n pass", "def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False", "def setGoalNode(self, newGoal):\r\n\t\tself.goalNode = newGoal", "def set_goal(self, robot_id, task, pub_msg): \n pub_names = self.goal_pubs.keys()\n pub_objs = self.goal_pubs.values()\n for i in range(len(pub_names)):\n if robot_id == int(pub_names[i]):\n Goal = MoveBaseActionGoal()\n Goal.header.stamp = rospy.Time.now()\n Goal.header.frame_id = ''\n Goal.goal_id.stamp = rospy.Time.now()\n Goal.goal_id.id = str(int(task[0]))\n Goal.goal.target_pose.header.stamp = rospy.Time.now()\n Goal.goal.target_pose.header.frame_id = 'map'\n Goal.goal.target_pose.pose.position.x = task[1]\n Goal.goal.target_pose.pose.position.y = task[2]\n z_rot_rad = task[3] * np.pi / 180\n q = quaternion_from_euler(0, 0, z_rot_rad)\n Goal.goal.target_pose.pose.orientation.z = q[2]\n Goal.goal.target_pose.pose.orientation.w = q[3]\n pub_obj = pub_objs[i]\n pub_obj.publish(Goal)\n print(\"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \".\")\n msg_str = \"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \". Time: %s\" % rospy.Time.now().to_sec()\n pub_msg.publish(msg_str)\n break\n else:\n pass", "def assign_goal(self, goal_index):\n gearbox_index = int(np.floor(goal_index / self.cables_per_gearbox))\n cable_index = goal_index - gearbox_index * self.cables_per_gearbox\n # Activate the goal\n self.gearboxes[gearbox_index].hub_cable_goals[cable_index] = 1.", "def __init__(self, goal_pos=0.72, *args, **kwargs):\n super(Goalie, self).__init__(*args, **kwargs)\n self.goal=goal_pos", "def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)", "def _set_task(self, goal):\n if goal.actionID == 'dh_change':\n self.dh_change(goal)\n elif goal.actionID == 'set_rcvel':\n self.set_rcvel(goal)\n elif goal.actionID == 'gate_pass':\n self.gate_pass(goal)\n elif goal.actionID == 'object_center':\n self.object_center(goal)\n elif goal.actionID == 'arm':\n self.arm(goal.arm)\n elif goal.actionID == 'rc_off':\n self.rc_off()\n else:\n rospy.loginfo('%s actionID not recognized'%goal.actionID)", "def __init__(self, goal):\n self.goal = None\n self.goal_state_value_dict = dict()\n self.num_goals_to_satisfy = 0\n self.set_goal(goal)", "def _update_goals(self):\n print\"updating goals\"\n response = self.goal_tracker_call() # type: GoalsResponse\n self._goals = []\n for goal in response.goals: # type: Point\n self._goals.append([goal.x, goal.y, goal.z])\n self._num_goals = len(self._goals)\n\n self._current_belief = self._init_belief()", "def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length", "def set_goal_pos(self):\n goal_list = np.where(self.value_map == self.value_map.max())\n # assume the first one\n self.goal_pos = 
(goal_list[0][0], goal_list[1][0])", "def decideOnGoal(self):\r\n\r\n\t\tself.goalNode = self.simulationHandle.getMap().getRandomNode()", "def __init__(self, initial, goal=(3, 3, 0, 0, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)", "def set_heuristic(heuristic):\n BoardPath._heuristic = heuristic", "def goals_per_game(self, goals_per_game):\n\n self._goals_per_game = goals_per_game", "def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):\n\n self.goal = goal\n Problem.__init__(self, initial, goal)", "def __move(self):\n if self.goal is None:\n if self.tasks:\n self.goal = self.tasks.pop()\n self.goal_history.append(self.goal)\n self.logger.log(\n f\"Crewmate {self.agent_id} set as goal: {self.goal.name} in\" +\n f\" {self.game_map.room_names[self.goal.room_id]}\",\n Logger.LOG | Logger.PRINT_VISUAL)\n else:\n self.room = self.game_map.move_random(self)\n self.location_history.append(self.room)\n return\n\n if self.room is not self.goal.room_id:\n self.room = self.game_map.next_toward(self, self.goal.room_id)\n\n # Log the current room we are in: Either the room we moved to, or the room that happens to be the goal room\n self.location_history.append(self.room)", "def place_goal(self, point = \"random\"):\n if point != \"random\":\n self.maze[point[0]][point[1]] = self.goal_value\n self.goal_position = tuple([point[0],point[1]])\n\n else:\n # Find blank spaces for goal placement\n curr_blanks = np.where(self.maze == self.blank_value)\n\n # In a 2d array, curr_blanks should produce two arrays of equal length\n # Call one value in the range of those arrays to index\n value = randint(0, len(curr_blanks[0])-1)\n\n self.maze[curr_blanks[0][value]][curr_blanks[1][value]] = self.goal_value\n self.goal_position = tuple([curr_blanks[0][value],curr_blanks[1][value]])", "def set_curr_value(self, val):\n # only goal that is in progress can have it's current value changed\n if self._status != EGoalStatus.IN_PROGRESS:\n raise NotImplementedError('Cannot set value to finished or not started goal')\n # try cast to int - mainly for QuantifiedGoal representation\n val = self.fw.types.try_float_cast(val)\n # update both in the stages object and in raw data\n self._values[EStage.CURRENT] = self._data_process(val)\n self._skeleton.curr_value = val\n # use progressor to update the database\n self._progressor.dump_to_database(self)", "def goal_space(self, val: Union[List[ObservationSpace], ObservationSpace]):\n self._goal_space = val", "def __init__(self, initial, goal=None):\n self.initial = initial\n self.goal = goal", "def on_goal_gripper(self, received_goal_handle):\n\t\t# Checks if the joints are just incorrect\n\t\tself.init_trajectory_gripper()\n\t\trospy.loginfo(\"Updating gripper goal\")\n\t\t\t\t\n\t\tif set(received_goal_handle.get_goal().trajectory.joint_names) != set(self.gripperprefixedJointNames):\n\t\t\trospy.logerr(\"Received a goal with incorrect joint names: (%s)\" %\n\t\t\t\t\t\t ', '.join(received_goal_handle.get_goal().trajectory.joint_names))\n\t\t\treceived_goal_handle.set_rejected()\n\t\t\treturn\n\n\t\tif not trajectory_is_finite(received_goal_handle.get_goal().trajectory):\n\t\t\trospy.logerr(\"Received a goal with infinites or NaNs\")\n\t\t\treceived_goal_handle.set_rejected(text=\"Received a goal with infinites or NaNs\")\n\t\t\treturn\n\n\t\t# Checks that the trajectory has velocities\n\t\tif not has_velocities(received_goal_handle.get_goal().trajectory):\n\t\t\trospy.logerr(\"Received a goal without 
velocities\")\n\t\t\treceived_goal_handle.set_rejected(text=\"Received a goal without velocities\")\n\t\t\treturn\n\n\t\t# Inserts the current setpoint at the head of the trajectory\n\t\tnow = self.robot.getTime()\n\t\tpoint0 = sample_trajectory(self.trajectory_gripper, now - self.trajectory_gripper_t0)\n\t\tpoint0.time_from_start = rospy.Duration(0.0)\n\t\treceived_goal_handle.get_goal().trajectory.points.insert(0, point0)\n\t\tself.trajectory_t0 = now\n\n\t\t# Replaces the goal\n\t\tself.received_goal_handle = received_goal_handle\n\t\tself.trajectory_gripper = received_goal_handle.get_goal().trajectory\n\t\treceived_goal_handle.set_accepted()", "def __init__(self, initial, goal):\n self.initial = initial; self.goal = goal" ]
[ "0.7807485", "0.7740968", "0.75928867", "0.753877", "0.7105755", "0.69478315", "0.6941798", "0.66204673", "0.6585629", "0.64913666", "0.63631916", "0.6287369", "0.62222", "0.6091264", "0.6044519", "0.59357023", "0.59132695", "0.5908332", "0.5907529", "0.58134097", "0.5791385", "0.577219", "0.57581496", "0.5735471", "0.5718995", "0.5672503", "0.5665708", "0.56615376", "0.56539637", "0.56468403" ]
0.7828492
0
Untraversed Board Setter This function stores the untraversed board configuration.
def set_untraversed_board(board):
    BoardPath._untraversed_board = board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resetBoard(self):\n pass", "def reset(self, board):", "def reset_board(self):\n\n self.board = np.array(self.initial_board)", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def set_traversed_board(board):\n BoardPath._traversed_board = board\n # Extract the size of the traversed board\n # This is stored and used in \"made_up\" heuristic analysis\n BoardPath._traversed_board_size = 0\n for board_row in board:\n BoardPath._traversed_board_size += len(board_row)", "def set_board(board):", "def setBoard(self, board):\n\t\tself.gameBoard = board", "def clean(self):\n self.board_values = np.zeros((self.size, self.size))\n self.tiles_taken[:, :] = False", "def save_board_state(self):\n self.board_states.append([copy.deepcopy(self.stock), copy.deepcopy(self.wp), \n copy.deepcopy(self.foundations), copy.deepcopy(self.tableaus)])", "def set_board(self, configuration):\n self._config = list(configuration)\n self._houses_num = len(configuration)\n \n tmp_houses = list(configuration)\n tmp_houses.reverse()\n \n #assigning houses in reverse order\n for house in tmp_houses:\n self._houses.append(house)", "def restore(self, count, curr_dict):\n count.backtracks += 1\n for cell_name in self.board:\n self.board[cell_name].possible_values = curr_dict[cell_name][0]\n self.board[cell_name].is_assigned = curr_dict[cell_name][1]\n self.board[cell_name].value = curr_dict[cell_name][2]", "def erase(self):\n\tself.state={}\n\tself.display(update_board=0)", "def reset_board():\n board = initial_state()\n emit(\"update\", board)", "def get_captured(self, board):\n self.value = 0\n board.figures.remove(self)", "def resets_attributes(self):\n \n self.path_dict = None\n self.poss_dict = None\n self.check_dict = None\n self.long_dict = None\n self.rep_counter = 0\n self.cap_counter = 0\n \n self.board = []\n self.coords = []\n self.chess_coords = []\n self.empty = \" \"", "def reset(self):\n self.board = np.zeros(shape = self.dim, dtype = int)\n self.current_player = 1\n self.steps = 0\n self.invalid_moves = 0\n self._is_done = False\n return self.board", "def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy()\n self.v = self.vb.copy()\n if self.en_bias: self.b = self.bb.copy()", "def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]", "def reset_plugboard(self):\r\n self.plugboard1 = []\r\n self.plugboard2 = []", "def resetBoard(self):\n self.space1 = 0\n self.space2 = 0\n self.space3 = 0\n self.space4 = 0\n self.space5 = 0\n self.space6 = 0", "def reset(self):\n self.board = Board()\n self.winner = None", "def reset(self):\n self.board = place_mines(self.board_size, self.num_mines)\n self.my_board = np.ones((self.board_size, self.board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n return self.my_board", "def deconfigure(self):\n\n pass", "def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def reset_board(board):\n for idx in board.keys():\n board[idx] = ' '\n return board", "def reset(self):\n self.my_board = np.ones((self.board_size, self.board_size), dtype=int) * CLOSED\n self.board = place_mines(self.board_size, self.num_mines)\n self.num_actions = 0\n 
self.valid_actions = np.ones((self.board_size * self.board_size), dtype=bool)\n\n return self.my_board", "def board(self):\n return copy.deepcopy(self._board)", "def _board_after_move_only(source, dest, board):\n new_board = deepcopy(board)\n x_old, y_old, x_new, y_new = source[0], source[1], dest[0], dest[1]\n new_board[x_new][y_new] = new_board[x_old][y_old]\n new_board[x_old][y_old] = 0\n return new_board", "def get_board(self):\n return copy.deepcopy(self.board)" ]
[ "0.65023744", "0.6442172", "0.63471204", "0.63091016", "0.63091016", "0.6104631", "0.6043555", "0.5902821", "0.58919376", "0.5850664", "0.58014584", "0.57706535", "0.57347137", "0.5697121", "0.56899226", "0.5621945", "0.5598106", "0.5597589", "0.5568787", "0.5554146", "0.5552708", "0.55496514", "0.5529557", "0.55133843", "0.5501008", "0.5496407", "0.5480772", "0.547848", "0.5468285", "0.5460405" ]
0.7495511
0
Heuristic Setter This function stores the board path heuristic
def set_heuristic(heuristic):
    BoardPath._heuristic = heuristic
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def update_heuristic(self):\n self.heuristic = self.manhattan_distance()", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = 
state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n \"\"\"Returns the number of obstacles (wriggler segments or walls) between\n coord_a and coord_b.\n \n This function assumes that coord_b is larger (in either/both x and y)\n than coord_a.\n \"\"\"\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count\n\n\n head_coord = state.wriggler_list[0].get_head()\n tail_coord = state.wriggler_list[0].get_tail()\n \n head_manhattan_distance = get_manhattan_distance(head_coord, self.goal_coord)\n tail_manhattan_distance = get_manhattan_distance(tail_coord, self.goal_coord)\n \n # Calculate and return heuristic value depending on which heuristic to use\n if self.heuristic == Heuristic.MANHATTAN_DIST:\n # Return the shortest Manhattan distance of wriggler0's tail or head to the goal\n return min(head_manhattan_distance, tail_manhattan_distance)\n \n else: # self.heuristic == Heuristic.NUM_OBSTACLES:\n # Return the number of obstacles between wriggler0's tail/head to the goal\n # The tail/head is selected based on which is closer to the goal\n if head_manhattan_distance <= tail_manhattan_distance:\n # The head is closer or the same distance away\n return get_num_obstacles(head_coord, self.goal_coord)\n \n else:\n # The tail is closer\n return get_num_obstacles(tail_coord, self.goal_coord)", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula 
aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def getPath(self, heuristic):\n\t\t# make sure the board isn't already solved\n\t\tif self.board.isSolved():\n\t\t\treturn None\n\n\t\t# make sure the given heuristic is a function that can be called\n\t\tif not callable(heuristic):\n\t\t\treturn None\n\t\tself.heuristic = heuristic\n\n\t\t# initialize data structures for A*\n\t\tqueue = PriorityQueue()\n\t\texplored = {self.board.hash: True}\n\n\t\t# populate queue with moves and nodes\n\t\tself.populateQueue(self.board, queue, 
AStarNode(None, None, None, 0, 0))\n\n\t\twhile not queue.empty():\n\t\t\t# get the next position from the queue\n\t\t\tcost, node = queue.get()\n\n\t\t\t# make sure this position hasn't been explored\n\t\t\tif node.graph.hash not in explored:\n\t\t\t\t# check if the board is solved\n\t\t\t\tif node.graph.isSolved():\n\t\t\t\t\treturn node.reconstructPath()\n\n\t\t\t\t# if not solved, add to explored and populate queue\n\t\t\t\texplored[node.graph.hash] = True\n\t\t\t\tself.populateQueue(node.graph, queue, node)\n\n\t\treturn None", "def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")", "def use_manhatten_heur(self):\r\n\t\tdistance = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tintendedX, intendedY = BoardClass.goalTileLocations[self.board[row][col]]\r\n\t\t\t\tdistance += (abs(row - intendedX) + abs(col - intendedY))\r\n\r\n\t\tself.heuristic = distance", "def update(self, solution):\n self.heuristic_path = [i for i in self.initial_path if i in solution]\n self.heuristic_cost = self.pathCost(self.heuristic_path)", "def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n En este ejercicio me he dado cuenta de un problema de mi definición del espacio de estados:\n - El espacio de estados consiste en tuplas ((x,y), grid), donde (x,y) es la posición en coordenadas\n y grid es la tabla de true/false.\n - El problema es que yo he pensado la tabla grid en forma de matriz matemática, de manera que los índices\n no van de acuerdo con la posición de las esquinas, sinó con los índices de una matriz.\n Para solucionar este problema sin tener que modificar todo lo anterior (dado que no me queda tiempo) lo que he\n tenido que hacer es crear una lista y añadir de forma ordenada los valores true/false, para que se corresponda\n cada uno con su esquina.\n \n Mi heurística consiste en lo siguiente:\n * Calculo la distancia desde la posición en la que me sitúo hasta todos los corners no visitados (los que aún\n tienen comida) y me quedo con la mínima de estas distancias, y con el corner que me de esa mínima.\n * Calculo la distancia desde ese corner (el mínimo de antes) hasta todos los otros posibles corners no visitados\n y de nuevo me quedo con la mínima distancia y con el corner que 
me da esa mínima.\n * Repito este proceso hasta que no queden corners.\n Entonces lo que hago es definir una nueva lista de corners, newListOfCorners que irá extrayendo los corners a medida\n que su distanca sea calculada. Por ejemplo, si tengo los cuatro corners con comida y estoy en una posición \n aleatoria, la lista newListOfCorners estará llena. Se calculará la distancia a cada corner y el corner que de la \n mínima será extraído de newListOfCorners. Entonces se calculará la distancia desde este corner hasta los restantes\n tres corners de newListOfCorners y el corner de esos tres que me de la mínima será extraído de la lista. Etc...\n \"\"\"\n\n # Ordenamos la lista de True's y False's para que vaya acorde con el orden de la lista corners:\n visitedCorners = []\n visitedCorners.append(state[1][1][0])\n visitedCorners.append(state[1][0][0])\n visitedCorners.append(state[1][1][1])\n visitedCorners.append(state[1][0][1])\n corners = list(corners) # De aquí saco una lista que contenga los corners ordenados.\n # Ahora los corners y la lista de visitedCorners contendrán la información de forma ordenada y coherente\n minimum = 9999999999999999 # Defino un mínimo muy grande para asegurarme que nunca sea superado\n total = 0 # Inicializo el total a cero\n newListOfCorners = [] # Creo una nueva lista para añadir los corners no estudiados\n for corner in corners: # Primero vamos a llenar la lista de corners con los que me interesen: los que tienen comida\n if visitedCorners[corners.index(corner)]: # Miramos que el corner tenga comida, sino pasamos\n newListOfCorners.append(corner) # Si tiene comida, lo añadimos\n minimCorner = corners[0] # Inicializo el minimCorner a un corner aleatorio para que no me de problemas más tarde\n actualState = state[0] # Lo mismo\n\n while not len(newListOfCorners) == 0: # Mientras la lista no esté vacía...\n for corner in newListOfCorners: # Cogemos un corner de la lista\n distanceToCorner = manhattanHeuristicToCorners(actualState, corner) # Calculamos dist. 
a corner\n if distanceToCorner < minimum: # Calculamos el mínimo\n minimum = distanceToCorner\n minimCorner = corner\n total += minimum # Y lo añadimos al total\n actualState = minimCorner # Reactualizamos cada variable para volver a empezar el bucle\n minimum = 9999999999999999999999999999999\n newListOfCorners.remove(minimCorner)\n return total", "def heuristic(self):\n game_score = (self.get_game_score(), 0.85)\n road_score = (self.get_longest_road_score(), 0.05)\n steps_score = (self.get_steps_available_score(), 0.05)\n reachable_nodes_score = (self.get_reachable_nodes_score(), 0.05)\n heuristics = [game_score, road_score, steps_score, reachable_nodes_score]\n result = 0\n for score, weight in heuristics:\n result += score * weight\n if DEBUG_PRINT:\n print(f\"Heuristic value for location {self.loc} is {result}\")\n print(f\"\\treachable score: {reachable_nodes_score[0] * reachable_nodes_score[1]}\")\n print(f\"\\tsteps score: {steps_score[0] * steps_score[1]}\")\n print(f\"\\tlongest road score: {road_score[0] * road_score[1]}\")\n print(f\"\\tgame score: {game_score[0] * game_score[1]}\")\n return result", "def generate_heuristic(self):\r\n\t\tif self.hType == 0:\r\n\t\t\tself.use_displaced_heur()\r\n\r\n\t\telif self.hType == 1:\r\n\t\t\tself.use_manhatten_heur()", "def optimise(self):\n route = str(sorted(self.heuristic_path))\n\n if route in self.routes:\n saved = TSP.routes[route]\n self.heuristic_path = saved[\"path\"]\n self.heuristic_cost = saved[\"cost\"]\n else:\n self._optimise()\n\n return self.heuristic_path, self.heuristic_cost", "def astar(grid, heuristic):\r\n evaluatedMap = {}\r\n unevaluatedMap = {}\r\n start = grid.getStart()\r\n goal = grid.getGoals()[0]\r\n startG = 0\r\n startH = heuristic(start,goal)\r\n currentNode = Node(start,startH,startG)\r\n unevaluatedMap[currentNode.coord] = currentNode\r\n \r\n while len(unevaluatedMap) > 0:\r\n # I tried using a PriorityQueue but because a node could end up with \r\n # an updated priority it really didn't make sense to use one and\r\n # instead had to just serach the dictionary each time for the smallest\r\n # priority which is the sum of g and h\r\n currentNode = min(unevaluatedMap.values(),key=lambda x:x.g + x.h)\r\n \r\n # if the current node is the goal then create the path by iterating backwards\r\n # and pushing the current node to the front of the path and then moving to the\r\n # parent node\r\n if currentNode.coord == goal:\r\n path = []\r\n while currentNode.parentNode:\r\n path.insert(0,currentNode.coord)\r\n currentNode = currentNode.parentNode\r\n path.insert(0,currentNode.coord)\r\n grid.setPath(path)\r\n return\r\n \r\n # Move the current node to the evaluated map and delete it from\r\n # the unevaluated map\r\n evaluatedMap[currentNode.coord] = currentNode\r\n del unevaluatedMap[currentNode.coord]\r\n \r\n # Mark the current node as having been visited\r\n grid.addVisited(currentNode.coord)\r\n \r\n # Get the neighbors of the current node\r\n neighbors = grid.getNeighbors(currentNode.coord)\r\n\r\n # For each neighbor check if that neighbor has alread been evaluated\r\n # if it has then skip that neighbor. If it hasn't and it isn't in the\r\n # unevaluated map add it with a high cost and heuristic.\r\n # Get the neighbor from the unevaluated map and calculate the current\r\n # cost. 
If the current cost is less than what existed update the neighbor\r\n # and add it back to the list otherwise skip to next neighbor\r\n for neighbor in neighbors:\r\n ncoord = (neighbor[0])\r\n if (ncoord) in evaluatedMap:\r\n continue\r\n if (ncoord) not in unevaluatedMap:\r\n node = Node(ncoord,float('inf'),float('inf'))\r\n unevaluatedMap[ncoord] = node\r\n \r\n node = unevaluatedMap[ncoord]\r\n calc_cost = currentNode.g + neighbor[1]\r\n if calc_cost >= node.g:\r\n continue\r\n \r\n node.parentNode = currentNode\r\n node.g = calc_cost\r\n node.h = heuristic(ncoord,goal)", "def make_heuristic(self, heuristic_type: str, goal: Coordinates):\n raise NotImplementedError", "def _optimise(self):\n better = True\n self.solutions = set()\n\n # Rebuild the neighbours\n self.neighbours = {}\n\n for i in self.heuristic_path:\n self.neighbours[i] = []\n\n for j, dist in enumerate(TSP.edges[i]):\n if dist > 0 and j in self.heuristic_path:\n self.neighbours[i].append(j)\n\n # Restart the loop each time we find an improving candidate\n while better:\n better = self.improve()\n # Paths always begin at 0 so this should manage to find duplicate\n # solutions\n self.solutions.add(str(self.heuristic_path))\n\n self.save(self.heuristic_path, self.heuristic_cost)", "def astar_corner(maze):\n # TODO: Write your code here\n \"\"\"\n Plan:\n Do normal a* but then .clear visited after each new goal is found\n new h = Manhattan distance to the nearest goal and then the manhattan distance to the other goals starting from this nearest goal. \n new priority queue -- tuple (f, x&y, goals_left, \n \"\"\"\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n f = min_manhattan(goals, start)\n curr = (f, 0, f, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n #print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n #print(\"DONE\")\n #print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n #print(\"curr[6]: \", curr[6])\n #print(\"n: \", n)\n #print(\"curr[4]: \", curr[4])\n h2 = min_manhattan(curr[4], n)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n \n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]", "def _get_heuristic(self, game):\r\n board = game._get_bord()\r\n player = game._current_player\r\n size = game._size\r\n\r\n # [1] The more pawns one has compared to the number of pawns\r\n # the opponent has, the better.\r\n\r\n count_delta = self._get_fields_delta(board, player)\r\n\r\n # [2] The further advanced a pawn, the better.\r\n # Free paths are great.\r\n\r\n adv_board = deepcopy(board)\r\n usr_now_blocked = [False] * size\r\n opp_now_blocked = [False] * size\r\n\r\n # Traversal of board backwards for performance reasons.\r\n # (free paths flags)\r\n # Of course this could also be done by flipping calculation of\r\n # the row indices. 
But that seems counterintuitive.\r\n for r in range(size - 1, -1, -1):\r\n for c in range(size):\r\n # Row indices for both perspectives.\r\n # We will be travelling the board from both ends\r\n # at the same time.\r\n r_opp = r\r\n r_usr = size - 1 - r\r\n\r\n # Perspective of Player.USER.\r\n if board[r_usr][c] == Player.OPP:\r\n # If this field is occupied by the Player.OPP\r\n # and since we are travelling the board from the final row\r\n # a pawn of the Player.USER can reach,\r\n # we can set a flag to remember, that this col is now\r\n # blocked for all Player.USER's pawns less advanced.\r\n usr_now_blocked[c] = True\r\n elif board[r_usr][c] == Player.USER:\r\n # Evaluate the position of the Player.USER's pawn:\r\n # - the further advanced (given as value in r_usr),\r\n # the better.\r\n # - if the column ahead is free from Player.OPP's pawns,\r\n # gets a bonus.\r\n # To prevent each pawn from taking 2 fields as a first\r\n # step, subtracted 1 from value.\r\n adv_board[r_usr][c] *= (r_usr - 1) * (r_usr - 1)\r\n\r\n if not usr_now_blocked[c]:\r\n adv_board[r_usr][c] *= 10 # TODO: choose best weight\r\n\r\n # Perspective of Player.OPP.\r\n if board[r_opp][c] == Player.USER:\r\n # If this field is occupied by the Player.USER\r\n # and since we are travelling the board from the final row\r\n # a pawn of the Player.OPP can reach,\r\n # we can set a flag to remember, that this col is now\r\n # blocked for all Player.OPP's pawns less advanced.\r\n opp_now_blocked[c] = True\r\n elif board[r_opp][c] == Player.OPP:\r\n # Evaluate the position of the Player.USER's pawn:\r\n # - the further advanced (given as value in r_usr),\r\n # the better.\r\n # - if the column ahead is free from Player.OPP's pawns,\r\n # gets a bonus.\r\n # To prevent each pawn from taking 2 fields as a first\r\n # step, subtracted 1 from value.\r\n adv_board[r_opp][c] *= (r_opp - 1) * (r_opp - 1)\r\n\r\n if not opp_now_blocked[c]:\r\n adv_board[r_opp][c] *= 10 # TODO: choose best weight\r\n\r\n adv_delta = self._get_fields_delta(adv_board, player)\r\n\r\n # We refrain from adjusting weights of both aspects. 
Could be\r\n # optimized by collecting data.\r\n return adv_delta + count_delta", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def use_displaced_heur(self):\r\n\t\tdisplacedTiles = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tif 
self.board[row][col] != BoardClass.goal[row][col]:\r\n\t\t\t\t\tdisplacedTiles += 1\r\n\r\n\t\tself.heuristic = displacedTiles", "def heuristic_function_hard(node, current_player):\n alpha = 25\n beta = 10\n gamma = 40\n delta = -200\n\n x, y = node.get_neutron_coordinates()\n\n # Calculates the distance between the neutron and the player's home rank\n # and calculates how many paths exist for the neutron to reach the player's home rank\n # as well as for the opponent\n if current_player == PLAYER_A:\n neutron_distance = y\n\n player_paths = get_number_of_paths(node, x, y, PLAYER_A)\n opponent_paths = get_number_of_paths(node, x, y, PLAYER_B)\n\n else:\n neutron_distance = node.board_size - y\n\n player_paths = get_number_of_paths(node, x, y, PLAYER_B)\n opponent_paths = get_number_of_paths(node, x, y, PLAYER_A) \n\n surrounding_soldiers = 0\n\n # Calculates the number of surrounding soldiers\n for j in range(-1, 2):\n for i in range(-1, 2):\n\n if x + i < 0 or x + i >= node.board_size or y + j < 0 or y + j >= node.board_size:\n continue\n elif node.board[y+j][x+i] == PLAYER_A_SOLDIER_CHAR or node.board[y+j][x+i] == PLAYER_B_SOLDIER_CHAR:\n surrounding_soldiers += 1\n\n return alpha*neutron_distance + beta*surrounding_soldiers + gamma*player_paths + delta*opponent_paths", "def calculate_made_up_dist(self):\n\n # Ensure if current state equals goal, cost is only the current cost\n if self._goal_loc == self._current_loc:\n return self._current_cost\n\n # Distance is at least the Manhattan distance as cannot move diagonal\n estimated_distance = self.calculate_manhattan_dist()\n\n # Assume two board parts in the priority queue have the same weight.\n # For those board paths with higher actual cost and lower heuristic\n # cost, there is more assurance in the accuracy of the actual cost\n # than in the heuristic cost. Give a very small penalty (i.e. less\n # than one step) to prefer a path with a higher known cost than a\n # path with a higher heuristic cost.\n # Extract the number of portion of the move cost from the heuristic\n heuristic_cost = estimated_distance - self._current_cost\n # Heuristic cost penalty is normalized to a maximum of 0.1 steps\n # This is achieved by dividing the heuristic cost by the size of the\n # board. Since the heuristic cost can never be larger than the board\n # size, this quotient is less than or equal to 1. To normalize to a\n # maximum of 0.1, just multiply the number by 0.1. 
This is than added\n # to the estimated distance determined so far.\n heuristic_cost_penalty = 0.1 * heuristic_cost\n heuristic_cost_penalty /= BoardPath._traversed_board_size\n # Add what is essentially an \"uncertainty penalty\"\n estimated_distance += heuristic_cost_penalty\n\n # In case where all neighboring spaces are blocked or already\n # traversed, then set the path cost prohibitively large so it is\n # given minimum priority.\n if not (self.is_move_valid(\"d\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"u\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"l\", BoardPath._traversed_board)) \\\n and not (self.is_move_valid(\"r\", BoardPath._traversed_board)):\n # Total board area is sufficient as a prohibitive distance\n estimated_distance += BoardPath._traversed_board_size\n return estimated_distance\n\n # If all next steps that load directly to the goal are blocked, then\n # it takes at least two additional moves to get around the blocked\n # paths it (due to an obstacle or already traversed square) so add\n # two to the estimated distance to include that cost.\n if self._is_all_direct_next_moves_blocked(BoardPath._traversed_board):\n estimated_distance += 2\n\n # In a heap, if two nodes have the same cost, the object that was\n # put into the heap first in many implementations will be on top of the\n # heap. To make the algorithm more efficient, apply a slight penalty to\n # a non valid solution to ensure if an invalid solution and a valid\n # solution have the same cost that the valid solution would always be\n # on top of the heap. This is done by giving all non-valid solutions a\n # penalty term that is greater than zero and less than the minimum step\n # size (e.g. in this case 0 < 0.1 < 1).\n estimated_distance += 0.1\n\n # Return estimated distance\n return estimated_distance", "def heuristic(state, puzzle):\n h = 0\n for i in range(puzzle.dimension):\n for j in range(puzzle.dimension):\n # (0, 0) -> 1 as value, (0, 2) -> 3 as value, etc\n value = i * puzzle.dimension + j + 1\n if value == puzzle.dimension ** 2: # value is ' '\n value = ' '\n current_position = puzzle.get_coordinates(state, value)\n goal_position = (i, j)\n h += util.manhattanDistance(current_position, goal_position)\n h /= 2\n return h", "def loads_pathways(self, turn):\n black_coords, white_coords = self.parser()\n counter = 0\n path_dict, poss_dict, check_dict, long_dict = {BLACK : [], WHITE : []}, {BLACK : [], WHITE : []}, {BLACK : [], WHITE : []}, {BLACK : [], WHITE : []}\n \n for i in self.board:\n if i != self.empty:\n if i.colour == WHITE:\n path, poss, checked_path, long_path = i.available_moves(self.board, white_coords[counter], WHITE, self.coords[self.board.index(i)])\n counter += 1\n if path != None and path != []:\n path_dict[WHITE] += path\n if poss != None and poss != []:\n poss_dict[WHITE] += poss\n if checked_path != []:\n check_dict[WHITE] += (checked_path)\n if long_path != []:\n long_dict[WHITE] += long_path\n\n counter = 0\n\n for i in self.board:\n if i != self.empty:\n if i.colour == BLACK:\n path, poss, checked_path, long_path = i.available_moves(self.board, black_coords[counter], BLACK, self.coords[self.board.index(i)])\n counter += 1\n if path != None and path != []:\n path_dict[BLACK] += path\n if poss != None and poss != []:\n poss_dict[BLACK] += poss\n if checked_path != []:\n check_dict[BLACK] += (checked_path)\n if long_path != []:\n long_dict[BLACK] += long_path \n \n self.path_dict = path_dict\n self.poss_dict = poss_dict\n 
self.check_dict = check_dict\n self.long_dict = long_dict", "def get_heuristic(self):\n return self._heuristic", "def _select_heuristic(self):\n\n return None", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n frontier = util.PriorityQueue() # in heap stored as ( cost,priority,location)\n frontier.push(start, 0)\n explored = []\n\n location = 0 # to remember which successor part im accessing\n action = 1\n heap_location = 2\n cost = 2\n\n history = []\n total_cost = 0 # need something to process total path cost\n\n while not frontier.isEmpty():\n\n current_position = 
frontier.pop()\n if problem.isGoalState(current_position):\n break\n if current_position not in explored:\n explored.append(current_position)\n else:\n continue\n\n for path in problem.getSuccessors(current_position):\n # if path[location] not in explored: # hasn't been expanded from\n if path[location] not in [item[heap_location] for item in frontier.heap]: # if not in frontier\n # print(\"valid successor (no frontier)\", each_successor[location])\n\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n heuristic_cost = total_cost + heuristic(path[location], problem)\n frontier.push(path[location], path[cost] + total_cost + heuristic_cost)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n else:\n # print(\"in frontier\")\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n frontier.update(path[location], total_cost + path[cost])\n # should prob add something that goes through history and wipes old entry for that point\n for entry in history:\n if entry['To'] == path[location] and entry['Cost'] > total_cost + path[cost]:\n history.remove(entry)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n break\n while not problem.isGoalState(history[-1]['To']): # loop removes last couple of movements which don't lead to goal\n history.remove(history[-1])\n\n x = len(history)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if history[x - 1]['From'] != history[x - 2]['To']: # starts from goal and works backwards\n history.remove(history[x - 2])\n x = len(history)\n else:\n x -= 1\n\n return [path['By'] for path in history]", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n same as UCS function, but total cost is sum of cost till now , cost to the child node and \n cost to the goal state (heuristic function)\n \"\"\"\n fringes = util.PriorityQueue()\n explored =set()\n fringes.push((problem.getStartState(),[]),0)\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n finalPath = currDir\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n totalCost = (childNode[2] + heuristic(childNode[0],problem)+problem.getCostOfActions(currDir))\n fringes.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n return finalPath\n\n\n\n\n\n\n\n util.raiseNotDefined()", "def evaluate(self, board):\r\n\r\n self_moves = self.find_possible_moves(board, self.my_color)\r\n opponent_moves = self.find_possible_moves(board, self.opponent_color)\r\n\r\n mobility = 0 # Mobility captures Self's profit in amount of available moves\r\n disk_parity = 0 # Disk parity captures Self's profit in raw disk amount\r\n corners = 0 # Corners captures Self's profit in occupied corners\r\n corner_proximity = 0 # Corner proximity captures the risk of giving away a free corner\r\n stability = 0 # Stability captures Self's profit in unflippable disks\r\n\r\n # Calculating mobility heuristic\r\n self_immediate_mobility = len(self_moves)\r\n opponent_immediate_mobility = len(opponent_moves)\r\n\r\n if self_immediate_mobility + opponent_immediate_mobility != 0:\r\n mobility = 100 * (self_immediate_mobility - opponent_immediate_mobility) / 
(self_immediate_mobility + opponent_immediate_mobility)\r\n\r\n # Calculate disk parity heuristic\r\n self_disks = self.get_disk_count(self.my_color, board)\r\n opponent_disks = self.get_disk_count(self.opponent_color, board)\r\n\r\n disk_parity = 100 * (self_disks - opponent_disks) / (self_disks + opponent_disks)\r\n\r\n # Calculating corner heuristic\r\n corners_list = [(0,0), (0,7), (7,0), (7,7)]\r\n self_corners = 0\r\n opponent_corners = 0\r\n\r\n for corner in corners_list:\r\n if board[corner[0]][corner[1]] == self.my_color:\r\n self_corners += 1\r\n if board[corner[0]][corner[1]] == self.opponent_color:\r\n opponent_corners += 1\r\n\r\n if self_corners + opponent_corners != 0:\r\n corners = 100 * (self_corners - opponent_corners) / (self_corners + opponent_corners)\r\n\r\n # Calculating corner proximity heuristic\r\n corners_proximity_list = [(0, 1), (1, 0), (1, 1), (0, 6), (1, 6), (1, 7), (6, 0), (6, 1), (7, 1), (6, 6), (7, 6), (6, 7)]\r\n self_corner_proximity = 0\r\n opponent_corner_proximity = 0\r\n\r\n for cell in corners_proximity_list:\r\n if board[cell[0]][cell[1]] == self.my_color:\r\n self_corner_proximity += 1\r\n if board[cell[0]][cell[1]] == self.opponent_color:\r\n opponent_corner_proximity += 1\r\n\r\n if self_corner_proximity + opponent_corner_proximity != 0:\r\n corner_proximity = 100 * (self_corner_proximity - opponent_corner_proximity) / (self_corner_proximity + opponent_corner_proximity)\r\n\r\n # Calculating stability heuristic\r\n self_stability = self.get_stable_disks(board, self.my_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 7))\r\n\r\n opponent_stability = self.get_stable_disks(board, self.opponent_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 7))\r\n\r\n if self_stability + opponent_stability != 0:\r\n stability = 100 * (self_stability - opponent_stability) / (self_stability + opponent_stability)\r\n\r\n # Calculating the final value\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n # In early-game, focus on maximal mobility and stability. Avoid amassing too many disks.\r\n if disk_total < 15:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 30 * mobility + \\\r\n 30 * stability\r\n\r\n # In mid-game, focus on capturing corners and further building stability\r\n elif disk_total < 45:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 20 * mobility + \\\r\n 35 * stability\r\n\r\n # In late-game, focus on getting as many discs as possible\r\n else:\r\n heuristic_value = 30 * corners + \\\r\n 15 * mobility + \\\r\n 30 * stability + \\\r\n 35 * disk_parity\r\n\r\n return heuristic_value" ]
[ "0.72173494", "0.7166709", "0.68784773", "0.68497974", "0.6678026", "0.6647084", "0.6623912", "0.6621537", "0.6619162", "0.65451026", "0.6475537", "0.6408203", "0.6355258", "0.631753", "0.62351453", "0.6221724", "0.6215816", "0.6163034", "0.61629605", "0.61505026", "0.61504894", "0.6143098", "0.61388594", "0.61387783", "0.6131147", "0.6114502", "0.61005", "0.6090942", "0.60811496", "0.6064201" ]
0.77581763
0
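The negatives attached to the record above are dominated by A*-style search code that orders frontier states by f = g + h with a Manhattan-distance heuristic. As a point of comparison, here is a minimal, self-contained sketch of that pattern; the grid, wall set, and function names are hypothetical and are not taken from any of the snippets above.

import heapq

def manhattan(a, b):
    # Admissible heuristic on a 4-connected grid (no diagonal moves).
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def astar(start, goal, walls, width, height):
    # Heap entries are (f, g, cell, path); the heap orders expansions by f = g + h.
    open_heap = [(manhattan(start, goal), 0, start, [start])]
    best_g = {start: 0}
    while open_heap:
        f, g, cell, path = heapq.heappop(open_heap)
        if cell == goal:
            return path
        if g > best_g.get(cell, float("inf")):
            continue  # stale entry: a cheaper route to this cell was already found
        x, y = cell
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if not (0 <= nxt[0] < width and 0 <= nxt[1] < height) or nxt in walls:
                continue
            ng = g + 1
            if ng < best_g.get(nxt, float("inf")):
                best_g[nxt] = ng
                heapq.heappush(open_heap, (ng + manhattan(nxt, goal), ng, nxt, path + [nxt]))
    return []  # no route found, mirroring the "return []" convention in several snippets above

For example, astar((0, 0), (3, 3), {(1, 1), (1, 2)}, 4, 4) returns one shortest path as a list of cells.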
Less Than Operator Less than operator used for the distance of two BoardPath objects.
def __lt__(self, other): return self.get_distance() < other.get_distance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __lt__(self, other):\n return self.lessThan(other)", "def less(lhs, rhs):\n return _make.less(lhs, rhs)", "def __lt__(self, other):\n return self.dist_from_source < other.dist_from_source", "def __lt__(self, other):\n return self.abs2phy.__lt__(other)", "def __lt__(self, other):\n return self.abs2phy.__lt__(other)", "def __lt__(self, other):\n return self.x ** 2 + self.y ** 2 < other.x ** 2 + other.y ** 2", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def __lt__(self, other):\n # Prioritize depth (as seen in https://gatech.instructure.com/courses/60478/external_tools/81).\n return (self._lower_bound / len(self._path)) < (other._lower_bound / len(other._path))\n # Prioritize breadth.\n # return self._lower_bound < other._lower_bound", "def less_than(self) -> global___Expression:", "def less(x1, x2):\n return compare_chararrays(x1, x2, '<', True)", "def lt(self, x, y):\n return self.le(x,y) and x != y", "def __lt__(self, other):\n return less(self, other)", "def test_less_than_bcast(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def lt (x,y):\n\n return not le(y,x)", "def __le__(self, other):\n return self.lessThanOrEqual(other)", "def __lt__(self, other):\n return self.weight() < other.weight()", "def __lt__(self, other):\n return (self.cost + self.heuristic) < (other.cost + other.heuristic)", "def __lt__(self, other):\n return self.weight < other.weight", "def __lt__(self, other):\n\n # seems like this should be == -1 but we're using a min heap\n return self._comparator.compare_measurements(self, other) == 1", "def __lt__(self, other: 'LTL'):\n lt = self <= other\n neq = self != other\n return lt and neq", "def __lt__(self, other):\n return self.y < other.y or (\n not self.y > other.y and\n self.x < other.x\n )", "def __lt__(self, other):\n return self._value < other.value_in_unit(self.unit)", "def __lt__(self, other):\n\t\tselfAttrs = (self.inflatedCost, self.label.winery.name, self.label.name, self.label.vintage)\n\t\totherAttrs = (other.inflatedCost, other.label.winery.name, other.label.name, other.label.vintage)\n\t\treturn selfAttrs < otherAttrs", "def __lt__(self, other):\r\n return self.estimateCost < other.estimateCost", "def __lt__(self, other):\n try:\n return self.length2 < other.length2\n except AttributeError:\n return assert_unorderable(self, other)", "def __lt__(self, other):\n return sum([node.h for node in self.nodes]) + self.cost < sum([node.h for node in other.nodes]) + other.cost", "def __lt__(self, *args):\n return _ida_hexrays.operand_locator_t___lt__(self, *args)", "def __lt__(self, other):\n return self._ss_d < other._ss_d", "def assert_less(self, a, b):\n if not a < b:\n raise AssertionError('%s not less than %s' % (str(a), str(b)))", "def __lt__(self, secondPoint):\n return self.value < secondPoint.value" ]
[ "0.70282334", "0.69592035", "0.6899089", "0.68775755", "0.68775755", "0.6856517", "0.6844349", "0.6821911", "0.67643285", "0.6722422", "0.6687183", "0.6684212", "0.6676479", "0.6667317", "0.6630886", "0.66158414", "0.6570471", "0.65577924", "0.65521175", "0.65098625", "0.6481857", "0.64767075", "0.64710796", "0.64705867", "0.6449059", "0.6446536", "0.6416788", "0.63992256", "0.63579166", "0.63578236" ]
0.7412242
0
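The positive document above only defines __lt__, but the reason it matters is that heapq and queue.PriorityQueue fall back on the < operator to order their items, exactly as the A*-style negatives do when they push states onto a frontier. A small sketch with a hypothetical Path class standing in for BoardPath:

import heapq

class Path:
    # Hypothetical stand-in for a BoardPath-like object; only the distance drives ordering.
    def __init__(self, name, distance):
        self.name = name
        self._distance = distance

    def get_distance(self):
        return self._distance

    def __lt__(self, other):
        # Same pattern as the positive document: compare paths by their distance.
        return self.get_distance() < other.get_distance()

frontier = [Path("b", 7), Path("a", 3), Path("c", 5)]
heapq.heapify(frontier)
print(heapq.heappop(frontier).name)  # "a" — the path with the smallest distance comes out first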
Row Number Accessor Accessor to get the row number of this location.
def get_row(self): return self._row_number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def row_counter(self) -> int:\n return self.writer.row_counter", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def row(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"row\")", "def get_nrows(self):\n return self._nrows", "def get_nrows(self):\n return self.nrows", "def get_nrows(self):\n return self.nrows", "def get_row_number(self):\n return int(len(self.data_items)/12)", "def line_number(self):\n return self._line_number", "def line_no(self):\n return self._line_no", "def row_count(self) -> str:\n return self.__row_count", "def row_count(self):\n return self._row_count", "def row(self):\n\t\tif self._parent != None:\n\t\t\treturn self._parent._children.index(self)\n\t\telse:\n\t\t\treturn 0", "def row_count(self):\n return self.__row_count", "def line_num(self):\n if not self.resolved:\n self._resolve_reader()\n\n if isinstance(self.resolved, WriterType):\n raise AttributeError('CSV Writer object has no attribute line_num')\n\n return self.resolved.line_num", "def get_row_index(self):\n for row in range(self.model.rowCount()):\n name_item = self.model.item(row, self.COL_NAME)\n fullpath = name_item.data(self.ROLE_FULLPATH)\n if fullpath == self.filepath:\n return row", "def lineno(self):\n return self._lineno", "def __get_row(self, index: int) -> int:\n return index // self.columns" ]
[ "0.75195545", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74487907", "0.74372005", "0.74104637", "0.74104637", "0.7353418", "0.7310405", "0.7054059", "0.69452375", "0.69098055", "0.69055635", "0.68793154", "0.68637764", "0.68410707", "0.6810289", "0.67826873" ]
0.81669563
0
Column Number Accessor Accessor to get the column number of this location.
def get_column(self): return self._column_number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def column(self) -> int:\n return self._column", "def get_column(self, pos, **opts):\n lnum, cnum = self._get_column(pos, **opts)\n return lnum + self.LINE_NUM_BASE, cnum + self.COLUMN_NUM_BASE", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self): \r\n\r\n return self._column", "def _get_column(self, pos, **opts):\n lnum, cpos = self._get_linepos(pos)\n start, end = self._get_linespan(lnum)\n return lnum, self._pos2col(start, cpos, **opts)", "def _get_column_offset(self, lnum, colnum, **opts):\n start, end = self._get_linespan(lnum)\n length = end - start\n cpos = self._col2pos(start, colnum, **opts)\n if cpos < 0 or cpos >= length:\n raise IndexError(\"column out of bounds\")\n\n return start + cpos", "def column(self):\n return self[\"column\"]", "def col(self):\n return self.address.col", "def get_column_offset(self, lnum, colnum):\n return self._get_column_offset(lnum - self.LINE_NUM_BASE,\n colnum - self.COLUMN_NUM_BASE)", "def getColumn(self):\n return _libsbml.SBase_getColumn(self)", "def col_count(self):\n return self.__col_count", "def get_columns(self) -> int:\r\n return 1 + self.display.get_columns() + 1", "def column_index(self, column_name: str) -> int:\n return self._column_indices[column_name]", "def columnCount(self, parent_midx):\n return self._cols_nb", "def columnCount(self, parent_midx):\n return self._cols_nb", "def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None" ]
[ "0.784214", "0.7419771", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.7389335", "0.71282494", "0.6932579", "0.6846213", "0.68336886", "0.6810449", "0.67918617", "0.6779231", "0.6759096", "0.6752048", "0.6750021", "0.67478293", "0.67478293", "0.6707134" ]
0.82080346
0
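The last two records above (the row and column accessors) are trivial on their own; in context they read like parts of one board-location class in which a stored (row, column) pair is exposed through getters. A hedged sketch of such a class follows — the Location name and its constructor are assumptions for illustration, not part of the dataset:

class Location:
    # Hypothetical container for a board position; the two getters mirror the documents above.
    def __init__(self, row_number, column_number):
        self._row_number = row_number
        self._column_number = column_number

    def get_row(self):
        return self._row_number

    def get_column(self):
        return self._column_number

loc = Location(2, 5)
print(loc.get_row(), loc.get_column())  # 2 5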
Read in molecule(s) (and conformers, if present) in insdf file. Create Psi4 input calculations for each structure.
def confs_to_psi(insdf, method, basis, calctype='opt', memory=None, via_json=False):
    wdir = os.getcwd()
    # open molecules
    molecules = reader.read_mols(insdf)
    ### For each molecule: for each conf, generate input
    for mol in molecules:
        print(mol.GetTitle(), mol.NumConfs())
        if not mol.GetTitle():
            sys.exit("ERROR: OEMol must have title assigned! Exiting.")
        for i, conf in enumerate(mol.GetConfs()):
            # change into subdirectory ./mol/conf/
            subdir = os.path.join(wdir, "%s/%s" % (mol.GetTitle(), i + 1))
            if not os.path.isdir(subdir):
                os.makedirs(subdir)
            if os.path.exists(os.path.join(subdir, 'input.dat')):
                print("Input file already exists. Skipping.\n{}\n".format(
                    os.path.join(subdir, 'input.dat')))
                continue
            label = mol.GetTitle() + '_' + str(i + 1)
            if via_json:
                ofile = open(os.path.join(subdir, 'input.py'), 'w')
                ofile.write("# molecule {}\n\nimport numpy as np\nimport psi4"
                            "\nimport json\n\njson_data = ".format(label))
                json.dump(
                    make_psi_json(conf, label, method, basis, calctype, memory),
                    ofile,
                    indent=4,
                    separators=(',', ': '))
                ofile.write(
                    "\njson_ret = psi4.json_wrapper.run_json(json_data)\n\n")
                ofile.write("with open(\"output.json\", \"w\") as ofile:\n\t"
                            "json.dump(json_ret, ofile, indent=2)\n\n")
            else:
                ofile = open(os.path.join(subdir, 'input.dat'), 'w')
                ofile.write(
                    make_psi_input(conf, label, method, basis, calctype, memory))
            ofile.close()
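For context, the document above is driven entirely by its arguments: it reads every molecule and conformer from the SDF, creates ./<molecule title>/<conformer index>/ under the current directory, and writes either a Psi4 input.dat or a JSON-driven input.py per conformer (make_psi_input and make_psi_json are assumed to return the corresponding Psi4 input text or dictionary). A hypothetical call — file name, method, and basis are placeholders — might look like:

confs_to_psi("mols.sdf", method="mp2", basis="def2-sv(p)", calctype="opt")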
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_single_sdf_to_pdb(pdb_subfolder_path, sdf_file_path):\n\n if os.path.exists(sdf_file_path) is True:\n\n file_basename = basename(sdf_file_path)\n file_basename = file_basename.split(\"__input1\")[0]\n\n file_output_name = \"{}{}_\".format(pdb_subfolder_path, file_basename)\n\n try:\n mols = Chem.SDMolSupplier(\n sdf_file_path, sanitize=False, removeHs=False, strictParsing=False\n )\n except:\n mols = None\n\n # if mols is None rdkit couldn't import the sdf so we will not do anything else\n if mols is None:\n pass\n elif len(mols) == 0:\n pass\n else:\n try:\n mols_no_hydrogen = Chem.SDMolSupplier(\n sdf_file_path, sanitize=True, removeHs=True, strictParsing=False\n )\n except:\n mols_no_hydrogen = [None for x in range(0, len(mols))]\n\n # if len(mols)==0 gypsum output a blank file by accident\n # if mols is None rdkit couldn't import the sdf\n if len(mols) != 0:\n counter = 0\n for i in range(0, len(mols)):\n mol = mols[i]\n # Extra precaution to prevent None's within a set of good\n # mols\n if mol is None:\n continue\n\n mol = MOH.check_sanitization(mol)\n # Filter out any which failed\n if mol is None:\n continue\n\n # pdb_name indexed to 1\n pdb_name = \"{}_{}.pdb\".format(file_output_name, counter + 1)\n if mol is not None: # For extra precaution...\n Chem.MolToPDBFile(mol, pdb_name, flavor=32)\n # Add header to PDB file with SMILES containing\n # protanation and stereochem\n\n no_hydrogen_smiles = mols_no_hydrogen[i]\n if no_hydrogen_smiles is None:\n no_hydrogen_smiles = Chem.MolToSmiles(mol)\n\n if no_hydrogen_smiles is None:\n print(\"SMILES was None for: \", pdb_name)\n printout = \"REMARK Final SMILES string: {}\\n\".format(\"None\")\n elif type(no_hydrogen_smiles) == str:\n printout = \"REMARK Final SMILES string: {}\\n\".format(\n no_hydrogen_smiles\n )\n elif type(no_hydrogen_smiles) == type(Chem.MolFromSmiles(\"C\")):\n printout = \"REMARK Final SMILES string: {}\\n\".format(\n Chem.MolToSmiles(no_hydrogen_smiles)\n )\n\n with open(pdb_name) as f:\n printout = printout + f.read()\n with open(pdb_name, \"w\") as f:\n f.write(printout)\n printout = \"\"\n\n counter = counter + 1\n else:\n pass", "def import_sdf(self, fname):\n self.ftype = 'sdf'\n with open(fname) as f:\n lines = f.readlines()\n self.n_atom = int(lines[3].split()[0])\n self.n_connect = int(lines[3].split()[1])\n self.sym = []\n self.at_num = []\n self.xyz = np.zeros((self.n_atom, 3))\n for i, line in enumerate(lines[4:4+self.n_atom]):\n tmp = line.split()\n self.sym.append(tmp[3])\n self.at_num.append(self.sym2num(tmp[3]))\n self.xyz[i, 0] = float(tmp[0])\n self.xyz[i, 1] = float(tmp[1])\n self.xyz[i, 2] = float(tmp[2])\n self.connect = np.zeros((self.n_connect, 2))\n for i, line in enumerate(lines[4+self.n_atom:4+self.n_atom+self.n_connect]):\n tmp = line.split()\n self.connect[i, 0] = tmp[0]\n self.connect[i, 1] = tmp[1]", "def generate_input(\n self,\n optimise=False,\n hessian=False,\n density=False,\n energy=False,\n fchk=False,\n restart=False,\n execute=True,\n ):\n\n setters = \"\"\n tasks = \"\"\n\n if energy:\n append_to_log(\"Writing psi4 energy calculation input\")\n tasks += f\"\\nenergy('{self.molecule.theory}')\"\n\n if optimise:\n append_to_log(\"Writing PSI4 optimisation input\", \"minor\")\n setters += f\" g_convergence {self.molecule.convergence}\\n GEOM_MAXITER {self.molecule.iterations}\\n\"\n tasks += f\"\\noptimize('{self.molecule.theory.lower()}')\"\n\n if hessian:\n append_to_log(\"Writing PSI4 Hessian matrix calculation input\", \"minor\")\n setters += \" hessian_write 
on\\n\"\n\n tasks += f\"\\nenergy, wfn = frequency('{self.molecule.theory.lower()}', return_wfn=True)\"\n\n tasks += \"\\nwfn.hessian().print_out()\\n\\n\"\n\n if density:\n raise NotImplementedError(\n \"Due to PSI4 requiring a box size which cannot be automatically generated, \"\n \"PSI4 cannot currently be used for density calculations. Please use Gaussian \"\n \"instead.\"\n )\n # append_to_log('Writing PSI4 density calculation input', 'minor')\n # setters += \" cubeprop_tasks ['density']\\n\"\n #\n # overage = get_overage(self.molecule.name)\n # setters += ' CUBIC_GRID_OVERAGE [{0}, {0}, {0}]\\n'.format(overage)\n # setters += ' CUBIC_GRID_SPACING [0.13, 0.13, 0.13]\\n'\n # tasks += f\"grad, wfn = gradient('{self.molecule.theory.lower()}', return_wfn=True)\\ncubeprop(wfn)\"\n\n if fchk:\n append_to_log(\"Writing PSI4 input file to generate fchk file\")\n tasks += f\"\\ngrad, wfn = gradient('{self.molecule.theory.lower()}', return_wfn=True)\"\n tasks += \"\\nfchk_writer = psi4.core.FCHKWriter(wfn)\"\n tasks += f'\\nfchk_writer.write(\"{self.molecule.name}_psi4.fchk\")\\n'\n\n # if self.molecule.solvent:\n # setters += ' pcm true\\n pcm_scf_type total\\n'\n # tasks += '\\n\\npcm = {'\n # tasks += '\\n units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Chloroform\\n }'\n # tasks += '\\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit'\n # tasks += '\\n }\\n}'\n\n setters += \"}\\n\"\n\n if not execute:\n setters += f\"set_num_threads({self.molecule.threads})\\n\"\n\n # input.dat is the PSI4 input file.\n with open(\"input.dat\", \"w+\") as input_file:\n # opening tag is always writen\n input_file.write(\n f\"memory {self.molecule.memory} GB\\n\\nmolecule {self.molecule.name} {{\\n\"\n f\"{self.molecule.charge} {self.molecule.multiplicity} \\n\"\n )\n # molecule is always printed\n for i, atom in enumerate(self.molecule.coordinates):\n input_file.write(\n f\" {self.molecule.atoms[i].atomic_symbol} \"\n f\"{float(atom[0]): .10f} {float(atom[1]): .10f} {float(atom[2]): .10f} \\n\"\n )\n\n input_file.write(\n f\" units angstrom\\n no_reorient\\n}}\\n\\nset {{\\n basis {self.molecule.basis}\\n\"\n )\n\n input_file.write(setters)\n input_file.write(tasks)\n\n if execute:\n with open(\"log.txt\", \"w+\") as log:\n try:\n sp.run(\n f\"psi4 input.dat -n {self.molecule.threads}\",\n shell=True,\n stdout=log,\n stderr=log,\n check=True,\n )\n except sp.CalledProcessError as exc:\n raise PSI4Error(\n \"PSI4 did not execute successfully check log file for details.\"\n ) from exc\n\n # Now check the exit status of the job\n return self.check_for_errors()\n\n else:\n return {\"success\": False, \"error\": \"Not run\"}", "def molToPsi4(self):\n mol = self.molfile\n mol = Chem.AddHs(mol)\n AllChem.EmbedMolecule(mol, useExpTorsionAnglePrefs=True, useBasicKnowledge=True)\n AllChem.UFFOptimizeMolecule(mol)\n atoms = mol.GetAtoms()\n string = string = \"\\n\"\n for i, atom in enumerate(atoms):\n pos = mol.GetConformer().GetAtomPosition(atom.GetIdx())\n string += \"{} {} {} {}\\n\".format(atom.GetSymbol(), pos.x, pos.y, pos.z)\n string += \"units angstrom\\n\"\n return string, mol", "def test_species_to_sdf_file(self):\n path = os.path.join(ARC_PATH, 'arc', 'testing', 'mol.sdf')\n spc = ARCSpecies(label='NCC', smiles='NCC')\n converter.species_to_sdf_file(spc, path)\n with open(path, 'r') as f:\n sdf_content = f.read()\n expected_sdf = \"\"\"\n RDKit 3D\n\n 10 9 0 0 0 0 0 0 0 0999 V2000\n 1.1517 -0.3760 -0.5231 N 0 0 0 0 0 0 0 0 0 0 0 0\n 0.2893 
0.4500 0.3115 C 0 0 0 0 0 0 0 0 0 0 0 0\n -1.1415 -0.0561 0.2592 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1.1386 -1.3376 -0.1854 H 0 0 0 0 0 0 0 0 0 0 0 0\n 2.1151 -0.0555 -0.4352 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.6517 0.4342 1.3447 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.3279 1.4855 -0.0414 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.2133 -1.0839 0.6308 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.7870 0.5726 0.8809 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.5327 -0.0332 -0.7636 H 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0\n 1 4 1 0\n 1 5 1 0\n 2 3 1 0\n 2 6 1 0\n 2 7 1 0\n 3 8 1 0\n 3 9 1 0\n 3 10 1 0\nM END\n$$$$\n\"\"\"\n self.assertEqual(sdf_content, expected_sdf)", "def optimised_structure(self):\n\n # Run through the file and find all lines containing '==> Geometry', add these lines to a list.\n # Reverse the list\n # from the start of this list, jump down to the first atom and set this as the start point\n # Split the row into 4 columns: centre, x, y, z.\n # Add each row to a matrix.\n # Return the matrix.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n # Will contain index of all the lines containing '==> Geometry'.\n geo_pos_list = []\n for count, line in enumerate(lines):\n if \"==> Geometry\" in line:\n geo_pos_list.append(count)\n\n elif \"**** Optimization is complete!\" in line:\n opt_pos = count\n opt_steps = int(line.split()[5])\n\n if not (opt_pos and opt_steps):\n raise EOFError(\n \"According to the output.dat file, optimisation has not completed.\"\n )\n\n # now get the final opt_energy\n opt_energy = float(lines[opt_pos + opt_steps + 7].split()[1])\n\n # Set the start as the last instance of '==> Geometry'.\n start_of_vals = geo_pos_list[-1] + 9\n\n opt_struct = []\n\n for row in range(len(self.molecule.atoms)):\n\n # Append the first 4 columns of each row, converting to float as necessary.\n struct_row = []\n for indx in range(3):\n struct_row.append(\n float(lines[start_of_vals + row].split()[indx + 1])\n )\n\n opt_struct.append(struct_row)\n\n return np.array(opt_struct), opt_energy", "def format_molecule_for_psi4(self):\n text = 'molecule mol {\\n'\n for line in self.create_psi4_string_from_molecule().splitlines():\n text += ' ' + line + '\\n'\n text += '}\\n'\n return text", "def test_select_sdf_mol2(self):\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = \"\"\"\n ---\n options:\n output_dir: {}\n setup_dir: .\n molecules:\n sdf_0:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n select: 0\n sdf_1:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n select: 1\n mol2_0:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n select: 0\n mol2_1:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n select: 1\n \"\"\".format(tmp_dir, self.sdf_path, self.sdf_path, self.mol2_path, self.mol2_path)\n yaml_content = textwrap.dedent(yaml_content)\n exp_builder = ExperimentBuilder(yaml_content)\n\n for extension in ['sdf', 'mol2']:\n multi_path = getattr(self, extension + '_path')\n for model_idx in [0, 1]:\n mol_id = extension + '_' + str(model_idx)\n\n # The molecule now is neither set up nor processed\n is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)\n assert is_setup is False\n assert is_processed is False\n\n exp_builder._db._setup_molecules(mol_id)\n\n # The setup of the molecule must isolate the frame in a single-frame PDB\n single_mol_path = os.path.join(tmp_dir, 
pipeline.SetupDatabase.MOLECULES_DIR,\n mol_id, mol_id + '.' + extension)\n assert os.path.exists(os.path.join(single_mol_path))\n assert os.path.getsize(os.path.join(single_mol_path)) > 0\n if extension == 'mol2':\n # OpenEye loses the resname when writing a mol2 file.\n mol2_file = utils.Mol2File(single_mol_path)\n assert len(list(mol2_file.resnames)) == 1\n assert mol2_file.resname != '<0>'\n\n # sdf files must be converted to mol2 to be fed to antechamber\n if extension == 'sdf':\n single_mol_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR,\n mol_id, mol_id + '.mol2')\n assert os.path.exists(os.path.join(single_mol_path))\n assert os.path.getsize(os.path.join(single_mol_path)) > 0\n\n # Check antechamber parametrization\n single_mol_path = os.path.join(tmp_dir, pipeline.SetupDatabase.MOLECULES_DIR,\n mol_id, mol_id + '.gaff.mol2')\n assert os.path.exists(os.path.join(single_mol_path))\n assert os.path.getsize(os.path.join(single_mol_path)) > 0\n\n # The positions must be approximately correct (antechamber move the molecule)\n selected_oe_mol = utils.load_oe_molecules(single_mol_path, molecule_idx=0)\n selected_pos = utils.get_oe_mol_positions(selected_oe_mol)\n second_oe_mol = utils.load_oe_molecules(multi_path, molecule_idx=model_idx)\n second_pos = utils.get_oe_mol_positions(second_oe_mol)\n assert selected_oe_mol.NumConfs() == 1\n assert np.allclose(selected_pos, second_pos, atol=1e-1)\n\n # The molecule now both set up and processed\n is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)\n assert is_setup is True\n assert is_processed is True\n\n # A new instance of ExperimentBuilder is able to resume with correct molecule\n exp_builder = ExperimentBuilder(yaml_content)\n is_setup, is_processed = exp_builder._db.is_molecule_setup(mol_id)\n assert is_setup is True\n assert is_processed is True", "def from_sdf(mol_fn):\n from ase import Atoms\n\n with open(mol_fn) as mol_f:\n mol_data = mol_f.readlines()\n\n coord_section = [l for l in mol_data if len(l.split()) == 16]\n atom_symbols = [l.split()[3] for l in coord_section]\n str_coords = [l.split()[:3] for l in coord_section]\n coords = [map(float, atom_coords) for atom_coords in str_coords]\n\n return Atoms(symbols=atom_symbols, positions=coords)", "def sdfToMol(sdf):\n suppl_data = Chem.SDMolSupplier(sdf)\n ms = [x for x in suppl_data if x is not None] # Filter data from sdf suppl_data\n return ms", "def generate_input(self, input_type='input', optimise=False, hessian=False, density=False, energy=False,\n fchk=False, run=True):\n\n molecule = self.molecule.molecule[input_type]\n\n setters = ''\n tasks = ''\n\n # input.dat is the PSI4 input file.\n with open('input.dat', 'w+') as input_file:\n # opening tag is always writen\n input_file.write(f\"memory {self.qm['threads']} GB\\n\\nmolecule {self.molecule.name} {{\\n{self.charge} {self.multiplicity} \\n\")\n # molecule is always printed\n for atom in molecule:\n input_file.write(f' {atom[0]} {float(atom[1]): .10f} {float(atom[2]): .10f} {float(atom[3]): .10f} \\n')\n input_file.write(f\" units angstrom\\n no_reorient\\n}}\\n\\nset {{\\n basis {self.qm['basis']}\\n\")\n\n if energy:\n append_to_log('Writing psi4 energy calculation input')\n tasks += f\"\\nenergy = energy('{self.qm['theory']}')\"\n\n if optimise:\n append_to_log('Writing PSI4 optimisation input', 'minor')\n setters += f\" g_convergence {self.qm['convergence']}\\n GEOM_MAXITER {self.qm['iterations']}\\n\"\n tasks += f\"\\noptimize('{self.qm['theory'].lower()}')\"\n\n if hessian:\n 
append_to_log('Writing PSI4 Hessian matrix calculation input', 'minor')\n setters += ' hessian_write on\\n'\n\n tasks += f\"\\nenergy, wfn = frequency('{self.qm['theory'].lower()}', return_wfn=True)\"\n\n tasks += '\\nwfn.hessian().print_out()\\n\\n'\n\n if density:\n append_to_log('Writing PSI4 density calculation input', 'minor')\n setters += \" cubeprop_tasks ['density']\\n\"\n\n overage = get_overage(self.molecule.name)\n setters += \" CUBIC_GRID_OVERAGE [{0}, {0}, {0}]\\n\".format(overage)\n setters += \" CUBIC_GRID_SPACING [0.13, 0.13, 0.13]\\n\"\n tasks += f\"grad, wfn = gradient('{self.qm['theory'].lower()}', return_wfn=True)\\ncubeprop(wfn)\"\n\n if fchk:\n append_to_log('Writing PSI4 input file to generate fchk file')\n tasks += f\"\\ngrad, wfn = gradient('{self.qm['theory'].lower()}', return_wfn=True)\"\n tasks += '\\nfchk_writer = psi4.core.FCHKWriter(wfn)'\n tasks += f'\\nfchk_writer.write(\"{self.molecule.name}_psi4.fchk\")\\n'\n\n # TODO If overage cannot be made to work, delete and just use Gaussian.\n # if self.qm['solvent']:\n # setters += ' pcm true\\n pcm_scf_type total\\n'\n # tasks += '\\n\\npcm = {'\n # tasks += '\\n units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Chloroform\\n }'\n # tasks += '\\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit'\n # tasks += '\\n }\\n}'\n\n setters += '}\\n'\n\n if not run:\n setters += f'set_num_threads({self.qm[\"threads\"]})\\n'\n\n input_file.write(setters)\n input_file.write(tasks)\n\n if run:\n sub_run(f'psi4 input.dat -n {self.qm[\"threads\"]}', shell=True)", "def ReadMaterialInfoAndMakeMaterialsMolFiles(OutputPath,MaterialPathList,CutOff,Multiple,PDBCharges,MaterialInputFormat):\r\n\r\n MaterialInfoList=[]\r\n ID=0\r\n Time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\r\n\r\n if os.path.exists(os.path.join(OutputPath,'Mols')): pass\r\n else: os.mkdir(os.path.join(OutputPath,'Mols'))\r\n\r\n for Range,MaterialPath in enumerate(MaterialPathList):\r\n\r\n\r\n AtomNum=0\r\n MaterialsCharge = 0.0\r\n MaterialInfo,ElementList,CellLength,CellAngle,AtomInfo,AtomInfoList,CellSize=[],[],[],[],[],[],[]\r\n\r\n MaterialName=os.path.splitext(MaterialPath.split('/')[-1])[0]\r\n MaterialInfo.append(MaterialName)\r\n MaterialNewName=re.sub('\\W','',MaterialName)\r\n\r\n if len(MaterialNewName) > 20:\r\n ID+=1\r\n MaterialNewName=MaterialNewName[:20]\r\n MaterialNewName=MaterialNewName+'_ID_'+str(ID)\r\n\r\n if MaterialInputFormat == 'pdb':\r\n with open(MaterialPath,'r') as File:\r\n with open(os.path.join(OutputPath,'Mols',MaterialNewName)+'.mol','w') as Mol:\r\n\r\n for Line in File.readlines():\r\n if Line[0:6] == 'CRYST1':\r\n CellLength.append(float('%.3f' % (float(Line[6:15].strip()))))\r\n CellLength.append(float('%.3f' % (float(Line[15:24].strip()))))\r\n CellLength.append(float('%.3f' % (float(Line[24:33].strip()))))\r\n CellAngle.append(float('%.2f' % (float(Line[33:40].strip()))))\r\n CellAngle.append(float('%.2f' % (float(Line[40:47].strip()))))\r\n CellAngle.append(float('%.2f' % (float(Line[47:54].strip()))))\r\n elif Line[0:6] == 'ATOM ':\r\n AtomNum += 1\r\n AtomInfo = []\r\n AtomInfo.append(int(Line[6:11].strip())) # serial\r\n AtomInfo.append(str(Line[12:16].strip())) # atom name\r\n AtomInfo.append(float(Line[30:38].strip())) # x\r\n AtomInfo.append(float(Line[38:46].strip())) # y\r\n AtomInfo.append(float(Line[46:54].strip())) # z\r\n\r\n if PDBCharges == True:\r\n AtomInfo.append(float(Line[54:60].strip())) # charge\r\n MaterialsCharge += 
float(Line[54:60].strip())\r\n else:\r\n AtomInfo.append(float('0.0')) # charge\r\n\r\n ElementList.append(str(Line[12:16].strip())) # atom name list\r\n AtomInfoList.append(AtomInfo)\r\n\r\n if abs(MaterialsCharge) < 0.001:\r\n Mol.write('# Basic Molecule Information\\n# Created by PyMSATm at %s\\n'\r\n 'Molecule_name: %s \\n\\nCoord_Info: Listed Cartesian None\\n' % (Time, MaterialNewName))\r\n else: Mol.write('# Basic Molecule Information\\n# Created by PyMSATm at %s\\n'\r\n 'Molecule_name: %s CHARGED\\n\\nCoord_Info: Listed Cartesian None\\n' % (Time, MaterialNewName))\r\n\r\n Mol.write(' %d\\n'%(AtomNum))\r\n\r\n for AtomInfo2 in AtomInfoList:\r\n Mol.write('{:<6d} {:>8} {:>8} {:>8} {:>6} {:>6} 0 0\\n'.format(AtomInfo2[0],\r\n AtomInfo2[2],AtomInfo2[3],AtomInfo2[4],AtomInfo2[1],AtomInfo2[5]))\r\n\r\n Mol.write('\\nFundcell_Info: Listed\\n%.5f %.5f %.5f\\n%.5f %.5f %.5f\\n0.00000 0.00000 0.00000\\n%.5f %.5f %.5f'%(CellLength[0],\r\n CellLength[1],CellLength[2],CellAngle[0],CellAngle[1],CellAngle[2],CellLength[0],CellLength[1],CellLength[2]))\r\n\r\n if MaterialInputFormat == 'mol':\r\n\r\n KeyOne,KeyTwo,KeyThree= False,False,False\r\n\r\n with open(MaterialPath, 'r') as File:\r\n with open(os.path.join(OutputPath, 'Mols', MaterialNewName) + '.mol', 'w') as Mol:\r\n\r\n for Line in File.readlines():\r\n if Line.strip(): # skip blank\r\n WordList = Line.strip().split()\r\n if WordList[0] == 'Coord_Info:':\r\n KeyOne = True\r\n elif WordList[0] == 'Fundcell_Info:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif len(WordList) > 1 and KeyOne == True:\r\n AtomNum += 1\r\n AtomInfo = []\r\n AtomInfo.append(int(WordList[0].strip())) # serial\r\n AtomInfo.append(str(WordList[4].strip())) # atom name\r\n AtomInfo.append(float(WordList[1].strip())) # x\r\n AtomInfo.append(float(WordList[2].strip())) # y\r\n AtomInfo.append(float(WordList[3].strip())) # z\r\n AtomInfo.append(float(WordList[5].strip())) # charge\r\n ElementList.append(str(WordList[4].strip())) # atom name list\r\n AtomInfoList.append(AtomInfo)\r\n MaterialsCharge += float(WordList[5].strip())\r\n elif len(WordList) > 1 and KeyTwo == True:\r\n KeyTwo = False\r\n KeyThree = True\r\n CellLength.append(float('%.3f' % (float(WordList[0].strip()))))\r\n CellLength.append(float('%.3f' % (float(WordList[1].strip()))))\r\n CellLength.append(float('%.3f' % (float(WordList[2].strip()))))\r\n elif len(WordList) > 1 and KeyThree == True:\r\n KeyThree = False\r\n CellAngle.append(float('%.2f' % (float(WordList[0].strip()))))\r\n CellAngle.append(float('%.2f' % (float(WordList[1].strip()))))\r\n CellAngle.append(float('%.2f' % (float(WordList[2].strip()))))\r\n\r\n if abs(MaterialsCharge) < 0.001:\r\n Mol.write('# Basic Molecule Information\\n# Created by PyMSATm at %s\\n'\r\n 'Molecule_name: %s \\n\\nCoord_Info: Listed Cartesian None\\n' % (\r\n Time, MaterialNewName))\r\n else:\r\n Mol.write('# Basic Molecule Information\\n# Created by PyMSATm at %s\\n'\r\n 'Molecule_name: %s CHARGED\\n\\nCoord_Info: Listed Cartesian None\\n' % (\r\n Time, MaterialNewName))\r\n\r\n Mol.write(' %d\\n' % (AtomNum))\r\n\r\n for AtomInfo2 in AtomInfoList:\r\n Mol.write('{:<6d} {:>8.4f} {:>8.4f} {:>8.4f} {:>8s} {:>10.6f} 0 0\\n'.format(AtomInfo2[0], AtomInfo2[2],\r\n AtomInfo2[3], AtomInfo2[4],\r\n AtomInfo2[1], AtomInfo2[5]))\r\n\r\n Mol.write(\r\n '\\nFundcell_Info: Listed\\n%.4f %.4f %.4f\\n%.4f %.4f %.4f\\n0.00000 0.00000 0.00000\\n%.4f %.4f %.4f' % (\r\n CellLength[0], CellLength[1], CellLength[2], CellAngle[0], CellAngle[1], CellAngle[2],\r\n CellLength[0], 
CellLength[1], CellLength[2]))\r\n\r\n if CellAngle[0] == 90.0 and CellAngle[1] == 90.0 and CellAngle[2] == 90.0:\r\n Orthogonality = True\r\n else: Orthogonality=False\r\n\r\n if Orthogonality == True:\r\n for Length in CellLength:\r\n Count = 1\r\n while Length * Count < CutOff * Multiple+1:\r\n Count += 1\r\n CellSize.append(str(Count))\r\n else: # definite the lengths of x, y, z in orthogonal directions\r\n\r\n SinA, SinB, SinC = math.sin(math.radians(CellAngle[0])), math.sin(math.radians(CellAngle[1])), math.sin(math.radians(CellAngle[2]))\r\n CosA, CosB, CosC = math.cos(math.radians(CellAngle[0])), math.cos(math.radians(CellAngle[1])), math.cos(math.radians(CellAngle[2]))\r\n CosParameterA = (CosC * CosA - CosB) / (SinC * SinA)\r\n SinParameterA = math.sqrt(1 - CosParameterA ** 2)\r\n CosParameterB = (CosA * CosB - CosC) / (SinA * SinB)\r\n SinParameterB = math.sqrt(1 - CosParameterB ** 2)\r\n CosParameterC = (CosB * CosC - CosA) / (SinB * SinC)\r\n SinParameterC = math.sqrt(1 - CosParameterC ** 2)\r\n x=CellLength[0]*SinC*SinParameterA\r\n y=CellLength[1]*SinA*SinParameterB\r\n z=CellLength[2]*SinB*SinParameterC\r\n TransLength=[x,y,z]\r\n\r\n for Length in TransLength:\r\n Count = 1\r\n while float(Length) * Count < CutOff * Multiple+1:\r\n Count += 1\r\n CellSize.append(str(Count))\r\n\r\n MaterialInfo.append(AtomNum)\r\n MaterialInfo.append(CellLength)\r\n MaterialInfo.append(CellAngle)\r\n MaterialInfo.append(CellSize)\r\n MaterialInfo.append(list(set(ElementList)))\r\n MaterialInfo.append(Orthogonality)\r\n MaterialInfo.append(MaterialNewName)\r\n MaterialInfoList.append(MaterialInfo)\r\n\r\n return MaterialInfoList", "def optimised_structure(self):\n\n # Run through the file and find all lines containing '==> Geometry', add these lines to a list.\n # Reverse the list\n # from the start of this list, jump down to the first atom and set this as the start point\n # Split the row into 4 columns: centre, x, y, z.\n # Add each row to a matrix.\n # Return the matrix.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n # Will contain index of all the lines containing '==> Geometry'.\n geo_pos_list = []\n for count, line in enumerate(lines):\n if \"==> Geometry\" in line:\n geo_pos_list.append(count)\n\n elif \"**** Optimization is complete!\" in line:\n opt_pos = count\n opt_steps = int(line.split()[5])\n\n if not (opt_pos and opt_steps):\n raise EOFError('According to the output.dat file, optimisation has not completed.')\n\n # now get the final opt_energy\n opt_energy = float(lines[opt_pos + opt_steps + 7].split()[1])\n\n # Set the start as the last instance of '==> Geometry'.\n start_of_vals = geo_pos_list[-1] + 9\n\n opt_struct = []\n\n for row in range(len(self.molecule.molecule['input'])):\n\n # Append the first 4 columns of each row, converting to float as necessary.\n struct_row = [lines[start_of_vals + row].split()[0]]\n for indx in range(3):\n struct_row.append(float(lines[start_of_vals + row].split()[indx + 1]))\n\n opt_struct.append(struct_row)\n\n return opt_struct, opt_energy", "def _parse_nscf(self) -> None:\n alat = 0\n lattice = np.zeros((3,3))\n recip = np.zeros((3,3))\n nbnd = 0\n natom = 0\n positions = []\n nk = 0\n symbols = []\n k_frac = []\n efermi = 0\n\n energy = {\"spinup\" : [],\n \"spindown\" : []\n }\n\n which = \"spinup\" # remember if we are reading spin up or spin down\n \n with open(self.output,'r') as f:\n aline=f.readline()\n\n while aline:\n # read information by checking the flags\n if 
\"lattice parameter (alat) =\" in aline:\n data = aline.split('=')[1]\n data = data.split()\n alat = float(data[0]) # in Bohr\n\n if \"number of Kohn-Sham states\" in aline:\n data = aline.split()[-1]\n nbnd = int(data)\n\n if \"number of atoms/cell\" in aline:\n data = aline.split()[-1]\n natom = int(data)\n\n if \"crystal axes: (cart. coord. in units of alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n lattice[i] = np.array(data, dtype = float) \n lattice *= alat * Bohr2A\n\n if \"reciprocal axes: (cart. coord. in units 2 pi/alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n recip[i] = np.array(data, dtype = float)\n recip *= 2 * np.pi / (alat * Bohr2A)\n\n if \"site n. atom positions (cryst. coord.)\" in aline:\n for i in range(natom):\n data = f.readline()\n symbols.append(re.findall(r'[A-Z][a-z]*', data)[0])\n positions.append(np.array(re.findall('-?\\d+\\.\\d+', data), dtype = float))\n \n if \"number of k points= \" in aline:\n nk = int( re.findall(r'\\d+', aline)[0] )\n k_frac = np.zeros((nk,3))\n\n if re.search(r'k\\(.+\\)\\s+=\\s+\\(.+\\)', aline) != None:\n parts = aline.split('=')\n ik = int( re.findall(r'\\d+', parts[0])[0] )\n pos = np.array(re.findall(r'-?\\d+\\.\\d+', parts[1]), dtype = float)\n k_frac[ik-1] = pos\n\n if \"the Fermi energy is\" in aline:\n efermi = float(re.findall(r'-?\\d+\\.\\d+', aline)[0])\n\n if \"------ SPIN UP ------------\" in aline:\n which = \"spinup\"\n\n if \"------ SPIN DOWN ----------\" in aline:\n which = \"spindown\"\n\n if re.search('k\\s+=\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s',aline) != None:\n kstr=re.findall(r'-?\\d+\\.\\d+',aline)\n\n f.readline()\n\n lenergy = [] # local energy for each k point\n while len(lenergy) < nbnd:\n aline = f.readline()\n data = np.array(aline.split(), dtype = float)\n for d in data:\n lenergy.append(d)\n\n if len(lenergy) > nbnd:\n raise \"length of energy > nbnd\"\n\n energy[which].append(lenergy)\n \n aline = f.readline()\n\n self.efermi = efermi\n self.lattice = lattice\n self.symbols = symbols \n self.positions = np.array(positions)\n self.reciprocal = recip\n self.kpoints = k_frac\n\n self.eig = {}\n self.eig[Spin.up] = np.array(energy[\"spinup\"]).T\n\n if energy[\"spindown\"]:\n self.spin_polarized = True\n self.eig[Spin.down] = np.array(energy[\"spindown\"]).T", "def generate_data(input_file):\n \n mol_mass_list = []\n inchi_list = []\n SMILES_list = []\n identifier_list = []\n inchi_key1_list = [] \n inchi_key2_list = [] \n mol_formula_list = []\n NA_list = []\n \n pre_SMILES_list = []\n identifier_list = []\n all_lines = input_file.split('\\n')\n if all_lines[-1] == '':\n all_lines = all_lines[:-1]\n for line in all_lines:\n line = line.split('\\t')\n\n #Convert to mol and remove invalid structures \n smile_string = ''\n id_string = ''\n m = line[0]\n id_name = line[1]\n mol = Chem.MolFromSmiles(m)\n if mol != None:\n smile_string += m\n id_string += id_name\n pre_SMILES_list += [smile_string]\n \n #Source identifiers\n identifier_list += [id_string]\n \n pre_inchi_list = []\n for smile in pre_SMILES_list:\n #Generate mol\n m = Chem.MolFromSmiles(smile)\n #SMILES, canonical\n sm = Chem.MolToSmiles(m)\n SMILES_list += [sm]\n #Monoisotopic mass\n mol_weigth = Descriptors.ExactMolWt(m)\n mol_mass_list += [mol_weigth]\n #Mol Forumula\n mol_formula = rdMolDescriptors.CalcMolFormula(m)\n mol_formula_list += [mol_formula]\n # InChI \n inchi = rdinchi.MolToInchi(m)\n pre_inchi_list += [inchi[0]] \n \n \n # InChIKey1 and InChIKey2\n for 
inchi in pre_inchi_list:\n if not str(inchi).startswith('InCh'):\n inchi = 'NA'\n inchi_list += [inchi]\n \n pre_inchi_key_list =[]\n for inchi2 in inchi_list: \n if inchi2 == 'NA':\n inchi_key = \"NA-NA\"\n pre_inchi_key_list += [inchi_key]\n if inchi2 != 'NA':\n inchi_key = rdinchi.InchiToInchiKey(inchi2)\n pre_inchi_key_list += [inchi_key]\n \n for inchi_key in pre_inchi_key_list:\n inchi_key = inchi_key.split('-')\n inchi_key2 = inchi_key[1]\n inchi_key2_list += [inchi_key2]\n inchi_key1 = inchi_key[0]\n inchi_key1_list += [inchi_key1]\n\n # NA list \n nr_of_structures = len(SMILES_list)\n NA_list += ['NA'] * nr_of_structures\n\n overall_list = [mol_mass_list]+[inchi_list]+[SMILES_list]+\\\n [identifier_list]+[inchi_key2_list]+[inchi_key1_list]+[mol_formula_list]+\\\n [NA_list]+[NA_list]+[NA_list]+[NA_list]\n \n return overall_list", "def test_psi4_efp_5c():\n subject = subject5 + '\\nno_com\\nfix_orientation\\nsymmetry c1'\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def read_sdf(fname: Union[str, Path]) -> List[Chem.Mol]:\n supplier = Chem.SDMolSupplier(str(fname), removeHs=False)\n mols = [mol for mol in supplier]\n return mols", "def ReadIndmfl(filename, fh_info):\n def divmodulo(x,n):\n \"We want to take modulo and divide in fortran way, so that it is compatible with fortran code\"\n return ( sign(x)* (abs(x)/n) , sign(x)*mod(abs(x),n))\n\n fh = open(filename, 'r')\n lines = [line.split('#')[0].strip() for line in fh.readlines()] # strip comments\n lines = (line for line in lines if line) # strip blank lines & create generator expression\n\n hybr_emin, hybr_emax, Qrenorm, projector = [float(x) for x in lines.next().split()[:4]]\n if projector>=4:\n hybr_emin = int(hybr_emin)\n hybr_emax = int(hybr_emax)\n matsubara, broadc, broadnc, om_npts, om_emin, om_emax = [float(e) for e in lines.next().split()[:6]]\n matsubara = int(matsubara) # recast these to integers\n om_npts = int(om_npts) \n\n atoms={}\n cps={}\n natom = int(lines.next())\n for i in range(natom):\n iatom, nL, locrot_shift = [int(x) for x in lines.next().split()]\n (shift,locrot) = divmodulo(locrot_shift,3)\n if locrot<0: locrot=3\n \n Ls, qsplits, icps = array([[int(x) for x in lines.next().split()] for i in range(nL)]).T\n new_zx = [[float(x) for x in lines.next().split()] for loro in range(abs(locrot))]\n vec_shift = [float(x) for x in lines.next().split()] if shift else None\n\n atoms[iatom] = (locrot, new_zx, vec_shift)\n for icp, L, qsplit in zip(icps, Ls, qsplits):\n if cps.has_key(icp):\n cps[icp] += [(iatom, L, qsplit)]\n else:\n cps[icp] = [(iatom, L, qsplit)]\n\n #####################################################\n # read the big block of siginds and cftrans\n ncp, maxdim, maxsize = [int(e) for e in lines.next().split()[:3]]\n legends={}\n siginds={}\n cftrans={}\n for i in range(ncp):\n icp, dim, size = [int(e) for e in lines.next().split()]\n legends[icp] = lines.next().split(\"'\")[1::2]\n siginds[icp] = array([[int(e) for e in lines.next().split()] for row in range(dim)])\n raw_cftrans = array([[float(e) for e in lines.next().split()] for row in range(dim)])\n cftrans[icp] = raw_cftrans[:,0::2] + raw_cftrans[:,1::2]*1j\n\n return (siginds, cftrans, cps)", "def test_matches_sdf(self, pdb_path, smiles, sdf_path):\n pdb_path = get_data_file_path(pdb_path)\n pdb_mol = Molecule.from_pdb_and_smiles(pdb_path, smiles)\n\n sdf_path = get_data_file_path(sdf_path)\n sdf_mol = Molecule.from_file(sdf_path)\n\n # 
Check that the SDF and PDB are isomorphic with identical atom ordering\n isomorphic, atom_map = Molecule.are_isomorphic(\n pdb_mol, sdf_mol, return_atom_map=True\n )\n assert isomorphic, \"SDF and PDB must be the same molecule\"\n assert atom_map == {\n i: i for i in range(pdb_mol.n_atoms)\n }, \"SDF and PDB must have same atom ordering\"\n\n # Check that the coordinates are identical\n np.testing.assert_allclose(\n np.asarray(sdf_mol.conformers[0].m),\n np.asarray(pdb_mol.conformers[0].m),\n atol=1e-4,\n err_msg=\"SDF and PDB must have identical conformers\",\n )\n\n # Not sure that the following are necessary given are_isomorphic,\n # but keeping them from previous test implementations\n\n # Check that the atom properties are identical (except metadata)\n for pdb_atom, sdf_atom in zip(pdb_mol.atoms, sdf_mol.atoms):\n pdb_atom_dict = pdb_atom.to_dict()\n del pdb_atom_dict[\"metadata\"]\n del pdb_atom_dict[\"name\"]\n\n sdf_atom_dict = sdf_atom.to_dict()\n del sdf_atom_dict[\"metadata\"]\n del sdf_atom_dict[\"name\"]\n assert sdf_atom_dict == pdb_atom_dict\n\n # Check that the bonds match, though possibly in a different order\n sdf_bonds = {\n tuple(sorted([bond.atom1_index, bond.atom2_index])): bond\n for bond in sdf_mol.bonds\n }\n\n for pdb_bond in pdb_mol.bonds:\n key = tuple(sorted([pdb_bond.atom1_index, pdb_bond.atom2_index]))\n assert key in sdf_bonds\n assert pdb_bond.is_aromatic == sdf_bonds[key].is_aromatic\n assert pdb_bond.stereochemistry == sdf_bonds[key].stereochemistry", "def mol_file_data(file_name: str, molfile: MolFile = None):\n\tans = \"\"\n\tatoms = list()\n\tValenceAngle.objects.all().delete()\n\tValenceAngleLink.objects.all().delete()\n\n\tmatrix_z_coordinates = \"unknown\"\n\tmatrix_z_units = \"unknown\"\n\tmz_espec = 2\n\tmz_skipped = False\n\tmz_cline = 0\n\tmz_crow = 0\n\tmz_last_line = 0\n\tmz_next_column = 0\n\n\tmatrix_z_lines = list()\n\tactive_doc = None\n\tatom_number = 1\n\ttry:\n\t\tactive_doc = Document.objects.get(is_active=True)\n\t\tatom_number = len(Atom.objects.filter(document=active_doc)) + 1\n\texcept Document.DoesNotExist:\n\t\tpass\n\n\t# чтение файла file_name\n\ttry:\n\t\twith open(file_name) as f:\n\t\t\tlines = f.readlines()\n\t\t\tf.seek(0)\n\t\t\ttext = f.read()\n\t\t\tmolfile = MolFile.objects.create(text=text)\n\n\texcept FileNotFoundError as ex:\n\t\tans += \"error while reading .mol data. 
\"\n\t\tans += str(ex)\n\t\t# ans += str(os.listdir())\n\t\traise MolFileReadingException(\"File not found\")\n\n\tmode = \"scan\" # активный режим работы\n\tn = len(lines)\n\ti = 0\n\twhile i < n: # цикл по строкам файла\n\t\ti += 1\n\t\tif mode == \"end\":\n\t\t\tbreak\n\n\t\tline = lines[i-1].split(\"//\")[0]\n\t\tif not line:\n\t\t\tcontinue\n\t\tans += dprint(\">> \" + line + \"<br/>\")\n\n\t\tif mode == \"scan\":\n\t\t\tif \"[Atoms]\" in line:\n\t\t\t\tdprint(\"GO readAtoms\")\n\t\t\t\tmode = \"readAtoms\"\n\t\t\t\tcontinue\n\t\t\tif \"[Matrix Z]\" in line:\n\t\t\t\tdprint(\"Go readMatrixZ\")\n\t\t\t\tmode = \"readMatrixZ\"\n\t\t\t\tcontinue\n\t\tif mode == \"readMatrixZ\":\n\t\t\tif line.isspace():\n\t\t\t\tcontinue\n\t\t\tif not line.startswith(\"[\") and i != n: # not end of readMatrixZ section and not end of file\n\t\t\t\tmatrix_z_lines.append(resub(r\"[ \\t\\r\\n]+\", \" \", line.replace(\"\\r\", \"\").replace(\"\\n\", \"\")))\n\t\t\telse: # end of readMatrixZ\n\t\t\t\tmz_size = len(atoms) * 3\n\t\t\t\tmatrix_z = np.zeros((mz_size, mz_size), dtype=np.float32)\n\n\t\t\t\tfor mline in matrix_z_lines:\n\t\t\t\t\tif mline.startswith(\"Coordinates=\"):\n\t\t\t\t\t\tmatrix_z_coordinates = mline.split('=')[1]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif mline.startswith(\"Units=\"):\n\t\t\t\t\t\tmatrix_z_units = mline.split('=')[1]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tsplited = list(filter(None, mline.split(\" \")))\n\t\t\t\t\tif len(splited) != mz_espec:\n\t\t\t\t\t\tif not mz_skipped: # first time skipped\n\t\t\t\t\t\t\tmz_skipped = True\n\t\t\t\t\t\t\tmz_espec -= 1\n\t\t\t\t\t\t\tmz_last_line = mz_cline\n\t\t\t\t\t\t\tmz_next_column += len(splited) - 1\n\t\t\t\t\t\telse: # already skipped\n\t\t\t\t\t\t\tmz_espec -= 1\n\n\t\t\t\t\tif not mz_skipped: # normal line\n\t\t\t\t\t\tfor ind in range(mz_espec-1):\n\t\t\t\t\t\t\tval = float(splited[ind+1])\n\t\t\t\t\t\t\tmatrix_z[mz_cline, mz_crow+ind] = val\n\t\t\t\t\t\t\tmatrix_z[mz_crow+ind, mz_cline] = val\n\t\t\t\t\t\tmz_espec += 1\n\t\t\t\t\t\tmz_cline += 1\n\t\t\t\t\telse: # line with skip\n\t\t\t\t\t\tif len(splited) != mz_espec:\n\t\t\t\t\t\t\tmz_skipped = False\n\t\t\t\t\t\t\tmz_espec = 2\n\t\t\t\t\t\t\tmz_cline = mz_last_line\n\t\t\t\t\t\t\tmz_crow = mz_next_column\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tfor ind in range(len(splited) - 1):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tval = float(splited[ind+1])\n\t\t\t\t\t\t\t\tmatrix_z[mz_cline, mz_crow + ind] = val\n\t\t\t\t\t\t\t\tmatrix_z[mz_crow + ind, mz_cline] = val\n\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\tmz_espec = 2\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tmz_espec += 1\n\t\t\t\t\t\tmz_cline += 1\n\n\t\t\t\t# сохранение результатов чтения\n\t\t\t\tMatrixZ.objects.create(\n\t\t\t\t\towner=molfile,\n\t\t\t\t\tcoordinates=matrix_z_coordinates,\n\t\t\t\t\tunits=matrix_z_units,\n\t\t\t\t\tdata=matrix_z.dumps()\n\t\t\t\t)\n\n\t\t\t\tmode = \"scan\"\n\n\t\telif mode == \"readAtoms\": # считывание информации об атомах\n\t\t\tif line.isspace(): # пустая строка - это конец считывания\n\t\t\t\t# mode = \"scan\"\n\t\t\t\tdprint(\"END: readAtoms: finded end<br/>\")\n\t\t\t\tmode = \"scan\"\n\t\t\t\tcontinue\n\t\t\tif line.startswith('//') or line.lower().startswith(\"length\") or line.lower().startswith(\"count\"):\n\t\t\t\tcontinue\n\n\t\t\telems = line.strip().split(' ')\n\t\t\telems = list(filter(None, elems))\n\n\t\t\tfirst = elems[0]\n\t\t\ttry:\n\t\t\t\tif first == \"[Atoms]\":\n\t\t\t\t\tcontinue\n\t\t\t\tdprint(\"first: \" + str(first) + \"<br/>\")\n\t\t\t\tdprint(elems)\n\t\t\t\tnumber = 
int(first)\n\t\t\t\tdprint(\"ReadAtom [{}]\".format(number))\n\t\t\t\tax = float(elems[1])\n\t\t\t\tdprint(\"!\")\n\t\t\t\tay = float(elems[2])\n\t\t\t\taz = float(elems[3])\n\t\t\t\tname = elems[4]\n\t\t\t\tmass = int(elems[5])\n\t\t\t\tnew_atom = Atom(\n\t\t\t\t\tx=ax, y=ay, z=az, name=name, mass=mass, document=active_doc, molfileindex=number)\n\t\t\t\tnew_atom.molfile = molfile\n\t\t\t\tnew_atom.documentindex = atom_number\n\t\t\t\tatom_number += 1\n\t\t\t\tnew_atom.save()\n\n\t\t\t\tif new_atom.name == \"H\":\n\t\t\t\t\tnew_atom.valence = 1\n\t\t\t\t\tnew_atom.mentableindex = 0\n\n\t\t\t\tif new_atom.name == \"C\":\n\t\t\t\t\tnew_atom.valence = 4\n\t\t\t\t\tnew_atom.mentableindex = 5\n\n\t\t\t\tatoms.append(new_atom)\n\n\t\t\texcept ValueError as ex:\n\t\t\t\tdprint(\"get_last_mol_file error: \" + str(ex))\n\t\t\t\tmode = \"scan\"\n\t\t\t\tcontinue\n\t\telif mode == \"readMatrixZ\":\n\t\t\tpass\n\n\t# считывание из файла завершено заполнен список atoms\n\t# ans = atoms2json(atoms)\n\t# return ans\n\n\t# вернём активный документ\n\treturn atoms", "def test_from_pdb_and_smiles(self):\n # try and make a molecule from a pdb and smiles that don't match\n with pytest.raises(InvalidConformerError):\n mol = Molecule.from_pdb_and_smiles(\n get_data_file_path(\"molecules/toluene.pdb\"), \"CC\"\n )\n\n # make a molecule from the toluene pdb file and the correct smiles\n mol = Molecule.from_pdb_and_smiles(\n get_data_file_path(\"molecules/toluene.pdb\"), \"Cc1ccccc1\"\n )\n\n # make toluene from the sdf file\n mol_sdf = Molecule.from_file(get_data_file_path(\"molecules/toluene.sdf\"))\n # get the mapping between them and compare the properties\n isomorphic, atom_map = Molecule.are_isomorphic(\n mol, mol_sdf, return_atom_map=True\n )\n assert isomorphic is True\n for pdb_atom, sdf_atom in atom_map.items():\n assert mol.atoms[pdb_atom].to_dict() == mol_sdf.atoms[sdf_atom].to_dict()\n # check bonds match, however there order might not\n sdf_bonds = dict(\n ((bond.atom1_index, bond.atom2_index), bond) for bond in mol_sdf.bonds\n )\n for bond in mol.bonds:\n key = (atom_map[bond.atom1_index], atom_map[bond.atom2_index])\n if key not in sdf_bonds:\n key = tuple(reversed(key))\n assert key in sdf_bonds\n # now compare the attributes\n assert bond.is_aromatic == sdf_bonds[key].is_aromatic\n assert bond.stereochemistry == sdf_bonds[key].stereochemistry", "def read(self,isOutputFile = False, headerCols = None, verbose = 0):\n \n #\n # TODO TODO also need a 'readFinal' one to read the FINAL information!!\n # set a flag in MonteFormat.py to select which cs info to read...\n\n if verbose == 1:\n print \"Reading %s chemical shift list %s\" % (self.format,self.name)\n\n fin = open(self.name, 'rU')\n\n line = fin.readline()\n \n spinSystemId = 0\n resLabel = oldResLabel = None\n\n while line:\n\n if self.patt['%sComment' % self.format].search(line):\n\n if not isOutputFile and not self.chemShifts and not headerCols:\n\n #\n # Get atom info from first line...\n #\n \n headerCols = line.split()\n headerCols.pop(0)\n\n line = fin.readline()\n continue\n\n if self.patt['emptyline'].search(line):\n line = fin.readline()\n continue\n \n #\n # Make sure header info is available - otherwise no point\n #\n \n if not headerCols:\n raise \"Error: no header column information available. Try reading .par file!\"\n return\n \n #\n # Get the info... 
should really come for .par file!!\n #\n \n cols = line.split()\n \n infoCode = None\n \n if not isOutputFile:\n \n stripId = returnFloat(cols.pop(0))\n\n #\n # NOt necessarily info string available...\n #\n\n if self.patt['onlyFloat'].search(cols[0]):\n seqCode = None\n resLabel = None\n\n else:\n assignment = cols.pop(0)\n\n searchAssignment = self.patt['%sAssignment' % self.format].search(assignment)\n\n resLabel = searchAssignment.group(1)\n seqCode = searchAssignment.group(2)\n \n else:\n \n seqCode = cols.pop(0)\n if seqCode[-1] in '+':\n seqCode = seqCode[:-1]\n infoCode = seqCode[-1]\n \n oldResLabel = resLabel\n resLabel = cols.pop(0)\n stripId = returnFloat(cols.pop(0))\n voidCol = cols.pop(0)\n \n #\n # Set up info for atoms...\n #\n \n if not seqCode or seqCode == '?':\n seqCode = None\n spinSystemId = spinSystemId + 2\n else:\n seqCode = returnInt(seqCode)\n\n if len(cols) == 1:\n cols = cols.split(',')\n\n values = returnFloats(cols)\n\n for i in range(0,len(values)):\n atomId = headerCols[i]\n value = values[i]\n \n if value == 0.0:\n continue\n \n atomSearch = self.patt['%sAtomInfo' % self.format].search(atomId)\n \n atomName = atomSearch.group(1)\n atomPlace = atomSearch.group(2)\n \n if atomName == 'HA1':\n nextAtomValue = values[i+1]\n if nextAtomValue == 0.00:\n atomName = 'HA'\n \n curSeqCode = seqCode\n curResLabel = None\n \n if seqCode == None:\n curSpinSystemId = spinSystemId\n prevSpinSystemId = spinSystemId - 1\n else:\n curSpinSystemId = None\n prevSpinSystemId = None\n \n if atomPlace == '(i-1)' or atomPlace == '-1':\n\n if seqCode != None:\n curSeqCode = seqCode - 1\n else:\n curSpinSystemId = spinSystemId - 1\n prevSpinSystemId = None\n \n if not isOutputFile:\n curResLabel = resLabel\n else:\n curResLabel = oldResLabel\n \n elif isOutputFile:\n curResLabel = resLabel\n\n self.chemShifts.append(MonteChemShift(value,atomName,curSeqCode,curSpinSystemId,stripId,curResLabel,self.defaultMolCode, infoCode = infoCode, prevSpinSystemId = prevSpinSystemId))\n\n line = fin.readline()\n\n fin.close()", "def __init__(\n molfile,\n directory=\"/home/oohnohnoh1/Desktop/GIT/Chemiinformatics_work/Chemistry2quant/src/chemistry2quant/WIP\",\n sdf_file=\"bzr.sdf\",\n ):\n super().__init__(directory, sdf_file)\n \"\"\"\n\t\tInheriting from the rdkitProcessDf and initializng for the methods within there\n\t\t\"\"\"\n self.molfile = molfile", "def input_file_parser(cls):\n \n # Loop through the file and store lines in an appropriate list that is passed to other class functions\n with open(cls.infile_name,'r') as infile:\n for line in infile: # Loop through the whole file\n if '$molecule' in line: # Search for a section header\n for line in infile: # Enter second loop over the lines in the section\n if '$end' in line: # If you find $end, stop loop as the section is finished\n break\n else: # Otherwise add the line to a list\n cls.molecule_lines.append(line.strip())\n if '$connection' in line: # Continue for other sections...\n for line in infile:\n if '$end' in line:\n break\n else:\n cls.connector_lines.append(line.strip())\n if '$options' in line: # Continue for other sections...\n for line in infile:\n if '$end' in line:\n break\n else:\n cls.options_lines.append(line.strip())\n\n return None", "def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, 
dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n (ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n 
(hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n # the science image. 
Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)", "def _load_molecule(self):\n self.pymol = pybel.readstring(self.input_format, self.file_dic['input'])", "def convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path):\n\n files = []\n\n if os.path.isdir(sdfs_folder_path):\n # so it's a directory, go through the directory and find all the sdf files\n if sdfs_folder_path[-1:] 
!= os.sep:\n sdfs_folder_path = (\n sdfs_folder_path + os.sep\n ) # so add a / to the end of the directory\n\n files.extend(glob.glob(sdfs_folder_path + \"*.sdf\"))\n files.extend(glob.glob(sdfs_folder_path + \"*.SDF\"))\n files = list(set(files))\n if len(files) == 0:\n printout = \"\\nThere are no sdf's to convert to PDB's. There may be an issue with Gypsum.\\n\"\n print(printout)\n raise Exception(printout)\n\n # create a new subfolder if one doesn't already exist. folder will be with\n # the generation and will be titled PDBs pdb_subfolder_path will become\n # the the output folder\n pdb_subfolder_path = gen_folder_path + \"PDBs\" + os.sep\n if not os.path.isdir(pdb_subfolder_path):\n os.makedirs(pdb_subfolder_path)\n\n job_inputs = []\n for file_path in files:\n if \"params\" in file_path:\n continue\n job_inputs.append(tuple([pdb_subfolder_path, file_path]))\n job_inputs = tuple(job_inputs)\n\n # Check that there are .sdf files to test. If not raise Exception\n if len(job_inputs) == 0:\n printout = \"\\n\\nThere are no SDF files were found to convert to PDB. \"\n printout = printout + \"This may be a problem with the Gypsum-DL \"\n printout = printout + \"settings.\\nPlease check that the `--gypsum_timeout_limit` \"\n printout = printout + \"is appropriate relative to the `--gypsum_thoroughness` \"\n printout = printout + \"and `--max_variants_per_compound` parameters.\\n\"\n raise Exception(printout)\n\n # Convert sdf files to pdbs in multithread\n vars[\"parallelizer\"].run(job_inputs, convert_single_sdf_to_pdb)", "def read_formations():\n\n with resource_stream('component_contribution',\n FullTrainingData.FORMATION_ENERGY_FNAME) as fp:\n formation_df = pd.read_csv(gzip.GzipFile(fileobj=fp))\n\n cids_that_dont_decompose = set(\n formation_df.loc[formation_df['decompose'] == 0, 'cid'])\n\n for col in [\"dG'0\", \"T\", \"I\", \"pH\", \"pMg\"]:\n formation_df[col] = formation_df[col].apply(float)\n\n formation_df = formation_df[~pd.isnull(formation_df[\"dG'0\"])]\n formation_df['reaction'] = formation_df['cid'].apply(\n lambda c: Reaction({c: 1}))\n\n formation_df['balance'] = False\n formation_df['description'] = formation_df['name'] + ' formation'\n formation_df.rename(columns={'compound_ref': 'reference'},\n inplace=True)\n formation_df.drop(['name', 'cid', 'remark', 'decompose'],\n axis=1, inplace=True)\n\n logger.debug('Successfully added %d formation energies' %\n formation_df.shape[0])\n return formation_df, cids_that_dont_decompose", "def parse_pyscf_atom(atom):\n\n atom_split = atom.split()\n atoms = []\n xyz = np.zeros((int(len(atom_split) / 4), 3))\n\n for i in range(xyz.shape[0]):\n atoms.append(atom_split[4 * i])\n xyz[i, 0] = float(atom_split[4 * i + 1])\n xyz[i, 1] = float(atom_split[4 * i + 2])\n xyz[i, 2] = float(atom_split[4 * i + 3])\n\n return Molecule(xyz, atoms)", "def load_structure(self, **kwargs):\n\n\t\t# PDB fields\n\t\tself.s_name = kwargs[\"s_name\"]\t\t\t\t\t\t\t\t# Name of the structure\n\t\tself.l_s_leading_data = kwargs[\"l_s_leading_data\"]\t\t\t# PDB information written above the atom properties\n\t\tself.l_s_trailing_data = kwargs[\"l_s_trailing_data\"]\t\t# PDB information written under the atom properties\n\n\t\t# Structural fields\n\t\tself.i_atom_count = len(kwargs[\"d_atoms\"][\"element_type\"])\t\t# Retrieves the number of atoms\n\t\tself.a_atoms = np.arange(self.i_atom_count).astype(\t\t\t\t# Array of atoms properties\n\t\t\tnp.dtype([\n\t\t\t\t(\"element_type\", np.str, 6),\t\t\t\t# ATOM or HETATM\n\t\t\t\t(\"atom_serial\", np.uint16, 
1),\t\t\t\t# Atom serial number\n\t\t\t\t(\"atom_name\", np.str, 4),\t\t\t\t\t# Atom name\n\t\t\t\t(\"alternative_location\", np.str, 1),\t\t# Alternate location indicator\n\t\t\t\t(\"residue_name\", np.str, 3),\t\t\t\t# Residue name\n\t\t\t\t(\"chain_id\", np.str, 1),\t\t\t\t\t# Chain identifier\n\t\t\t\t(\"residue_serial\", np.int16, 1),\t\t\t# Residue sequence number\n\t\t\t\t(\"residue_insertion\", np.str, 1),\t\t\t# Code for insertion of residues\n\t\t\t\t(\"coord_x\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for X in Angstroms\n\t\t\t\t(\"coord_y\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for Y in Angstroms\n\t\t\t\t(\"coord_z\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for Z in Angstroms\n\t\t\t\t(\"occupancy\", np.float16, 1),\t\t\t\t# Occupancy\n\t\t\t\t(\"temperature_factor\", np.float16, 1),\t\t# Temperature factor\n\t\t\t\t(\"element_symbol\", np.str, 2),\t\t\t\t# Element symbol\n\t\t\t\t(\"element_charge\", np.str, 2),\t\t\t\t# Charge on the atom\n\t\t\t\t(\"element_mass\", np.float16, 1),\t\t\t# Mass of the atom\n\t\t\t\t(\"grid_x\", np.int16, 1),\t\t\t\t\t# X coordinates in the grid\n\t\t\t\t(\"grid_y\", np.int16, 1),\t\t\t\t\t# Y coordinates in the grid\n\t\t\t\t(\"grid_z\", np.int16, 1),\t\t\t\t\t# Z coordinates in the grid\n\t\t\t\t(\"custom_type\", np.str, 3),\t\t\t\t\t# A custom name for the element\n\t\t\t])\n\t\t)\n\n\t\t# For each field to save\n\t\tfor s_key in kwargs[\"d_atoms\"]:\n\t\t\tself.a_atoms[s_key] = kwargs[\"d_atoms\"][s_key]\t\t# Saves each field of the dictionary of atom properties\n\n\t\tself.a_atoms[\"element_mass\"] = retrieve_element_mass(\t\t# Retrieves the atomic mass of the given elements\n\t\t\tx_element_symbol=self.a_atoms[\"element_symbol\"],\t\t# Element symbol\n\t\t\tx_backup_symbol=self.a_atoms[\"atom_name\"]\t\t\t\t# Element symbol in case of fail\n\t\t)\n\t\tself.translate_custom_types()\t\t# Translates to the custom element types\n\n\t\tself.l_l_elements = set(self.a_atoms[\"element_symbol\"])\t\t# List all the different elements contained in the structure\n\t\tl_s_elements = [None] * len(gp.D_ELEMENT_NUMBER)\t\t\t# Creates an empty list with a slot for each possible element\n\n\t\t# For each chemical element\n\t\tfor s_element in self.l_l_elements:\n\n\t\t\ti_element_number = gp.D_ELEMENT_NUMBER[s_element]\t\t\t# Retrieves the atomic number of the element\n\t\t\ta_element_indexes = np.where(\t\t\t\t\t\t\t\t# Retrieves the indexes of the elements\n\t\t\t\tself.a_atoms[\"element_symbol\"] == s_element\n\t\t\t)\n\n\t\t\tl_s_elements[i_element_number] = [\t\t# Orders each element by their atomic number\n\t\t\t\ts_element,\t\t\t\t\t\t\t# Element symbol\n\t\t\t\ti_element_number,\t\t\t\t\t# Atomic number of the element\n\t\t\t\ta_element_indexes,\t\t\t\t\t# Indexes of the element in the structure\n\t\t\t\tNone,\t\t\t\t\t\t\t\t# Coordinates of the element in the grid\n\t\t\t\tNone,\t\t\t\t\t\t\t\t# VdW radius of the element\n\t\t\t\tNone\t\t\t\t\t\t\t\t# Sphere coordinates of the element\n\t\t\t]\n\t\t# End for\n\n\t\tself.l_l_elements = list(filter(None, l_s_elements))\t\t# Removes empty elements in the list\n\n\t\t# Miscellaneous fields\n\t\tself.f_mass = sum(self.a_atoms[\"element_mass\"])\t\t# Sums the mass of each element" ]
[ "0.6264046", "0.59976745", "0.58319986", "0.5608725", "0.56042486", "0.55735916", "0.5559321", "0.5514504", "0.5502829", "0.5502089", "0.54991895", "0.5464917", "0.5451997", "0.5418302", "0.54164004", "0.54018366", "0.53971356", "0.5395792", "0.53391236", "0.5336778", "0.53364587", "0.532151", "0.5314688", "0.52490056", "0.52022976", "0.51966876", "0.5178005", "0.51756227", "0.5170928", "0.5151827" ]
0.6674122
0
Extract a date from the JSON filing, at the provided path.
def get_date(filing: Dict, path: str) -> date:\n    try:\n        raw = dpath.util.get(filing, path)\n        return date.fromisoformat(raw)\n    except (IndexError, KeyError, TypeError, ValueError):\n        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fromJSON(self, path='') -> dict:\n try:\n return(importJSON(path))\n except Exception as error:\n print(f\"Error: self.fromJSON({path}) -> {error}\")", "def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt", "def get_date(data):\r\n data = json.loads(data)\r\n dates = data.get(\"ReceiptData\", {\"orderDate\": []})\r\n \r\n # Make sure we get all products in the cart.\r\n return dates['orderDate']", "def get_json_value(file_path, json_path):\n # find absolute path\n cwd = os.getcwd()\n\n if not os.path.isabs(file_path):\n file_path = os.path.abspath(os.path.join(cwd, file_path))\n\n # fix json_path\n if json_path.startswith(\"$.\"):\n json_path = json_path.replace(\"$.\", \"\", 1)\n\n with open(file_path, \"rb\") as f:\n data = json.loads(strip_comments(f.read().decode(\"utf-8\")))\n\n value = data\n for part in json_path.split(\".\"):\n if part in value:\n value = value[part]\n else:\n raise ValueError(\"'$.{}' not found in {}\".format(json_path, file_path))\n return value", "def get_file_date(self, file: str) -> date:", "def json_dates_hook(dict):\n try:\n dict['data'] = dateparser.parse(dict['data'])\n return dict\n except KeyError:\n return dict", "def actual(self, path: str):\n nodes = path.split('.')\n resp_json = self.json\n for node in nodes:\n resp_json = resp_json.get(node)\n return resp_json", "def process_json(path):\n path = os.path.abspath(os.path.expanduser(path))\n try:\n with open(path) as f:\n return json.load(f, object_hook=ascii_encode_dict)\n except ValueError as e:\n logging.error(\"File: %s\\nInvalid JSON:\\n%s\", str(path), str(e))\n raise\n except IOError as io:\n logging.error(\"Provided json file path does not exist %s\", str(path))\n raise", "def parse_json_from_path(self, infile_path):\r\n with open(infile_path, 'r') as infile:\r\n return self.parse_from_json(infile)", "def get_date(myjson_object):\n from datetime import datetime\n if 'created_at' in myjson_object:\n parsed_date= parse_date(myjson_object['created_at'])\n return parsed_date.strftime(\"%Y-%m-%d\")\n if 'timestamp' in myjson_object:\n return datetime.utcfromtimestamp(myjson_object['timestamp']/1000).strftime(\"%Y-%m-%d\")\n return None", "def get_date_from_filename(file_path):\n file_name = basename(file_path)\n name, _ = splitext(file_name)\n _, date = name.split('_')\n\n return date", "def json_hook(obj):\n if \"$dt\" in obj:\n return datetime.datetime.strptime(obj[\"$dt\"], \"%Y-%m-%dT%H:%M:%S.%f\")\n return obj", "def load_json(path: Path) -> Any:\n with path.open() as f:\n return json.load(f)", "def load_path(path):\n\n p = PathWalker(path)\n listfiles = p.walk()\n\n full_json = dict()\n\n for file in listfiles:\n try:\n gz = GzipFile(filename=file, mode='rb')\n except Exception:\n raise Exception(\"cant open file\", str(file))\n cur_json = get_formatted_dict(json.load(gz))\n full_json.update(cur_json)\n\n return full_json", "def extract_date_info(object_key):\n pacific = pytz.timezone('America/Los_Angeles')\n first_parts = object_key.split(\"/\")\n capture_type = first_parts[4]\n last_part_idx = len(first_parts) - 1\n file_name = first_parts[last_part_idx]\n\n # now parse the date and time out of the file name\n second_parts = file_name.split(\"_\")\n last_part_idx = 
len(second_parts) - 1\n if capture_type == 'snap':\n date_time_string = second_parts[last_part_idx]\n if date_time_string.endswith('.jpg'):\n date_time_string = date_time_string[:-4]\n # FIN\n final_parts = date_time_string.split(\"-\")\n date_part = final_parts[0]\n time_part = final_parts[1]\n\n # FIN\n # FIN\n if capture_type == 'record':\n time_part = second_parts[last_part_idx]\n date_part = second_parts[(last_part_idx - 1)]\n if time_part.endswith('.mp4'):\n time_part = time_part[:-4]\n # FIN\n\n # parse out our date\n year = date_part[:4]\n date_part = date_part[4:]\n month = date_part[:2]\n day = date_part[2:]\n\n # parse out the time\n hour = time_part[:2]\n time_part = time_part[2:]\n seconds = time_part[2:]\n minutes = time_part[:2]\n\n if hour[:1] == '0':\n hour = hour[1:]\n if month[:1] == '0':\n month = month[1:]\n if day[:1] == '0':\n day = day[1:]\n\n this_date = datetime.datetime(int(year), int(month), int(day), int(hour),\n int(minutes), int(seconds), 0, pacific)\n return_object = {'isodate': this_date.isoformat(),\n 'year': year,\n 'month': month,\n 'day': day,\n 'hour': hour,\n 'minutes': minutes,\n 'seconds': seconds}\n return return_object", "def read_data_file(data_file_path):\n df = pd.read_json(data_file_path)\n df = parse_column_names(df)\n return create_datetime_column(df)", "def datetime_from_json(data):\n\n a = data['meta']['dateobs']\n year = int(a[:4])\n month = int(a[4:6])\n day = int(a[6:8])\n hour = int(a[9:11])\n time1 = datetime.datetime(year, month, day, hour)\n return time1", "def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json", "def extract_json(path, json_files):\n extracted_dicts = []\n for json_file in json_files:\n with open(os.path.join(path, json_file)) as f:\n extracted_dicts.append(json.load(f))\n return extracted_dicts", "def load_json(self, unformatted_path: str):\n formatted_path = unformatted_path.format(\n experiment_folder=self.experiment_folder\n )\n if not os.path.isfile(formatted_path):\n self.dump_json(formatted_path, data={})\n with open(formatted_path, \"r\") as infile:\n json_data = json.load(infile)\n return json_data", "def load_json(path):\n with open(normpath(path), 'r', encoding='utf-8') as file:\n return json.load(file)", "def from_path(cls, path: str):\n with open(path) as f:\n return json.load(f)", "def test_json_to_date(self):\n json_date_string = \"2018-10-13 12:12:12\"\n the_date = ct.json_to_date(json_date_string)\n assert isinstance(the_date, datetime)\n assert the_date.year == 2018\n assert the_date.month == 10", "def get_json_from_file(path):\n\n try:\n with open(path) as f:\n return json.load(f)\n\n except Exception as err:\n # logger.error(err)\n logger.info(f'FILE NOT FOUND - {path}')", "def _get_json(self, path):\n cur_dir = path_stroke_fix(path)\n path = f\"{cur_dir}config/config.json\"\n return json.load(open(path, 'r'))", "def open_json(path):\n with open(path, \"r\") as json_data_file:\n data = json.load(json_data_file)\n return data", "def get_date_input_file(file: str) -> str:\n # check format\n if not match_input_files(file):\n raise Exception(\"Not valid input file format\")\n\n else:\n date = result = re.search(r\"input_population_(.*)\\.feather\", file)\n return date.group(1)", "def load_daily_trivia(path=\"settings/daily_trivia.json\"):\n with open(path, encoding='utf-8') as json_data:\n return json.load(json_data)", "def from_json(path: str):\n with open(path) as f:\n return json.load(f)", "def extract_data(line):\n lines = line.split(' - ')\n 
return json.loads(lines[1])" ]
[ "0.6075106", "0.5673903", "0.56059843", "0.5590737", "0.55821204", "0.5564711", "0.55302495", "0.54868776", "0.54759276", "0.54637516", "0.5429487", "0.5429347", "0.5408953", "0.5400296", "0.53059995", "0.530356", "0.52951413", "0.5294175", "0.5283827", "0.5268045", "0.5236056", "0.52322644", "0.5195489", "0.5193321", "0.5180424", "0.5175355", "0.51709276", "0.5165871", "0.5161105", "0.51346385" ]
0.6837925
0
Extract a str from the JSON filing, at the provided path.
def get_str(filing: Dict, path: str) -> str:\n    try:\n        raw = dpath.util.get(filing, path)\n        return str(raw)\n    except (IndexError, KeyError, TypeError, ValueError):\n        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_json_str(self):\n\n with open(self.path, mode='r', encoding='utf-8') as file:\n return file.read()", "def actual(self, path: str):\n nodes = path.split('.')\n resp_json = self.json\n for node in nodes:\n resp_json = resp_json.get(node)\n return resp_json", "def get_name_from_txt (txtpath):\r\n f= open(txtpath,\"r\")\r\n contents = json.load(f)\r\n #f.close \r\n return contents", "def get_json_value(file_path, json_path):\n # find absolute path\n cwd = os.getcwd()\n\n if not os.path.isabs(file_path):\n file_path = os.path.abspath(os.path.join(cwd, file_path))\n\n # fix json_path\n if json_path.startswith(\"$.\"):\n json_path = json_path.replace(\"$.\", \"\", 1)\n\n with open(file_path, \"rb\") as f:\n data = json.loads(strip_comments(f.read().decode(\"utf-8\")))\n\n value = data\n for part in json_path.split(\".\"):\n if part in value:\n value = value[part]\n else:\n raise ValueError(\"'$.{}' not found in {}\".format(json_path, file_path))\n return value", "def processed_json_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')", "def get_json_path(file_path):\n file_path_parts = file_path.split('/')\n data_html = '/'.join(file_path_parts[0:2]) # data/html\n\n filename = file_path_parts[2] # Agile_Data_Code_2.html\n json_name = re.sub('html$', 'json', filename) # Agile_Data_Code_2.json\n\n text_path = '{}/text/{}'.format(\n data_html,\n json_name\n )\n return text_path, filename", "def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json", "def process_json(path):\n path = os.path.abspath(os.path.expanduser(path))\n try:\n with open(path) as f:\n return json.load(f, object_hook=ascii_encode_dict)\n except ValueError as e:\n logging.error(\"File: %s\\nInvalid JSON:\\n%s\", str(path), str(e))\n raise\n except IOError as io:\n logging.error(\"Provided json file path does not exist %s\", str(path))\n raise", "def fromJSON(self, path='') -> dict:\n try:\n return(importJSON(path))\n except Exception as error:\n print(f\"Error: self.fromJSON({path}) -> {error}\")", "def get(self, path):\n base = 'http://%s:%d' % (self.host, self.port)\n url = '%s/%s' % (base, path)\n\n conn = urllib2.urlopen(url)\n payload = conn.read()\n try: \n payload = json.loads(payload)\n except:\n pass\n\n conn.close()\n\n return payload", "def get(self, path):\n query_str = '?columns=ID,%s' % path\n\n get_uri = uri_parent(self._eobj._uri) + query_str\n jdata = JsonTable(self._intf._get_json(get_uri)\n ).where(ID=self._get_id())\n\n # unfortunately the return headers do not always have the\n # expected name\n\n header = difflib.get_close_matches(path.split('/')[-1],\n jdata.headers()\n )\n\n if header == []:\n header = difflib.get_close_matches(path, jdata.headers())[0]\n else:\n header = header[0]\n\n replaceSlashS = lambda x : x.replace('\\s', ' ')\n if type(jdata.get(header)) == list:\n return map(replaceSlashS, jdata.get(header))\n else:\n return jdata.get(header).replace('\\s', ' ')", "def _get_json(self, path):\n cur_dir = path_stroke_fix(path)\n path = f\"{cur_dir}config/config.json\"\n return json.load(open(path, 'r'))", "def load_json(path):\n with open(normpath(path), 'r', encoding='utf-8') as file:\n return json.load(file)", "def loadJson (self, path):\n\n # get all lines in json, concatenate then into a big string then parse it\n with open(path, \"r\") as file_content:\n all_lines = file_content.readlines()\n all_content_str = \"\".join(all_lines)\n json_dict = json.loads(all_content_str)\n self.tile_reprs = 
list(json_dict['tiles']['structural-tiles'].keys())\n\n # remove this empty char\n self.tile_reprs.remove(\"-\")", "def extract_path(path: str) -> str:\n return _RE_URL.sub(r'{\\1}', path)", "def extract_data(line):\n lines = line.split(' - ')\n return json.loads(lines[1])", "def _get_json_file_content(file_dir_arg):\n result = None\n\n try:\n with open(file_dir_arg, 'r', encoding='UTF-8-SIG') as f:\n result_tmp = f\n result = load(result_tmp)\n except Exception as e:\n print(e.args)\n\n return result", "def load_json(path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'r') as json_file:\n return json.load(json_file)", "def _pretty_json_path(self, path):\r\n segments = path.split('.')\r\n\r\n def builder(prev, cur):\r\n if re.match(cur):\r\n return \"{0}[]\".format(prev)\r\n return \"{0}.{1}\".format(prev, cur)\r\n\r\n segments = reduce(builder, segments)\r\n return segments", "def load_json(path: Path) -> Any:\n with path.open() as f:\n return json.load(f)", "def load_json(self, unformatted_path: str):\n formatted_path = unformatted_path.format(\n experiment_folder=self.experiment_folder\n )\n if not os.path.isfile(formatted_path):\n self.dump_json(formatted_path, data={})\n with open(formatted_path, \"r\") as infile:\n json_data = json.load(infile)\n return json_data", "def get_content(self, file_path):\n file_name = Path(file_path).name.replace(\"\\uf010\", \"\\u0010\")\n\n root = Path(self.root)\n info_path = Path(file_path).parent.parent / \"metadata_mls.json\"\n info = json.load((root / info_path).open())\n\n content = info.get(file_name, None)\n if content is None:\n print(file_name)\n return None\n return content[\"original\"]", "def from_path(cls, path: str):\n with open(path) as f:\n return json.load(f)", "def json_full_path(base_path, key):\n if base_path is None or base_path == \"\":\n return key\n else:\n return f'{base_path}.{key}'", "def readjson(path):\n\twith open(path, 'r') as file:\n\t\treturn json.load(file)", "def get_json_from_file(path):\n\n try:\n with open(path) as f:\n return json.load(f)\n\n except Exception as err:\n # logger.error(err)\n logger.info(f'FILE NOT FOUND - {path}')", "def jsonpath_to_xpath(path):\n return '/' + path.replace('.', \"/\")", "def get_strDrink(json):\n\n strDrink = introcs.find_str(json,'\"strDrink\"')\n\n\n string = json[strDrink+10:]\n\n\n result = first_inside_quotes(string)\n\n\n return result", "def _extract_json_from_output(self, output):\n json_beg = output.find('{')\n json_end = output.rfind('}') + 1\n same_line = False\n\n # Make sure that the opening curly bracket is on the same line as the\n # closing one.\n while not same_line:\n if json_beg == -1 or json_end == -1:\n raise NoJsonFoundError('No JSON found in the output')\n elif output.find('\\n', json_beg, json_end) == -1:\n same_line = True\n else:\n json_beg = output.find('{', json_beg, json_end)\n\n return output[json_beg:json_end]", "def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path" ]
[ "0.6552176", "0.6368723", "0.62871057", "0.62073576", "0.6198753", "0.61034834", "0.5978531", "0.5907652", "0.59014565", "0.5865438", "0.58326775", "0.58097404", "0.5789452", "0.5682722", "0.56802696", "0.5669492", "0.5638627", "0.5630336", "0.56209874", "0.5619586", "0.5601393", "0.55950147", "0.55740565", "0.55461615", "0.55437374", "0.5529414", "0.55109423", "0.5510423", "0.5506682", "0.5502363" ]
0.6647993
0
Extract a boolean from the JSON filing, at the provided path.
def get_bool(filing: Dict, path: str) -> str: try: raw = dpath.util.get(filing, path) return bool(raw) except (IndexError, KeyError, TypeError, ValueError): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boolean_value(cls, json_field: str, value: bool) -> \"JsonPattern\":\n return jsii.sinvoke(cls, \"booleanValue\", [json_field, value])", "def getBool( self, par, path ):\n\n return self.db.getBoolPar( par, path )", "def is_json_path(location):\n if filetype.is_file(location):\n try:\n with open(location) as jsonfile:\n result = simplejson.load(jsonfile)\n if result:\n return True\n except:\n return False\n return False", "def _parse_boolean(node, key):\n element = node.get(key)\n if element is not None:\n return bool(element)\n else:\n return None", "def _parse_boolean(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n if element.text == 'true':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return None", "def isjson(filepath):\n return filepath.lower().endswith('.json')", "def parse_json_format(file_path=None):\n is_file_res = check_is_file(file_path)\n if is_file_res['result']:\n with open(file_path) as f:\n if f.readline().strip().startswith('['):\n return generate_response(result='jsonl')\n return generate_response(result='json')\n else:\n return is_file_res", "def fromJSON(self, path='') -> dict:\n try:\n return(importJSON(path))\n except Exception as error:\n print(f\"Error: self.fromJSON({path}) -> {error}\")", "def extract_value(self, json_body):\n # Extract\n res = next(iter(jmespath.search(JMESPATH_BASE, json_body)))\n\n try:\n res = res[self.key]\n except (KeyError, TypeError):\n _LOGGER.warning(\"Sensor %s not found in %s\", self.key, res)\n self.value = None\n return False\n\n if self.path is None:\n # Try different methods until we can decode...\n _paths = [JMESPATH_VAL, JMESPATH_VAL_IDX.format(self.key_idx)]\n while _paths:\n _path = _paths.pop()\n _val = jmespath.search(_path, res)\n if _val:\n _LOGGER.debug(\"Extracting %s using %s\", self.name, _path)\n self.path = _path\n break\n\n # Extract new value\n if self.path is None:\n _LOGGER.debug(\"Null path %s\", res)\n res = None\n else:\n res = jmespath.search(self.path, res)\n\n if isinstance(res, int) and self.factor:\n res /= self.factor\n try:\n return res != self.value\n finally:\n self.value = res", "def get_bool(self, item: str) -> bool:\n return as_bool(self[item])", "def _parse_bool(line):\n return line in ('true', 'True', '1')", "def getbool(self, key):\n try:\n return self.parser.getboolean(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read boolean value in config file for key '{}' and string '{}'. 
Must be Yes/No\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def exists(obj: Dict, path: str) -> bool:\n\n return get(obj, path) is not None", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def jsonpath(self, path, patterns=[], queries=[], use_json=True):\n import hxl.filters\n return hxl.filters.JSONPathFilter(self, path, patterns=patterns, queries=queries, use_json=use_json)", "def parse_json_from_path(self, infile_path):\r\n with open(infile_path, 'r') as infile:\r\n return self.parse_from_json(infile)", "def _unpack(self, key, result, path):\n status = result and result.get('status')\n if type(status) == dict and status.get('code') == 200:\n return key and result.get(key)\n else:\n log.warn('Trove API return status %s for path %s', status, path)\n return None", "def Pre(self, path, **unused_kwargs):\n return hooks.TitanMethodResult(bool(files.Get(path)))", "def process_json(path):\n path = os.path.abspath(os.path.expanduser(path))\n try:\n with open(path) as f:\n return json.load(f, object_hook=ascii_encode_dict)\n except ValueError as e:\n logging.error(\"File: %s\\nInvalid JSON:\\n%s\", str(path), str(e))\n raise\n except IOError as io:\n logging.error(\"Provided json file path does not exist %s\", str(path))\n raise", "def apply_filter(json_arg, filtering_line):\n\n logging.info(\"apply_filter:starting. jsonPath filter=[%s]\", filtering_line)\n\n res = jsonpath(json_arg, filtering_line, result_type=\"PATH\")\n if isinstance(res, types.BooleanType) or len(res) == 0:\n logging.info(\"apply_filter: The prefilter [%s] matched nothing\", filtering_line)\n return json_arg\n if len(res) > 1:\n raise AssertionError(\n \"Bad pre-filter [%s] (returned [%d] entries, should return one at most\",\n filtering_line,\n len(res),\n )\n as_json_patch = from_path_to_jsonpatch(res[0])\n logging.info(\"apply_filter: applying patch! resolved patch =%s\", as_json_patch)\n patched_json = jsonpatch.apply_patch(json_arg, as_json_patch)\n\n logging.info(\"apply_filter: json after patching: %s\", patched_json)\n return patched_json", "def test(cls, pathHolder, parentCrawler):\n if not super(AsciiCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in ['json']", "def read_key_bool(op, key):\n if key in op:\n assert isinstance(op[key], bool), 'must be bool: %s' % key\n return op[key]\n return None", "def load_contains(path: str) -> Dict[str, List[str]]:\n with open(path, 'r', encoding='utf8') as f:\n return json.load(f)", "def _get_json(self, path):\n cur_dir = path_stroke_fix(path)\n path = f\"{cur_dir}config/config.json\"\n return json.load(open(path, 'r'))", "def load_json(path: Path) -> Any:\n with path.open() as f:\n return json.load(f)", "def choose_json():\n url = mock_http_get.call_args[0][0]\n if url.endswith(\".expanded.json\"):\n return dtdl_expanded_json\n else:\n return dtdl_json", "def __nonzero__(self):\n return any(self.path)", "def GetFileEntryByPath(self, path):\n if not self._file_entries:\n return False\n\n return self._file_entries.get(path, None)", "def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json" ]
[ "0.6071287", "0.57850856", "0.5647551", "0.55946714", "0.5420584", "0.5406428", "0.5285786", "0.5227173", "0.52162", "0.51716995", "0.5158244", "0.5113517", "0.51068", "0.50923777", "0.509074", "0.5047108", "0.50103277", "0.49863604", "0.49533832", "0.4951799", "0.49441293", "0.49369016", "0.49364677", "0.4921997", "0.48962516", "0.48952955", "0.48807025", "0.48756555", "0.4873567", "0.4871256" ]
0.7121972
0
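
A minimal, self-contained usage sketch of the get_bool document in the record above. The filing payload and paths are invented for illustration, the return annotation is tightened to Optional[bool] for the sketch, and the third-party dpath package (dpath.util.get with "/"-separated paths) is assumed to be installed:

from typing import Dict, Optional

import dpath.util


def get_bool(filing: Dict, path: str) -> Optional[bool]:
    # Coerce whatever sits at `path` to bool; swallow lookup/shape errors.
    try:
        return bool(dpath.util.get(filing, path))
    except (IndexError, KeyError, TypeError, ValueError):
        return None


filing = {"header": {"isCorrection": True}}
print(get_bool(filing, "header/isCorrection"))  # True
print(get_bool(filing, "header/noSuchKey"))     # None - the KeyError is swallowed
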
quote src to string
def _quote(src, encoding="utf-8"): if isinstance(src, unicode): src = src.encode(encoding) return urllib.quote(src)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline", "def Sourceify(path):\n return path", "def data_for_src(self, file_id):\n data, metadata = self.load(file_id, True)\n return \"data:image/gif;base64,%s\" % data.encode('base64')", "def get_src(text, base_url=None):\n # get the value of SRC in an IMG tag, or None if not found\n m = re.search(r'src\\s*=\\s*\"?([^>\" ]+)\"?', text, re.I)\n if not m:\n return None\n link = m.group(1)\n if base_url and not link.lower().startswith(\"http\"):\n import urlparse\n link = urlparse.urljoin(base_url, link)\n return link", "def path(self) -> str:\n return self.src + \"/\"", "def safe_filename(src: str):\n return \"\".join(\n (c if (c.isalnum() or c in _safe_chars) else ' ')\n for c in src).strip()", "def src(self, src):\n\n self._src = src", "def src_name(self) -> str:\n return self._src_name", "def image(self, src, title, text):\n src = escape_link(src)\n text = escape(text, quote=True)\n if title:\n title = escape(title, quote=True)\n html = '<img src=\"%s\" alt=\"%s\" title=\"%s\"' % (src, text, title)\n else:\n html = '<img src=\"%s\" alt=\"%s\"' % (src, text)\n if self.options.get('use_xhtml'):\n return '%s />' % html\n return '%s>' % html", "def getSrcString(self):\n srcString = 'c -------------------------- Source Defination ----------------------------\\n'\n srcString += 'c 1 nanogram Cf-252 source = 1E-9 grams = 6.623E-11 cc \\n'\n srcString += 'sdef pos=-200 0 108.85 cel=70 par=SF rad=d1 \\n'\n srcString += 'si1 0 2.510E-04 \\n'\n srcString += 'sp1 -21 1 \\n'\n return srcString", "def font_src(self, name, extensions):\n def _generate_src():\n \"\"\" generate the src strings\n \"\"\"\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))\n\n return \",\".join([i for i in _generate_src()])", "def test_raw_static_check():\r\n path = '\"/static/foo.png?raw\"'\r\n assert_equals(path, replace_static_urls(path, DATA_DIRECTORY))\r\n\r\n text = 'text <tag a=\"/static/js/capa/protex/protex.nocache.js?raw\"/><div class=\"'\r\n assert_equals(path, replace_static_urls(path, text))", "def _get_image_url_in_content(self, content):\n begin_token = 'src=\"'\n begin = content.find(begin_token)\n if begin == -1:\n return None\n\n # Acrescentamos o tamanho do 'begin_token' no 'begin'\n begin += len(begin_token)\n end = content.find('\"', begin)\n url = content[begin:end]\n return url.split('?')[0]", "def _str_eval_img(eval, act, ctxt, *obs) :\n from PIL import Image\n import os.path\n filename = obs[0][0]\n css_class = \"desc_img\"\n if len(obs)>1 and obs[1][0] == \"left\" :\n css_class = \"desc_img_left\"\n width, height = Image.open(os.path.join(os.path.abspath(\"games/teptour_files\"), filename)).size\n print \"Image\",filename,\"is\",width,\"x\",height\n return [\"<img class=\\\"\"+css_class+\"\\\" width=\\\"\"+str(width)+\"\\\" height=\\\"\"+str(height)+\"\\\" src=\\\"teptour/\"+filename+\"\\\">\"]", "def source(request, filepath_pseudos):\n filepath_pseudo = pathlib.Path(filepath_pseudos()) / 'Ar.upf'\n\n if request.param is str:\n return str(filepath_pseudo)\n\n if request.param is pathlib.Path:\n return filepath_pseudo\n\n return io.BytesIO(filepath_pseudo.read_bytes())", "def getSource():", "def build_image_path(self, src):\r\n o = urlparse(src)\r\n # we have a full url\r\n if o.hostname:\r\n return 
o.geturl()\r\n # we have a relative url\r\n return urljoin(self.target_url, src)", "def escape(orig):\n return '\"{}\"'.format(orig.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"'))", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', '\\\\\"')\n return '\"%s\"' % arg", "def un_src(self):\n if self.src is None:\n return\n self.inline = '''\n var script = document.createElement('script');\n script.src = \"%s\";\n document.body.appendChild(script);\n''' % self.src\n self.src = None", "def src(self) -> str:\n return self._branch + self._id", "def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n source_dir = os.path.dirname(self.source_path)\n link = os.path.abspath(os.path.join(source_dir, link))\n return '<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (link, title, alt)", "def format_source(src_filename, src, annotation_set, tpl,\n web_path, index_path):\n rw = Rewriter(src)\n sanitize_code_as_html(rw)\n annotation_set.apply(rw)\n code = '\\n'.join(rw.lines)\n\n return tpl.substitute(filename=src_filename,\n web_path=web_path,\n code=code,\n index_path=index_path)", "def unsafe_url(**options):\n return f\"unsafe/{plain_image_url(**options)}\"", "def load_file(self, src: str) -> bytes:\n if re.match(\"https?://\", src):\n content = self.load_file_from_url(src)\n else:\n content = self.load_file_from_folders(src)\n return content", "def escape_filename(fn):\n return ''.join(filter(valid_chars.__contains__, fn))", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def _unquote(src, encoding=\"utf-8\"):\n return urllib.unquote(src).decode(encoding)", "def load_url(src):\n return LOAD(url=src)" ]
[ "0.5974841", "0.58601075", "0.5774958", "0.5739309", "0.561575", "0.5580705", "0.5553651", "0.5544228", "0.5541506", "0.548759", "0.5487165", "0.54729", "0.5467639", "0.54428", "0.54321027", "0.5386833", "0.53533804", "0.53520805", "0.5338085", "0.53334767", "0.5330983", "0.529171", "0.5286314", "0.5273227", "0.5272518", "0.52653354", "0.52246195", "0.52246195", "0.52112246", "0.5209895" ]
0.6619573
0
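
For reference, the _quote document in the record above targets Python 2 (unicode, urllib.quote). A rough Python 3 sketch of the same idea, swapping in urllib.parse.quote; the sample string is invented for illustration:

from urllib.parse import quote


def _quote(src: str, encoding: str = "utf-8") -> str:
    # Percent-encode the UTF-8 bytes of src; '/' stays unescaped by default.
    return quote(src.encode(encoding))


print(_quote("a b/c é"))  # a%20b/c%20%C3%A9
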
Return the expected keys for the log entry.
def expected_log_keys(learner: adaptive.BaseLearner) -> list[str]: # Check if the result contains the expected keys expected_keys = [ "elapsed_time", "overhead", "npoints", "cpu_usage", "mem_usage", ] if not _at_least_adaptive_version("0.16.0", raises=False) and not isinstance( learner, adaptive.SequenceLearner, ): # The loss cache for SequenceLearner was introduced in adaptive 0.16.0 # see https://github.com/python-adaptive/adaptive/pull/411 expected_keys.append("latest_loss") return expected_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self, entry_type:str, x:str):\n keys = set()\n x = self._decode(x)\n\n for log in self.logs:\n for datum in log[entry_type]:\n res = self._follow(datum, x)\n\n if type(res) == dict:\n for key in res.keys():\n keys.add(key)\n elif type(res) == list:\n keys.add('< %d' % len(res))\n \n return list(keys)", "def provideExpectedMetaKeys(self):\n return self.metadataKeys, self.metadataParams", "def keys():", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))", "def get_keys(self):\n self._logger.info(ME + '.get_keys()')\n\n tmp_primary_keys = []\n tmp_data_keys = []\n try:\n tmp_primary_keys = config.get(ME, 'primary_keys').split(',')\n tmp_data_keys = config.get(ME, 'data_keys').split(',')\n self.index_key = config.get(ME, 'index_key') #FIXME: this is bad\n except KeyError as error_msg:\n self._logger.error(\n 'EXCEPTION: Keys missing' +\n '\\r\\tprimary_keys={0}'.format(','.join(tmp_primary_keys)) +\n '\\r\\tdata_keys={0}'.format(','.join(tmp_data_keys)) +\n '\\r\\tindex_key={0}'.format(self.index_key),\n exc_info=True\n )\n raise Connection.TableKeysMissing(error_msg, ME)\n\n self._logger.debug(\n 'keys validated:' + \\\n '\\r\\tprimary_keys={0}'.format(','.join(tmp_primary_keys)) +\n '\\r\\tdata_keys={0}'.format(','.join(tmp_data_keys)) +\n '\\r\\tindex_key={0}'.format(self.index_key)\n )\n return tmp_primary_keys, tmp_data_keys", "def check_keys(self):", "def AllKeys(self) -> _n_0_t_1[str]:", "def keys(self):\n return", "def Keys(self) -> _n_1_t_4:", "def keys(self) -> Sequence[str]:\n raise NotImplementedError", "def validate_instruction_keys(instruction: TransactionInstruction, expected: int) -> None:\n if len(instruction.keys) < expected:\n raise ValueError(f\"invalid instruction: found {len(instruction.keys)} keys, expected at least {expected}\")", "def keys(self) -> List[str]:\n raise NotImplementedError", "def keys(self):\n raise NotImplementedError", "def valid_config_keys():\n click.echo(', '.join(get_class_properties(GenConfig)))", "def test_log_key(self):\n kwargs = {\n \"follow_up\": None,\n \"notes\": \"This is a test.\",\n \"gdoc_link\": \"\",\n \"state_id\": \"KS\",\n \"contact\": None,\n \"user_id\": 9,\n \"formal_request\": False,\n \"date\": \"2013-03-28\",\n \"org_id\": 15,\n \"subject\": \"Test subject line\"\n }\n log = Log(**kwargs)\n log.save()\n self.assertEqual(log.log_key(), ('KS', '2013-03-28', 'Test subject line'))\n self.assertEqual(log.log_key(as_string=True), 'KS - 2013-03-28 - Test subject line')\n\n # Test with contact\n contact = Contact.objects.all()[0]\n log.contact = contact\n log.save()\n expected = (\n 'KS',\n '2013-03-28',\n u'Williams (Kansas Secretary of State elections division)',\n 'Test subject line',\n )\n self.assertEqual(expected, log.log_key())", "def _extra_keys(self):\r\n return []", "def keys(self) -> t.Tuple[str, ...]:\n return self._keys", "def get_group_keys(self):\r\n if len(self.conflicting_exclusives) == 0:\r\n return [\"<none>\"]\r\n else:\r\n return self.key_to_targets.keys()", "def _get_expected_sections(self):\n\n with open(self.filename, \"r\") as fh:\n yaml = ruamel.yaml.YAML(typ=\"unsafe\", pure=True)\n data = yaml.load(fh.read())\n keys = list(data.keys())\n return keys", "def transform_info_keys():\n return (TraceKeys.CLASS_NAME, TraceKeys.ID, TraceKeys.TRACING, TraceKeys.DO_TRANSFORM)", "def getDiscoveredMetricsKeys(self):\n return tuple(self.__foundMetrcsKeySet)", "def keys(self) -> 
List:\n pass", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def keys(self) -> tuple(Any, ...): # type: ignore\n return tuple(self.contents.keys())", "def state_info_keys(self):\n return [k for k, _ in self.state_info_specs]", "def _get_missing_keys(self):\n REQUIRED_KEYS = [\n 'date_purchased', 'cost', 'supply_type_id'\n ]\n\n return [key for key in REQUIRED_KEYS if not key in self.request.data]", "def key_attributes(self):\n\n return [level.key for level in self.levels]", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def valid_config_keys():\n click.echo(', '.join(get_class_properties(OffshoreInputsConfig)))" ]
[ "0.6501111", "0.62000287", "0.61273104", "0.61169225", "0.6086626", "0.60766816", "0.60219604", "0.6018274", "0.5984301", "0.5967556", "0.5930063", "0.5905795", "0.5868908", "0.586158", "0.5827901", "0.57728463", "0.5768729", "0.5760981", "0.5749968", "0.5745742", "0.5743827", "0.5736735", "0.5705893", "0.5696833", "0.56902754", "0.568159", "0.56736904", "0.56550694", "0.5620599", "0.56125647" ]
0.71414775
0
Return the Spacy processed sentences in which the element is positioned.
def get_processed_sentence(self): if not hasattr(self, "in_sentence"): raise AttributeError(f"{self} does not have attribute 'in_sentence'.") try: sen_ixs = [sen.element_id for sen in self.in_sentence] except TypeError as e: sen_ixs = [self.in_sentence.element_id] sen_procs = [ s for i, s in enumerate(self.in_document.sentences_processed) if i in sen_ixs ] return sen_procs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def get_sentences(self):\n return [s for s in self.text.split('\\n')]", "def get_processed_sentences(self):\n\n self.__resolve_coreference()\n self.__tokenize_sentences()\n\n return self.tokenized_sentences", "def sentences(self, tag=False, tag_method=None):\n self.__set_text_node(self.root_)\n sentence_nodes = filter(lambda n: n.nodeType == n.ELEMENT_NODE and n.tagName == 's',\n list(self.text_node.childNodes))\n sentences = []\n for s in sentence_nodes:\n current = []\n TimeMLDoc.__get_text(s, current, False)\n #print(current)\n if not tag:\n sentences.append(''.join([ c[0] for c in current]))\n else:\n sentences.append(tag_method(current))\n return sentences", "def get_sentences(self):\n return self._sentences", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def pos(self):\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]", "def pos(self):\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]", "def sentences(self) -> List[str]:\n\t\treturn [sentence for sentence in re.split('(?<=[.!?])', self.text)]", "def get_sentence(self):", "def sentence_segment(self, doc, candidate_pos, lower):\n sentences = []\n for sent in doc.sents:\n selected_words = []\n for token in sent:\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n sentences.append(selected_words)\n return sentences", "def gather_sentences(self):\n sentences = Sentence.objects.all()\n return sentences", "def sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences", "def collect_sentences(self):\n sentences = []\n for document in self.documents:\n for sentence_token in document.sentences:\n sentences.append(sentence_token)\n return sentences", "def get_sentences(text):\n \n return text.split('.')", "def getTrueSentences(self, form):", "def return_augmented_sentences(self) -> list:\n return self.augmented_sentence_list", "def gather_sentences(self):\n if self.company_name:\n companies = Company.objects.filter(name__contains=self.company_name)\n dpefs = DPEF.objects.filter(company__in=companies)\n sentences = Sentence.objects.filter(dpef__in=dpefs).all()\n else:\n sentences = Sentence.objects.none() # TODO: can be set to none later when all works\n return sentences", "def split_sentences(self, text):\n assert isinstance(text, str)\n text = text.replace('\\n', '')\n\n if text.strip() == '':\n return []\n\n output = self._annotate(text, properties={\n \"annotators\": \"tokenize,ssplit\",\n \"coref.md.type\": \"dep\",\n \"coref.mode\": \"statistical\"\n })\n\n sentences = []\n for sentence in output['sentences']:\n num_token = len(sentence['tokens'])\n start_index = 
sentence['tokens'][0]['characterOffsetBegin']\n end_index = sentence['tokens'][num_token - 1]['characterOffsetEnd']\n sentences.append(text[start_index:end_index])\n return sentences", "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def sentences(summary, nlp):\n text = remove_spurious_words(text_of(summary))\n all_sentence = [sentence for sentence in re.split(\"[。,?!\\n]\", text) if sentence]\n all_sentence = [re.sub('[ ]+', ' ', sentence.encode('gb2312', 'ignore').decode('gb2312')).strip() for sentence in\n all_sentence]\n return [nlp.ner(sentence) for sentence in all_sentence if sentence]", "def get_sentences(self, text_to_parse):\r\n simple_sentence, *rest = text_to_parse.split('(', 1)\r\n kana_sentence, *rest = rest[0].split(')', 1)\r\n return simple_sentence.strip(), kana_sentence.strip().replace(' ', '')", "def get_sentences(text, nlp):\n\n # get sentences from text\n sentences = [sentence for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n processed_sentences = [convert_to_string(remove_junk(tokenize_text(sentence, nlp))) for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n # convert the sentences into a list of document vectors\n sentence_vector_list = [nlp(sentence).vector for sentence in processed_sentences]\n\n return sentences, sentence_vector_list", "def getPosition(self):\n\t\txxx1 = self.stokes()\n\t\txxx2 = self.thp()\n\t\txxx3 = self.tthp()\n\t\treturn [xxx1, xxx2, xxx3]", "def get_sentence_sentiments(comment):\n sentence_score_list = []\n\n split_comment = make_sentences(comment)\n for sentence in split_comment:\n if sentence == ' ' or sentence == '' or sentence == ' ':\n continue\n text = flair.data.Sentence(sentence)\n flair_sentiment.predict(text)\n\n value = text.labels[0].to_dict()['value']\n if value == 'POSITIVE':\n result = text.to_dict()['labels'][0]['confidence']\n else:\n result = -(text.to_dict()['labels'][0]['confidence'])\n\n sentence_score = round(result, 6)\n sentence_score_list.append(sentence_score)\n\n return sentence_score_list", "def extract_sentences_from_text(self, text_data):\n pass", "def getConstantSentenceForms(self):", "def split(text):\n doc = nlp(text)\n sentences = [x.text_with_ws for x in doc.sents]\n return sentences", "def get_sentences(self):\n for tree in self.tree_generator():\n yield tree[\"title\"] + \" \" + tree[\"selftext\"]\n for _, comment in tree[\"comments\"].items():\n yield comment[\"body\"]" ]
[ "0.69350046", "0.69350046", "0.6880929", "0.68049884", "0.6687255", "0.6577726", "0.64769137", "0.6395686", "0.6395686", "0.6374306", "0.63575345", "0.6353244", "0.6340651", "0.62586695", "0.6229776", "0.622054", "0.6184957", "0.61654425", "0.6101179", "0.6096108", "0.60736525", "0.5989865", "0.5985511", "0.5948307", "0.59462005", "0.59279954", "0.5926191", "0.58984786", "0.5885713", "0.5880664" ]
0.7071078
0
Check if the annotation is pronominal by full parsed PoS tag. All tokens should be pronominal (works best for anaphoric pronominal mentions as intended).
def check_pronominal(self): pronom_tags = ["PRP", "PRP$", "WDT", "WP", "WP$"] token_procs = self.get_processed_tokens() all_pronom = all( t.tag_ in pronom_tags for t in token_procs ) # True if all tokens are pronom_tags # print(f"{' '.join(t.text + '.' + t.tag_ for t in token_procs)}: Pronominal = {all_pronom}") return all_pronom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contains_pronoun(cluster):\n for mention in cluster:\n if any([w.tag_.startswith(\"PRP\") for w in mention]):\n # Found a mention with a pronoun\n return True\n return False", "def find_pronoun(sent):\n pronoun = None\n\n for word, part_of_speech in sent.pos_tags:\n # Disambiguate pronouns\n if part_of_speech == 'PRP' and word.lower() == 'you':\n # pronoun = 'I' +++++++++ORIGINAL++++++++++++\n pronoun = 'I'\n elif part_of_speech == 'PRP' and word == 'I':\n # If the user mentioned themselves, then they will definitely be the pronoun\n # pronoun = 'You' +++++++++ORIGINAL++++++++++++\n pronoun = 'You'\n return pronoun", "def is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']", "def is_pronounced(word):\n pronounced = [\"pronounced\", \"pronunciation\", \"pronounsed\", \"pronouced\", \"pronouned\", \\\n \"pronounciated\", \"prenounced\", \"prounouced\", \"pernounced\", \"purnounced\", \\\n \"pronoused\", \"pronuced\", \"pronunced\", \"pronnounced\", \"pronanced\", \\\n \"prononced\", \"prounounced\", \"prononsed\", \"prononuced\", \"pernunciation\", \\\n \"prononciation\", \"prounciation\", \"pronouciation\", \"pronounciated\", \\\n \"pronounciation\", \"pronanciation\", \"prononcation\", \"pernounciation\", \\\n \"prononceation\", \"prenunciation\", \"prononseation\", \"prounouciation\", \\\n \"pronuniation\", \"pronunication\", \"prenounciation\", \"pronuntiation\", \\\n \"pronuncition\", \"pronociation\", \"prenunsiation\", \"pronounsation\", \\\n \"pronounceation\", \"pronounication\", \"pronauciation\", \"pronounciacion\", \\\n \"pronounsiation\"]\n for p in pronounced:\n if word.lower() == p:\n return True\n return False", "def is_sonnet(poem):\n return len([line for line in poem.split(\"\\n\") if line]) == 14", "def verifyProperNounAtSentenceStart(idx, tagged_term, tagged_terms, lexicon):\n term, tag, norm = tagged_term\n if (tag in ('NNP', 'NNPS') and\n (idx == 0 or tagged_terms[idx-1][1] == '.')):\n lower_term = term.lower()\n lower_tag = lexicon.get(lower_term)\n if lower_tag in ('NN', 'NNS'):\n tagged_term[0] = tagged_term[2] = lower_term\n tagged_term[1] = lower_tag", "def _entry_morphophonemics_annotation_is_valid(entry: _LexiconEntry) -> None:\n compound = _is_compound_of(entry)\n morphophonemics = _morphophonemics_of(entry)\n\n if compound == \"true\" and morphophonemics == \"~\":\n raise InvalidLexiconEntryError(\n \"Entry is marked as ending with compounding marker but it is missing\"\n \" morphophonemics annotation.\")", "def is_noun(tag_string):\n result = True if tag_string in POS.POS_tags.noun_tags else False\n return result", "def is_adjective(tag):\r\n return tag in ['JJ', 'JJR', 'JJS']", "def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result", "def is_annotated(page_id, line_n, token, annotations):\n positions = [p for a in annotations for p in 
a[\"positions\"]]\n for position in positions:\n if(position[\"page_id\"]==page_id):\n if(position[\"line_n\"]==line_n):\n if(token[\"offset_start\"] >= position[\"start\"]):\n if(token[\"offset_end\"] <= position[\"end\"]):\n return True\n return False", "def _is_annotated(nodes: List[Node]):\n annotated = False\n for node in nodes:\n annotated = annotated or (\n \"quantization_annotation\" in node.meta\n and node.meta[\"quantization_annotation\"]._annotated\n )\n return annotated", "def _check_personal_pronouns(pronoun: str, last_nouns: list) -> list:\n pronoun_details = []\n pronoun_lower = pronoun.lower()\n if pronoun == 'I' or pronoun_lower in ('me', 'myself', 'my'):\n pronoun_details.append(('Narrator', 'SINGPERSON', ':Person', ':Narrator'))\n elif pronoun_lower in ('we', 'us', 'ourselves', 'our'):\n # Find singular or plural person nouns (any gender)\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, None, None, True))\n pronoun_details.append(('Narrator', 'SINGPERSON', ':Person', ':Narrator'))\n elif pronoun_lower in ('they', 'them', 'themselves', 'their'):\n # Give preference to persons (any gender or number)\n noun_list = _check_criteria(pronoun_lower, last_nouns, None, None, True)\n if noun_list:\n pronoun_details.extend(noun_list)\n else:\n # Check for non-persons\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, None, None, False))\n elif pronoun_lower in ('she', 'herself', 'her'):\n # Find singular, feminine, person nouns\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, True, True, True))\n elif pronoun_lower in ('he', 'himself', 'him'):\n # Find singular, masculine, person nouns\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, True, False, True))\n elif pronoun_lower in ('it', 'itself', 'its'):\n # Find singular, non-person nouns (no gender)\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, True, None, False))\n final_details = [] # May be duplicates in the list due to duplicates in last_nouns\n for pronoun_detail in pronoun_details:\n if pronoun_detail in final_details:\n continue\n final_details.append(pronoun_detail)\n return final_details", "def nao_tem_passageiros(self):\n return self.counter.ja_viajaram == self.counter.num_passageiros", "def _is_opinion_mod(token: tokens.Token) -> bool:\n is_mod = token.dep_ in {\"amod\", \"advmod\"}\n is_op = token.text.lower() in _OPINION_WORDS\n return is_mod and is_op", "def is_lyrics_approved():", "def is_psionic(self) -> bool:\n return ATTRIBUTE.Psionic.value in self.type_data.attributes", "def rule_vb_prp_nn(words):\n if index_tag_seq(words, ['VB', 'PRP$', 'NNS', 'RB']) > -1:\n return move_tag_seq(words, ['VB', 'PRP$', 'NNS'], 'end')\n return None", "def check_pra_symbol(symbol):\n # Platts\n if len(symbol) == 7 and symbol[:2] in [\n 'PC', 'PA', 'AA', 'PU', 'F1', 'PH', 'PJ', 'PG', 'PO', 'PP', ]:\n return True\n\n # Argus\n if '.' 
in symbol:\n sm = symbol.split('.')[0]\n if len(sm) == 9 and sm.startswith('PA'):\n return True\n\n return False", "def isOk(annots):\n if annots == []:\n return True\n for a in annots:\n for label in a.labels:\n if (label != 'hasOkCopies' and\n label != 'hasBadCopies' and\n not label.startswith('count_')):\n return False\n return True", "def check_if_extra_pronoun(pronoun_dict):\n for key, value in sorted(pronoun_dict.iteritems(), key=lambda (k,v): (v,k), reverse = True):\n skip = False\n for pronoun_list in all_pronouns:\n if key in pronoun_list:\n skip = True\n break\n if not skip:\n print \"%s: %s\" % (key, value)", "def __isNoun__(self, word):\n self.nouns = ('door', 'bear', 'princess', 'cabinet')\n for noun in self.nouns:\n if noun == word:\n return ('noun', word), True\n return None, False", "def isDisambiguatedByNextNoun(self, word):\n\t\treturn 'noun' in disambig_const.DISAMBIGUATATION_TABLE.get(word, {});", "def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"", "def are_present_quiz(verb, pronoun):\n\n return functions.conjugate_present_are_verb(verb, pronoun, \"presente\")", "def _does_token_overlap_with_annotation(\n token: Token, annot_start: int, annot_end: int\n) -> bool:\n\n return (\n annot_start <= token.idx <= annot_end\n or token.idx <= annot_start <= token.idx + len(token)\n )", "def check_if_replacable(self, word):\n word_tag = pos_tag([word])\n if 'NN' in word_tag[0][1] or 'JJ' in word_tag[0][1] or 'VB' in word_tag[0][1]:\n return True\n else:\n return False", "def additional_text_preprocessing_with_pos(pos_dict):\n\n tags_to_lemmatize = ['a', 'n', 'v', 'r']\n\n pos_dict = TextPreprocessor.find_named_entities(pos_dict)\n if pos_dict is None:\n return None, None\n prepro = list()\n contains_spelling_mistake = False\n for t in pos_dict:\n token = t['token']\n tag = t['tag'].lower()\n if token not in TextPreprocessor.PUNCTUATION and tag != \",\":\n\n token = TextPreprocessor.replace_user_mentions(token)\n token = TextPreprocessor.replace_urls(token)\n replaced = [token]\n for i in replaced:\n\n i = TextPreprocessor.replace_all_punctuation(i)\n if i.lower() not in TextPreprocessor.STOPWORDS and i != 'URL' and i!= 'USERMENTION':\n if i != \"\" and not re.match('\\B#\\w*[a-zA-Z]+\\w*', i):\n before = i\n i = TextPreprocessor.SPELL_CHECKER.correct(i, tag)\n if i != before:\n contains_spelling_mistake = True\n if tag in tags_to_lemmatize:\n i = TextPreprocessor.lemmatize(i, tag)\n i = TextPreprocessor.stem(i, tag)\n # check again, since stemming, lemmatization or spelling correction can produce stopwords\n # if i.lower() not in TextPreprocessor.STOPWORDS:\n if i != 'URL' and i!= 'USERMENTION' and i!='':\n i = i.lower()\n if re.match(\".*[a-zA-Z]'\", i):\n i = i[:-1]\n prepro.append(i)\n return prepro, contains_spelling_mistake", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def test_spires_syntax_detected_naked_author_leading_spaces(self):\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n spi_search = converter.is_applicable(\" author ellis\")\n self.assertEqual(spi_search, True)" ]
[ "0.61451167", "0.58428335", "0.5761659", "0.55204415", "0.542974", "0.53121424", "0.52965194", "0.5273935", "0.5249292", "0.5238592", "0.52133167", "0.5151928", "0.5150011", "0.5145963", "0.5134433", "0.5119308", "0.5116591", "0.50993526", "0.5094933", "0.5078213", "0.50759107", "0.5022919", "0.5019613", "0.49810898", "0.4968922", "0.49677217", "0.49573064", "0.49554756", "0.4926266", "0.49099398" ]
0.72758985
0
Get a list of token objects for the event extent. The extent can be set to include discontiguous_triggers, participants, and/or fillers. Setting this to an empty list will only return the original In the definition of an event nugget we include all of these.
def get_extent_tokens(self, extent=["discontiguous_triggers"], source_order=True): all_tokens = self.tokens.copy() core_sen_idx = all_tokens[0].in_sentence.element_id # to ensure discont is in same sentence for ext in extent: if getattr(self, ext): all_tokens.extend(t for x in getattr(self, ext) for t in x.tokens if t.in_sentence.element_id == core_sen_idx) all_tokens = list( set(all_tokens) ) # this is necessary because trigger and participant spans can overlap if source_order: # return tokens in the order they appear in source text all_tokens.sort(key=lambda x: x.begin) return all_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_extent_tokens(self, extent=[], source_order=True):\n all_tokens = self.tokens.copy()\n\n for ext in extent:\n if getattr(self, ext):\n all_tokens.extend(t for x in getattr(self, ext) for t in x.tokens)\n\n all_tokens = list(\n set(all_tokens)\n ) # this is necessary because trigger and participant spans can overlap\n if source_order: # return tokens in the order they appear in source text\n all_tokens.sort(key=lambda x: x.begin)\n\n return all_tokens", "def get_extent_token_ids(self, **kwargs):\n token_span = self.get_extent_tokens(**kwargs)\n return [t.index for t in token_span]", "def get_tokens(self):\r\n return TokenGroup.get_tokens(self._tu, self.extent)", "def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList", "def get_tokens(tu, extent):\r\n tokens_memory = POINTER(Token)()\r\n tokens_count = c_uint()\r\n\r\n conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),\r\n byref(tokens_count))\r\n\r\n count = int(tokens_count.value)\r\n\r\n # If we get no tokens, no memory was allocated. Be sure not to return\r\n # anything and potentially call a destructor on nothing.\r\n if count < 1:\r\n return\r\n\r\n tokens_array = cast(tokens_memory, POINTER(Token * count)).contents\r\n\r\n token_group = TokenGroup(tu, tokens_memory, tokens_count)\r\n\r\n for i in xrange(0, count):\r\n token = Token()\r\n token.int_data = tokens_array[i].int_data\r\n token.ptr_data = tokens_array[i].ptr_data\r\n token._tu = tu\r\n token._group = token_group\r\n\r\n yield token", "def get_tokens(self):\n def _traverse_preorder(cursor, token_list): # There is a method called \"walk_preorder\" in Cursor class. 
Here we need to ignore some subtrees so we implement on our own.\n if cursor.location.file and cursor.location.file.name != self.filepath: # exclude \"#include <...>\"\n return\n if (cursor.kind, cursor.spelling) in exclude_types: # exclude node in 'exclude_types'\n return\n \n token_list.append(cursor)\n for child in cursor.get_children():\n _traverse_preorder(child, token_list)\n\n tokens = []\n _traverse_preorder(self.cursor, tokens)\n return tokens", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def get_tokens(self) -> List[str]:\n return self.tokens", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def get_tokens(self, locations=None, extent=None):\r\n if locations is not None:\r\n extent = SourceRange(start=locations[0], end=locations[1])\r\n\r\n return TokenGroup.get_tokens(self, extent)", "def get_gene_tokens(self, classification: Classification) -> List[GeneToken]:\n return self.get_protein_gene_symbol_tokens(classification)", "def get_tokens(self):\r\n return self.token_set", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self.__tokens", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def special_tokens(self) -> List[Hashable]:\n return list(self._special_token_kv.values())", "def get_tokens(self, node, include_extra=False):\n # type: (AstNode, bool) -> Iterator[Token]\n return self.token_range(node.first_token, node.last_token, include_extra=include_extra)", "def extent(self):\r\n return conf.lib.clang_getTokenExtent(self._tu, self)", "def extent(self) -> typing.Tuple[str, ...]:\n return self._extent.members()", "def tokens(self):\n tokens = [k for k in self.tok2ind.keys()\n if k not in {'<NULL>', '<UNK>'}]\n return tokens", "def tokens(self):\n return self._sentrep.tokens()", "def all_tokens(self) -> List[Hashable]:\n return self._all_tokens", "def get_token_names(self) -> List[str]:\n return list(self._tokens.keys())", "def tokens(self) -> list:\n if self._tokens is None:\n tokens_ = sorted(list(self.elements()))\n self._tokens = tokens_\n return self._tokens", "def get_tokens(self):\n\t\treturn self.get_starttokens() + self.get_endtokens()", "def get_token_types() -> List[type]:\n return ExtensionTokenTypes.__TOKEN_TYPES", "def tokens(self):\n tokens = []\n for index in range(len(self.sentrep)):\n tokens.append(self.sentrep.getWord(index).lexeme())\n return tokens" ]
[ "0.77589923", "0.6758597", "0.64047307", "0.6147929", "0.60916317", "0.6056437", "0.5989617", "0.58163434", "0.58126074", "0.56831574", "0.567847", "0.56537056", "0.55833375", "0.54851276", "0.54851276", "0.54851276", "0.5463045", "0.5441857", "0.54370433", "0.53718114", "0.5361964", "0.5328001", "0.53169554", "0.5305214", "0.5298392", "0.5282702", "0.528012", "0.5275274", "0.5257753", "0.52499425" ]
0.7712019
1
Get a list of token ids for the event extent. Relies on Event.get_extent_tokens() for fetching the tokens.
def get_extent_token_ids(self, **kwargs): token_span = self.get_extent_tokens(**kwargs) return [t.index for t in token_span]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_extent_tokens(self, extent=[], source_order=True):\n all_tokens = self.tokens.copy()\n\n for ext in extent:\n if getattr(self, ext):\n all_tokens.extend(t for x in getattr(self, ext) for t in x.tokens)\n\n all_tokens = list(\n set(all_tokens)\n ) # this is necessary because trigger and participant spans can overlap\n if source_order: # return tokens in the order they appear in source text\n all_tokens.sort(key=lambda x: x.begin)\n\n return all_tokens", "def get_extent_tokens(self, extent=[\"discontiguous_triggers\"], source_order=True):\n all_tokens = self.tokens.copy()\n core_sen_idx = all_tokens[0].in_sentence.element_id # to ensure discont is in same sentence\n\n for ext in extent:\n if getattr(self, ext):\n all_tokens.extend(t for x in getattr(self, ext) for t in x.tokens if t.in_sentence.element_id == core_sen_idx)\n\n all_tokens = list(\n set(all_tokens)\n ) # this is necessary because trigger and participant spans can overlap\n if source_order: # return tokens in the order they appear in source text\n all_tokens.sort(key=lambda x: x.begin)\n\n return all_tokens", "def get_tokens(self):\r\n return TokenGroup.get_tokens(self._tu, self.extent)", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def getEventIds(self):\n eventIdsLst = []\n for event in self.eventsLst:\n eventIdsLst.append(event['id'])\n return eventIdsLst", "def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList", "def get_tokens(self) -> List[str]:\n return self.tokens", "def get_ids(self,tokens, tokenizer, max_seq_length):\n token_ids = tokenizer.convert_tokens_to_ids(tokens,)\n input_ids = token_ids + [0] * (max_seq_length-len(token_ids))\n return input_ids", "def keys(self):\n return list(self.token2id.values())", "def get_gene_tokens(self, classification: Classification) -> List[GeneToken]:\n return self.get_protein_gene_symbol_tokens(classification)", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def get_tokens(tu, extent):\r\n tokens_memory = POINTER(Token)()\r\n tokens_count = c_uint()\r\n\r\n conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),\r\n byref(tokens_count))\r\n\r\n count = int(tokens_count.value)\r\n\r\n # If we get no tokens, no memory was allocated. 
Be sure not to return\r\n # anything and potentially call a destructor on nothing.\r\n if count < 1:\r\n return\r\n\r\n tokens_array = cast(tokens_memory, POINTER(Token * count)).contents\r\n\r\n token_group = TokenGroup(tu, tokens_memory, tokens_count)\r\n\r\n for i in xrange(0, count):\r\n token = Token()\r\n token.int_data = tokens_array[i].int_data\r\n token.ptr_data = tokens_array[i].ptr_data\r\n token._tu = tu\r\n token._group = token_group\r\n\r\n yield token", "def convert_tokens_to_ids(self, tokens):\n ids = []\n if isinstance(tokens, str):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, self.unk_id)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, self.unk_id))\n return ids", "def get_tokens(self):\r\n return self.token_set", "def GetCharacterIds(self):\n rv = []\n for charEntry in ClientAPI.GetCharacterEntries():\n rv.append(charEntry.CharacterId)\n return rv", "def additional_special_tokens_ids(self):\n return [self.token_to_id(token) for token in self.additional_special_tokens]", "def get_token_names(self) -> List[str]:\n return list(self._tokens.keys())", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def tokens(self):\n return self.__tokens", "def get_tokens(self, locations=None, extent=None):\r\n if locations is not None:\r\n extent = SourceRange(start=locations[0], end=locations[1])\r\n\r\n return TokenGroup.get_tokens(self, extent)", "def get_ids(self) -> List[str]:", "def convert_tokens_to_ids(self, tokens):\n ids = []\n for token in tokens:\n ids.append(self.vocab[token])\n if len(ids) > self.max_len:\n logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this BERT model ({} > {}). Running this sequence through BERT will result in indexing errors'.format(len(ids), self.max_len))\n return ids", "def get_tokens(self):\n def _traverse_preorder(cursor, token_list): # There is a method called \"walk_preorder\" in Cursor class. Here we need to ignore some subtrees so we implement on our own.\n if cursor.location.file and cursor.location.file.name != self.filepath: # exclude \"#include <...>\"\n return\n if (cursor.kind, cursor.spelling) in exclude_types: # exclude node in 'exclude_types'\n return\n \n token_list.append(cursor)\n for child in cursor.get_children():\n _traverse_preorder(child, token_list)\n\n tokens = []\n _traverse_preorder(self.cursor, tokens)\n return tokens", "def extent(self):\r\n return conf.lib.clang_getTokenExtent(self._tu, self)", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids", "def get_identifiers(self) -> list[str]:\n output: list[str] = self._template.get_identifiers()\n return output" ]
[ "0.7306065", "0.7179015", "0.67308676", "0.66366893", "0.6510626", "0.63861024", "0.6332538", "0.6213533", "0.6002818", "0.59769213", "0.59459466", "0.58939326", "0.58939326", "0.58939326", "0.5877605", "0.58751804", "0.58647084", "0.5804137", "0.5802045", "0.5801702", "0.57865965", "0.57757777", "0.57729423", "0.5757898", "0.575747", "0.57363987", "0.57216275", "0.5713857", "0.5707053", "0.5706606" ]
0.85507065
0
Get a list of token objects for the sentiment expression extent. The extent can be set to include targets. Setting this to an empty list will only return the original tokens.
def get_extent_tokens(self, extent=[], source_order=True): all_tokens = self.tokens.copy() for ext in extent: if getattr(self, ext): all_tokens.extend(t for x in getattr(self, ext) for t in x.tokens) all_tokens = list( set(all_tokens) ) # this is necessary because trigger and participant spans can overlap if source_order: # return tokens in the order they appear in source text all_tokens.sort(key=lambda x: x.begin) return all_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_extent_tokens(self, extent=[\"discontiguous_triggers\"], source_order=True):\n all_tokens = self.tokens.copy()\n core_sen_idx = all_tokens[0].in_sentence.element_id # to ensure discont is in same sentence\n\n for ext in extent:\n if getattr(self, ext):\n all_tokens.extend(t for x in getattr(self, ext) for t in x.tokens if t.in_sentence.element_id == core_sen_idx)\n\n all_tokens = list(\n set(all_tokens)\n ) # this is necessary because trigger and participant spans can overlap\n if source_order: # return tokens in the order they appear in source text\n all_tokens.sort(key=lambda x: x.begin)\n\n return all_tokens", "def get_extent_token_ids(self, **kwargs):\n token_span = self.get_extent_tokens(**kwargs)\n return [t.index for t in token_span]", "def get_tokens(tu, extent):\r\n tokens_memory = POINTER(Token)()\r\n tokens_count = c_uint()\r\n\r\n conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),\r\n byref(tokens_count))\r\n\r\n count = int(tokens_count.value)\r\n\r\n # If we get no tokens, no memory was allocated. Be sure not to return\r\n # anything and potentially call a destructor on nothing.\r\n if count < 1:\r\n return\r\n\r\n tokens_array = cast(tokens_memory, POINTER(Token * count)).contents\r\n\r\n token_group = TokenGroup(tu, tokens_memory, tokens_count)\r\n\r\n for i in xrange(0, count):\r\n token = Token()\r\n token.int_data = tokens_array[i].int_data\r\n token.ptr_data = tokens_array[i].ptr_data\r\n token._tu = tu\r\n token._group = token_group\r\n\r\n yield token", "def get_tokens(self, locations=None, extent=None):\r\n if locations is not None:\r\n extent = SourceRange(start=locations[0], end=locations[1])\r\n\r\n return TokenGroup.get_tokens(self, extent)", "def getTokens(self):\n list = []\n for i in range(self.startIdx, self.endIdx + 1):\n token = self.sentence[i]\n list.append(token)\n return list", "def get_tokens(self):\r\n return TokenGroup.get_tokens(self._tu, self.extent)", "def tokens(self):\n tokens = []\n for index in range(len(self.sentrep)):\n tokens.append(self.sentrep.getWord(index).lexeme())\n return tokens", "def tokens(self):\n return self._sentrep.tokens()", "def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def sentences(self) -> List[str]:\n\t\treturn [self.text[start:end] for start, end in self.tokenizations]", "def tokenize(self):\n tknzr = TweetTokenizer()\n tkn = []\n for tweet in self.tweets:\n for word in tknzr.tokenize(tweet):\n tkn.append(word)\n return tkn", "def get_tokens(self) -> List[str]:\n return self.tokens", "def get_tokens(self):\r\n return self.token_set", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def extent(self):\r\n return conf.lib.clang_getTokenExtent(self._tu, self)", "def tokenize_sentences(self, sents):\n 
token_sentence_list = []\n for sentence in sents:\n token_sentence_list.append(self.tokenizer(sentence))\n return token_sentence_list", "def get_tokens(self):\n def _traverse_preorder(cursor, token_list): # There is a method called \"walk_preorder\" in Cursor class. Here we need to ignore some subtrees so we implement on our own.\n if cursor.location.file and cursor.location.file.name != self.filepath: # exclude \"#include <...>\"\n return\n if (cursor.kind, cursor.spelling) in exclude_types: # exclude node in 'exclude_types'\n return\n \n token_list.append(cursor)\n for child in cursor.get_children():\n _traverse_preorder(child, token_list)\n\n tokens = []\n _traverse_preorder(self.cursor, tokens)\n return tokens", "def tokens(self):\n return self.__tokens", "def tokenize(self, texts: List[str]) -> List[Token]:\n raise NotImplementedError", "def get_gene_tokens(self, classification: Classification) -> List[GeneToken]:\n return self.get_protein_gene_symbol_tokens(classification)", "def multiword_tokens(self):\n return self._mwts", "def get_sentences_list(text: str, model_type: str) -> t.List[str]:\n sentences = []\n sent_offsets = []\n stok = SentenceTokenizer.from_type(model_type)\n if isinstance(text, list):\n sentences, sent_offsets = list(zip(*map(stok.tokenize, text)))\n elif isinstance(text, str):\n sentences, sent_offsets = stok.tokenize(text)\n return sentences, sent_offsets", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def sents(self):\n\n text = str()\n for file in os.listdir(self.path):\n # checks if the given path contains a text file and opens it\n if file.endswith(\".txt\"):\n with open(self.path + \"/\" + file) as connection:\n text += connection.read()\n\n # tokenizes the text to sentences and tokenizes the tokenized sentences to words\n sentences_list = nltk.sent_tokenize(text)\n word_list = [nltk.word_tokenize(sent) for sent in sentences_list]\n\n return word_list", "def return_tag_tokens(self, tags_indexes, observations):\n tag_pred = []\n for tag_index in tags_indexes:\n tag_pred.append(observations.T.index[tag_index])\n return tag_pred" ]
[ "0.79144996", "0.66847426", "0.64567703", "0.64525014", "0.6422023", "0.63470286", "0.6025481", "0.5879312", "0.5853642", "0.58203405", "0.58203405", "0.55929494", "0.5589577", "0.55816835", "0.5545209", "0.553422", "0.54197466", "0.54197466", "0.54197466", "0.5392735", "0.5365629", "0.53329873", "0.5325342", "0.5286919", "0.527805", "0.5219621", "0.5218858", "0.52099985", "0.5202829", "0.5182594" ]
0.82137984
0
Set token id based on document id + token position in text
def get_token_id(self): return f"{self.document_title}_{self.index}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def correct_token_begin_position(self, tokens, text):\n beginning = 0\n for token in tokens:\n token.text_begin = text.find(token.text_content, beginning) \n beginning += len(token.text_content)\n return tokens", "def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def tokenize(self, start_pos=0, text=None):\n pass", "def tokenization(self, content: str, doc_id: str):\n self.metadata[doc_id] = dict()\n tokens = list()\n lines = content.splitlines()\n for i in range(200 if self.name == 'Novels' else 0, len(lines)):\n if self.name == 'HillaryEmails' or (lines[i] == '' and lines[i-1] != ''):\n words = 0\n self.metadata[doc_id]['Content'] = str()\n for j in range(i, len(lines)):\n line = lines[j]\n if line:\n words += len(line.split())\n self.metadata[doc_id]['Content'] += line + ''\n if words >= 75:\n self.metadata[doc_id]['Content'] += '...'\n break\n break\n keys = ['Title', 'Author', 'Release Date', 'Language', 'Character set encoding']\n for i, line in enumerate(lines):\n if self.name == 'Novels':\n if i < 30:\n for j in range(len(keys)):\n if keys[j] in line:\n self.metadata[doc_id][keys[j]] = line.strip().replace(keys[j]+': ', '')\n token = line.split() # default split by whitespace\n tokens.extend(zip(token, len(token) * [doc_id]))\n return tokens", "def _id(self):\n result = ''\n while self.current_char is not None and self.current_char.isalnum() or self.current_char == '_':\n result += self.current_char\n self.advance()\n\n return Token(ID, result)", "def map_token_to_id(self, token: str):\n if token not in self._token_to_id:\n token = self._unk_token\n return self._token_to_id[token]", "def doc2id(self, doc):\n if isinstance(doc, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def default_idtoken_processing_hook(\n id_token, user, token, request, **kwargs):\n return id_token", "def id(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))", "def token_key(text):\n content2 = str(text.split())\n beginning = content2.find('access_token\":\"') + int(15)\n end = content2.find('token_type') - int(3)\n access_token = content2[beginning:end]\n return access_token", "def token_key(text):\n content2 = str(text.split())\n beginning = content2.find('access_token\":\"') + int(15)\n end = content2.find('token_type') - int(3)\n access_token = content2[beginning:end]\n return access_token", "def convert_idx(text, tokens):\n current = 0\n spans = []\n for token in tokens:\n current = text.find(token, current) # Find position of 1st occurrence; start search from 'current' \n if current < 0:\n raise Exception(f\"Token '{token}' cannot be found\")\n spans.append((current, current + len(token)))\n current += len(token) # next search start from the token afterwards\n return spans", "def _token_to_id(self, sequence_tokens, token_map, char_map, ngram=0,\n token_ngram_map=None, max_char_sequence_length=-1,\n max_char_length_per_token=-1):\n token_id_list = []\n char_id_list = []\n char_in_token_id_list = []\n ngram_id_list = []\n for token in sequence_tokens:\n char_id = [char_map.get(x, self.VOCAB_UNKNOWN) for x in token]\n char_id_list.extend(char_id[0:max_char_sequence_length])\n char_in_token = [char_map.get(x, 
self.VOCAB_UNKNOWN)\n for x in token[0:max_char_length_per_token]]\n char_in_token_id_list.append(char_in_token)\n\n token_id_list.append(\n token_map.get(token, token_map[self.VOCAB_UNKNOWN]))\n if ngram > 1:\n for j in range(2, ngram + 1):\n ngram_id_list.extend(\n token_ngram_map[x] for x in\n [\"\".join(sequence_tokens[k:k + j]) for k in\n range(len(sequence_tokens) - j + 1)] if x in\n token_ngram_map)\n if not sequence_tokens:\n token_id_list.append(self.VOCAB_PADDING)\n char_id_list.append(self.VOCAB_PADDING)\n char_in_token_id_list.append([self.VOCAB_PADDING])\n if not ngram_id_list:\n ngram_id_list.append(token_ngram_map[self.VOCAB_PADDING])\n return token_id_list, char_id_list, char_in_token_id_list, ngram_id_list", "def identifier(self):\n _id = ''\n while self.current_char is not None and self.current_char.isalpha():\n # inner loop to get alphanumeric characters\n while self.current_char is not None and\\\n self.current_char.isalnum():\n _id += self.current_char\n self.advance()\n return Token(self.tokentype['ID'], _id)", "def process_id_from(self):\r\n return self._tokens[1]", "def identity_tokenizer(text):\n return text", "def token(self, id):\r\n return Token(self, id)", "def _convert_id_to_token(self, index):\n return self.reverse_vocab.get(index, self.unk_token)", "def add(self, token_docid):\n token = token_docid[0]\n doc_id = token_docid[1]\n # collapse identical tokens together\n if token in self.posting:\n self.posting[token].append(doc_id)\n else:\n self.posting[token] = [doc_id]", "def _id(self, document):\n pass", "def id_to_token(self, idx):\n return self._id2token[idx]", "def id_to_token(self, idx):\n return self._id2token[idx]", "def tokenization(text):\n\n global tokenizer_tree\n tokenised_document = tokenizer_tree.tokenize(text)\n return tokenised_document", "def words_to_id(text, is_list=False, old_word_to_id=None):\n if is_list:\n x = \"\"\n for line in text:\n x += line + \" \"\n text = x\n \n uniq_words = set(text.split(\" \"))\n \n if old_word_to_id:\n word_to_id = old_word_to_id\n start = len(old_word_to_id)\n for word in uniq_words:\n if word not in word_to_id:\n word_to_id[word] = start\n start += 1\n else:\n word_to_id = {word:i for i, word in enumerate(uniq_words)}\n \n id_to_word = {str(v):k for k,v in word_to_id.items()}\n return word_to_id, id_to_word", "def map_id_to_token(self, id: int):\n return self._id_to_token[id]", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(str(token))", "def id_to_token(self, index):\r\n return self.decoder.get(index)", "def doc2idx(self, document, unknown_word_index=-1):\n if isinstance(document, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n\n document = [word if isinstance(word, unicode) else unicode(word, 'utf-8') for word in document]\n return [self.token2id.get(word, unknown_word_index) for word in document]", "def tokenize_docs(self):\n if not hasattr(self,\"max_seq_len\"):\n self.max_seq_len = 0\n \n context_cols = self.cols\n\n for df in [self.train, self.val, self.test]:\n df[\"tok_context\"] = df[context_cols].values.tolist()\n df[\"tok_context\"] = df[\"tok_context\"].apply(self.docs_to_ids)\n self.max_seq_len = max(self.max_seq_len,\n df[\"tok_context\"].apply(\n lambda x: max(map(len,x))).max())\n \n self.train[\"tok_ending\"] = self.train[self.train_cols[-1]].apply(\n self.str_to_ids)\n self.val[\"tok_ending_1\"] = self.val[self.test_val_cols[-2]].apply(\n self.str_to_ids)\n self.val[\"tok_ending_2\"] = 
self.val[self.test_val_cols[-1]].apply(\n self.str_to_ids)\n self.test[\"tok_ending_1\"] = self.test[self.test_val_cols[-2]].apply(\n self.str_to_ids)\n self.test[\"tok_ending_2\"] = self.test[self.test_val_cols[-1]].apply(\n self.str_to_ids)\n self.max_seq_len = max(\n self.max_seq_len,\n self.train[\"tok_ending\"].apply(len).max(),\n self.val[\"tok_ending_1\"].apply(len).max(),\n self.val[\"tok_ending_2\"].apply(len).max(),\n self.test[\"tok_ending_1\"].apply(len).max(),\n self.test[\"tok_ending_2\"].apply(len).max(),\n )\n # Max_seq_len is without BOM, EOM\n self.max_seq_len += 2\n log(\"Maximum sequence length: {}\".format(self.max_seq_len))" ]
[ "0.61441404", "0.6103226", "0.60713357", "0.58848673", "0.58489716", "0.5830993", "0.5819702", "0.5808241", "0.5795522", "0.57590806", "0.5744294", "0.5744294", "0.5727401", "0.57221353", "0.57152474", "0.56934476", "0.568862", "0.56846684", "0.5665206", "0.5659678", "0.56587857", "0.56496584", "0.56496584", "0.5642953", "0.5628797", "0.56241965", "0.5615611", "0.56137437", "0.5599643", "0.55956554" ]
0.7065767
0
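A minimal sketch of the token-id scheme used in the `get_token_id` document above, assuming a hypothetical `Token` holder whose `document_title` and `index` attributes mirror the ones referenced there:

class Token:
    # Hypothetical token holder; attribute names mirror the document above.
    def __init__(self, document_title, index, text):
        self.document_title = document_title  # id/title of the source document
        self.index = index                    # position of the token in the text
        self.text = text

    def get_token_id(self):
        # Document id + token position gives a corpus-wide unique token id.
        return f"{self.document_title}_{self.index}"

tokens = [Token("doc_A", i, w) for i, w in enumerate("the cat sat".split())]
assert [t.get_token_id() for t in tokens] == ["doc_A_0", "doc_A_1", "doc_A_2"]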
Append value to attribute list, create the attribute list if it does not exist.
def append_attribute(myobj, attrib_k, val): vals = getattr(myobj, attrib_k, []) if val not in vals: vals.append(val) setattr(myobj, attrib_k, vals)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setAttribute(self, attribute, value):\n\n # if multiple values found\n if hasattr(self, attribute):\n\n # make sure attribute is a list\n values = getattr(self, attribute)\n if not isinstance(values, list):\n setattr(self, attribute, [values])\n\n # append value to list\n getattr(self, attribute).append(value)\n\n # single value found\n else:\n setattr(self, attribute, value)", "def add_attr(self, attr, value, position=None, extra=None):\n # pylint: disable=eval-used\n if attr.startswith(\"*\"):\n attr = attr[1:]\n if attr not in self._attributes:\n self._attributes[attr] = []\n if len(self._attributes[attr]) != position:\n raise TypeError(\"AST Node lost in conversion!\")\n self._attributes[attr].append(value)\n elif extra is not None:\n self._attributes[attr] = eval(extra)\n else:\n self._attributes[attr] = value", "def add_value(self, key, value):\r\n if key in self:\r\n # We already have this key on the item.\r\n if not isinstance(self[key], list):\r\n # The key isn't already a list, take its current value and\r\n # convert it to a list with the only member being the\r\n # current value.\r\n self[key] = [self[key]]\r\n # Add the new value to the list.\r\n self[key].append(value)\r\n else:\r\n # This is a new attribute, just set it.\r\n self[key] = value", "def __setitem__(self, key, value) :\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n attribute[j][1].append(value)\n return\n attribute.append((key, [value]))", "def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)", "def add_attr(self, key: str, value):\n if key in self._attr_names():\n raise ValueError(\"Already have an attribute called '{}'\".format(key))\n self._attributes.append((key, value))", "def add_attribute(self, attr):\n self.add(attr)", "def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])", "def batch_add(self, *args, **kwargs):\n new_attrobjs = []\n strattr = kwargs.get(\"strattr\", False)\n for tup in args:\n if not is_iter(tup) or len(tup) < 2:\n raise RuntimeError(\"batch_add requires iterables as arguments (got %r).\" % tup)\n ntup = len(tup)\n keystr = str(tup[0]).strip().lower()\n new_value = tup[1]\n category = str(tup[2]).strip().lower() if ntup > 2 and tup[2] is not None else None\n lockstring = tup[3] if ntup > 3 else \"\"\n\n attr_objs = self._getcache(keystr, category)\n\n if attr_objs:\n attr_obj = attr_objs[0]\n # update an existing attribute object\n attr_obj.db_category = category\n attr_obj.db_lock_storage = lockstring or \"\"\n attr_obj.save(update_fields=[\"db_category\", \"db_lock_storage\"])\n if strattr:\n # store as a simple string (will not notify OOB handlers)\n attr_obj.db_strvalue = new_value\n attr_obj.save(update_fields=[\"db_strvalue\"])\n else:\n # store normally (this will also notify OOB handlers)\n attr_obj.value = new_value\n else:\n # create a new Attribute (no OOB handlers can be notified)\n kwargs = {\n \"db_key\": keystr,\n \"db_category\": 
category,\n \"db_model\": self._model,\n \"db_attrtype\": self._attrtype,\n \"db_value\": None if strattr else to_pickle(new_value),\n \"db_strvalue\": new_value if strattr else None,\n \"db_lock_storage\": lockstring or \"\",\n }\n new_attr = Attribute(**kwargs)\n new_attr.save()\n new_attrobjs.append(new_attr)\n self._setcache(keystr, category, new_attr)\n if new_attrobjs:\n # Add new objects to m2m field all at once\n getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)", "def append(self, value) -> None:\n key = getattr(value, self.keyattr)\n if callable(key):\n key = key()\n if key not in self.data:\n self.data[key] = []\n self.data[key].append(value)\n self.size += 1", "def add_attribute(self, attr):\n self.attrs.add(attr)", "def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)", "def add_attribute(self, attribute):\n if attribute not in self.attributes:\n self.attributes.add(attribute)\n self.attribute_list.append(attribute)\n return self", "def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value", "def add_attribute(self, name, value, modify=False, sources=None, published_at=None):\n\n # Find attributes and remove if matches\n if modify:\n if not self.current_attributes:\n res = self.tq.get('{}/attributes'.format(self._get_api_endpoint()))\n self.current_attributes = res.get('data', [])\n\n for attr in self.current_attributes:\n if attr['name'] == name and attr['value'].lower() != value.lower():\n attribute_id = attr['id']\n self.tq.delete(\n '{}/attributes/{}'.format(self._get_api_endpoint(), attribute_id))\n # break\n\n data = {'name': name, 'value': value}\n if sources and isinstance(sources, str):\n data['sources'] = [{'name': sources}]\n elif sources and isinstance(sources, dict):\n data['sources'] = [sources]\n elif sources and isinstance(sources, list):\n data['sources'] = sources\n if published_at:\n data['published_at'] = published_at\n\n res = self.tq.post('{}/attributes'.format(self._get_api_endpoint()), data=data)\n\n # Add the newly added indicator to the cache\n if res.get('total', 0) > 0:\n self.current_attributes.append(res['data'][0])", "def add_attribute(self, key, value):\n self.attributes[key] = value", "def record_attribute_set(self, typ, attr_name, node, value):\n serialized = self.serialize_type(typ)\n if serialized is None:\n return\n self.attributes_set[serialized].add(attr_name)\n self.merge_attribute_value(serialized, attr_name, value)", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, attr):\n self.attrs.add_attribute(attr)", "def append(self, value):\n self.list.append(value)", "def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)", "def append(self):\n return AttributeFunctor(self, lambda a, b: a + b)", "def createAttribute(nid, label, primary, list, x, y):\n attribute = Attribute(nid, label, primary, x, y)\n list.append(attribute)", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n 
self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def append(self, value):\n\n list.append(self, value)\n self.changed()", "def addattribute(self, uid, field, value):\n\n raise NotImplementedError" ]
[ "0.72828615", "0.7063005", "0.70154303", "0.6982224", "0.6949298", "0.6837699", "0.66487557", "0.6644792", "0.660447", "0.6597281", "0.65214103", "0.64085853", "0.639992", "0.6395541", "0.63902277", "0.6352462", "0.6338568", "0.63299024", "0.63299024", "0.63299024", "0.6323698", "0.6318956", "0.63034755", "0.628151", "0.62806904", "0.627548", "0.627548", "0.627548", "0.62751114", "0.6269866" ]
0.74247986
0
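A short usage sketch for the `append_attribute` helper shown in the document above: the list is created on first use and duplicate values are skipped. The `Record` class and the `tags` attribute are made up for illustration:

def append_attribute(myobj, attrib_k, val):
    # Fetch the existing list (or start a new one), append only unseen values.
    vals = getattr(myobj, attrib_k, [])
    if val not in vals:
        vals.append(val)
    setattr(myobj, attrib_k, vals)

class Record:
    pass

r = Record()
append_attribute(r, "tags", "alpha")   # creates r.tags == ["alpha"]
append_attribute(r, "tags", "beta")    # appends -> ["alpha", "beta"]
append_attribute(r, "tags", "alpha")   # duplicate value, list unchanged
assert r.tags == ["alpha", "beta"]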
Function to convert XML attributes into a dictionary
def __convertAttributes__(xml_source): attributes = {} for attrName, attrValue in xml_source.attributes.items(): attributes[attrName] = attrValue return attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_attrs_dict(self, root_element):\n attr_elements = root_element.findall(\"attribute\")\n attrs_dict = {}\n for el in attr_elements:\n attrs_dict[el.attrib[\"name\"]] = {\n \"value\": el.attrib[\"value\"],\n \"type\": el.attrib.get(\"type\", None)\n }\n return attrs_dict", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def dom2dict(element):\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))", "def get_attrib_dict(self, attribs: Tuple[str]) -> Dict[str, str]:\n attrs = self.get_attribs(attribs)\n attrs = tuple(map(lambda a: (a[0][1:], a[1]), attrs))\n return dict(attrs)", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def dom2dict(element):\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))", "def xml2dict( xml, sanitize=True, prefix=None):\n \n \n #Decode to avert parsing errors as some software dump large text\n #fields into the file that occasionally contain erronious chars\n xml=xml.decode('utf-8', errors='ignore')\n\n \n return etree2dict(etree.fromstring(xml), sanitize, prefix)", "def attribute_to_dict(attr: onnx.AttributeProto) -> Dict:\n ret = {}\n for a in attr:\n value = get_attribute_value(a)\n if isinstance(value, bytes):\n value = str(value, 'utf-8')\n ret[a.name] = value\n return ret", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def convert_attr_to_dict(attr):\n\n\tresult = dict()\n\tattr = attr.split(';')\n\tattrlist = [a.split(':') for a in attr]\n\tfor pair in attrlist:\n\t\tif len(pair) == 2:\n\t\t\tkey = pair[0]\n\t\t\tvalue = pair[1]\n\t\t\tresult[key] = value\n\n\treturn result", "def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))", "def load_attribute_dict(parent_element, tag_name):\n loaded_element = parent_element.find(tag_name)\n attribute_dict = {attr[0] : convert_to_int_if_numeric(attr[1])\n for attr in loaded_element.items()}\n return attribute_dict", "def get_attributes(self) -> Dict[str, str]:\n pass", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def get_attributes_of_node(core, node):\n attributes = {}\n for attribute in core.get_attribute_names(node):\n attributes[attribute] = core.get_attribute(node,attribute)\n return attributes", "def read_attribs(self):\n\n attribs = {}\n while self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def test_xml_to_dict(self):\n xml = \"\"\"\n <a\n xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n xmlns:nm=\"s\"\n >\n <b m=\"g\">\n b\n </b>\n <c nm:nm=\"update\">\n <f>1</f>\n <e>str</e>\n <d>1</d>\n <d>2</d>\n <d>3</d>\n </c>\n </a>\n \"\"\"\n xmlns = {\n \"_\": utils.NETCONF_NAMESPACE\n }\n result = utils.generate_dict_node(etree.XML(xml), xmlns)\n # check dict\n self.assertDictEqual(\n 
self.SIMPLE_DICT,\n result\n )\n # check xmlns\n self.assertEqual(\n {\n '_': utils.NETCONF_NAMESPACE,\n 'nm': 's'\n }, xmlns\n )", "def to_dict(xml):\n children = xml.getchildren()\n if not children:\n return xml.text\n else:\n out = {}\n for node in xml.getchildren():\n if node.tag in out:\n if not isinstance(out[node.tag], list):\n out[node.tag] = [out[node.tag]]\n out[node.tag].append(to_dict(node))\n else:\n out[node.tag] = to_dict(node)\n return out", "def convert_attributes(cls, attrs):\n return {}", "def parse_attributes(data, attributes):\n result = {}\n for key, val in attributes.items():\n if type(val) is list:\n attr_val = get_tree_data(data, val)\n else:\n attr_val = data.get(val, None)\n if attr_val is not None:\n result[key] = attr_val\n return result", "def xml_to_dict(args):\n rdict = dict()\n args = re.sub(r'xmlns=\\\".+?\\\"', '', args)\n root = ET.fromstring(args)\n ifmtrunk = root.find('.//ifmtrunk')\n if ifmtrunk is not None:\n try:\n ifmtrunk_iter = ET.Element.iter(ifmtrunk)\n except AttributeError:\n ifmtrunk_iter = ifmtrunk.getiterator()\n\n for ele in ifmtrunk_iter:\n if ele.text is not None and len(ele.text.strip()) > 0:\n rdict[ele.tag] = ele.text\n return rdict", "def parse_attributes(self, attr):\n result = {}\n annotations = []\n # Sanitize and split attributes up\n split_attr = attr.strip(' \\t\\n;').split(';')\n for pair in split_attr:\n splitpair = pair.split('=')\n if len(splitpair) != 2:\n continue\n if splitpair[0] == \"ID\":\n result['identifier'] = splitpair[1]\n elif splitpair[0] == \"Name\":\n result['name'] = splitpair[1]\n elif splitpair[0] == \"Parent\":\n result['parent_id'] = splitpair[1]\n elif splitpair[0] == \"Dbxref\" or splitpair[0] == \"Ontology_term\":\n annotations.append(splitpair)\n # Make sure we found an ID\n if \"identifier\" not in result:\n return {}\n # Add annotations if we found any\n if annotations:\n result[\"annotations\"] = annotations\n return result", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def xml_children_as_dict(node):\n return dict((e.tag, e.text) for e in node)", "def parse_common_attributes(self, element, attributes):\n result_attributes = {}\n for attribute in attributes:\n attrib_info = ALL_ATTRIBUTES_MAP[attribute]\n if attrib_info.get(\"default\", MISSING) is not MISSING:\n default_value = attrib_info[\"default\"]\n else:\n default_value = MISSING\n attribute_name = attrib_info.get(\"attribute_name\", attribute)\n value = element.attrib.get(attribute_name, default_value)\n if value is MISSING or value == \"\":\n if attrib_info.get(\"use\") == \"optional\":\n continue\n\n else:\n raise ValueError(\n f\"element {element.tag} missing required \"\n f\"attribute {attribute_name}\"\n )\n\n if attrib_info.get(\"type\"):\n try:\n value = attrib_info[\"type\"](value)\n except ValueError as exc:\n raise ValueError(\n f\"element {element.tag} invalid value \"\n f\"{repr(value)} for attribute {attribute_name}\"\n ) from exc\n\n if attrib_info.get(\"minimumValue\"):\n if value < attrib_info[\"minimumValue\"]:\n raise ValueError(\n f\"element {element.tag} invalid value {repr(value)}\"\n f\" for attribute {attribute_name},\"\n \"less than allowed minimum \"\n f\"{repr(attrib_info['minimumValue'])}\"\n )\n\n if attrib_info.get(\"pattern\"):\n if not attrib_info[\"pattern\"].match(value):\n raise ValueError(\n 
f\"element {element.tag} invalid value {repr(value)} \"\n f\"for attribute {attribute_name},\"\n \"does not match expected pattern \"\n f\"{repr(attrib_info['pattern'])}\"\n )\n\n if attrib_info.get(\"map\"):\n try:\n value = attrib_info[\"map\"][value]\n except (KeyError, IndexError) as exc:\n raise ValueError(\n f\"element {element.tag} invalid value {repr(value)} \"\n f\"for attribute {attribute_name}\"\n f\", must be one of {repr(attrib_info['map'].keys())}\"\n ) from exc\n\n if attrib_info.get(\"rename\"):\n attribute = attrib_info[\"rename\"]\n\n result_attributes[attribute] = value\n\n return result_attributes", "def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict", "def to_dict(\n self,\n attributes: Iterable[str] = (\"xyz\", \"viewdir\", \"imgsz\", \"f\", \"c\", \"k\", \"p\"),\n ) -> Dict[str, tuple]:\n return {key: helpers.numpy_to_native(getattr(self, key)) for key in attributes}", "def _get_attribute_dict(self, attributes, classname=None):\n if attributes and isinstance(attributes, six.string_types):\n return {\n 'class': attributes\n }\n if not attributes:\n attributes = {}\n if not classname:\n classname = self.DEFAULT_CLASS_NAME\n attributes.setdefault('class', classname)\n return attributes", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass" ]
[ "0.7548967", "0.722563", "0.71245855", "0.711149", "0.7095163", "0.7094138", "0.70292187", "0.6932637", "0.69197536", "0.68689096", "0.6855458", "0.68021226", "0.68001693", "0.6756407", "0.6698822", "0.6619554", "0.6612907", "0.6576497", "0.6554866", "0.6512518", "0.6490863", "0.6473228", "0.6458487", "0.6455899", "0.6425571", "0.6424577", "0.6365587", "0.6260193", "0.62454116", "0.61772186" ]
0.7341555
1
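The `__convertAttributes__` document above iterates a DOM node's `attributes` map. A self-contained sketch of the same conversion using `xml.dom.minidom`; the element and attribute names are invented for the example:

from xml.dom import minidom

def convert_attributes(xml_node):
    # Copy every attribute name/value pair of a DOM element into a plain dict.
    attributes = {}
    for attr_name, attr_value in xml_node.attributes.items():
        attributes[attr_name] = attr_value
    return attributes

doc = minidom.parseString('<item id="42" lang="en" checked="true"/>')
assert convert_attributes(doc.documentElement) == {
    "id": "42", "lang": "en", "checked": "true",
}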
Run the Spacy processing pipeline on the annotation documents. Add processed docs so they can be accessed.
def process_spacy(self): def prevent_sentence_boundary_detection(doc): for token in doc: # This will entirely disable spaCy's sentence detection token.is_sent_start = False return doc def process_sentence(sen_tokens): doc = spacy.tokens.Doc(nlp.vocab, words=sen_tokens) tagger(doc) prevent_sbd(doc) ner(doc) parser(doc) return doc # setup spacy nlp pipeline nlp = spacy.load("en_core_web_lg") parser = nlp.get_pipe("parser") nlp.add_pipe( prevent_sentence_boundary_detection, name="prevent-sbd", before="parser" ) tagger = nlp.get_pipe("tagger") prevent_sbd = nlp.get_pipe("prevent-sbd") parser = nlp.get_pipe("parser") ner = nlp.get_pipe("ner") for doc in self.annotation_documents: doc.sentences_processed = [] for sen in doc.sentences: sen_tokens = [t.text for t in sen.tokens] sen_proc = process_sentence(sen_tokens) # add processed sentence to doc doc.sentences_processed.append(sen_proc) print(f"Processed with Spacy: {doc.document_id}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, doc):\n # don't try to process null notes\n if not doc[1]:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n # odd notes may throw an error. Just continue rather than stopping the entire process\n try:\n sentences = self.sentence_tokenizer.segToSentenceSpans(doc[1])\n except KeyError:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n\n #context_doc = pyConTextGraph.ConTextDocument() # ConTextDoc not needed for simple usage\n\n doc_annots = list()\n\n for sentence in sentences:\n # run sentence tokenizer on input text, return the spans\n sentence_text = doc[1][sentence.begin:sentence.end]\n # process every sentence by adding markup\n markup = pyConTextGraph.ConTextMarkup()\n markup.setRawText(sentence_text)\n markup.cleanText()\n # apply targets and modifiers\n markup.markItems(self.targets, mode=\"target\")\n markup.markItems(self.modifiers, mode=\"modifier\")\n # address scope of modifiers to targets, remove inactive modifiers and self-modifying relationships\n markup.pruneMarks()\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n markup.dropInactiveModifiers()\n\n marked_targets = markup.getMarkedTargets()\n for marked_target in marked_targets:\n modifiers = markup.getModifiers(marked_target)\n if not modifiers:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0]+'_unspecified', marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0], 'unspecified', marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n else:\n for modifier in modifiers:\n if marked_target.getSpan()[0] < modifier.getSpan()[0]:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+modifier.getSpan()[1])\n else:\n span = (sentence.begin+modifier.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0]+'_'+modifier.getCategory()[0], marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0], modifier.getCategory()[0], marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n\n #context_doc.addMarkup(markup)\n\n return doc_annots", "def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))", "def process_document(self, document, **kwargs):\n \n start = time.time()\n\n if self._log_file is not None:\n map_annotations_logger.addHandler(file_handler(self._log_file))\n map_annotations_logger.setLevel(self._log_level)\n \n ref_annotation = document.annotation(self._annotation_name)\n ref_annotations = ref_annotation.annotations\n values = set([a.value for a in ref_annotations])\n new_annotations = [Tag(self._mapping.get(annotation.value, annotation.value), annotation.lb, annotation.ub) for annotation in ref_annotations if self._mapping.get(annotation.value, None) != u\"\"]\n \n 
document.add_annotation(Annotation(self._annotation_name, reference=ref_annotation.reference, annotations=new_annotations))\n \n laps = time.time() - start\n map_annotations_logger.info('in %s', timedelta(seconds=laps))", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword])\n self._token_count.update(sent)\n\n else:\n sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword]))\n self._token_count.update(sent)", "def processed_bulk(self, pipeline):\n docs = [Document([], text=t) for t in EN_DOCS]\n return pipeline(docs)", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def pre_process(self, documents):\n\n return documents", "def processed_doc(self, pipeline):\n return [pipeline(text) for text in EN_DOCS]", "def parse(self, doc):\n self.preprocessor.preprocess(doc)\n\n for extractor in self.extractors:\n extractor.extract(doc)\n\n return doc", "def _process_texts(self, texts, generator = False):\n wordlists = [\n x for x in [self.tokenizer.tokenize(doc) for doc in texts] if x != []\n ]\n self.vocab = corpora.Dictionary(wordlists)\n self.has_vocab = True\n self.corpus = (self.vocab.doc2bow(doc) for doc in wordlists)\n if not generator:\n self.corpus = list(self.corpus)\n self.has_corpus = True", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def pipe(self, docs: Iterable[Doc]) -> Iterable[Doc]:\n\n stream1, stream2 = itertools.tee(docs, 2)\n\n # Remove existing entities from the document\n stream2 = (self.create_new_doc(d) for d in stream2)\n \n # And run the model\n for _, proc in self.model.pipeline:\n stream2 = proc.pipe(stream2)\n \n for doc, doc_copy in zip(stream1, stream2):\n\n doc.spans[self.name] = []\n\n # Add the annotation\n for ent in doc_copy.ents:\n doc.spans[self.name].append(Span(doc, ent.start, ent.end, ent.label_))\n\n yield doc", "def process(self, fulltext=None):\r\n raise NotImplementedError(\"Please implement this in your importer\")", "def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")", "def clean_all_documents(cls):\n for index, text in enumerate(cls.documents):\n text_processed = cls.clean_document(text)\n cls.processed_documents.append(text_processed)", "def process(self):\n # tokenize, then filter & otherwise process words in each document\n # using steps in preprocess_doc()\n\n all_posts_count = self.postman.posts_read.find({'subreddit': 
self.postman.subreddit}).count()\n\n for post_idx, post in enumerate(self.postman.posts_read.find({'subreddit': self.postman.subreddit})):\n # preprocess the post and add the new words to the corpus\n new_words = self.preprocess_post(post)\n self.corpus.update(new_words)\n\n # print on every Nth post so you know it's alive\n if post_idx % 100 == 0:\n print 'done post %i out of %i' % (post_idx, all_posts_count)\n\n #TODO:\n print 'word count and other corpus-level filters not implemented, skipping...'\n # corpus-level filtering\n # get rid of invalid documents (based on word count)\n # self.corpus = [doc for doc in self.corpus if self.doc_has_valid_wc(doc)]\n # print 'filtered out %i out of %i documents' % (pre_corpus_len - len(self.corpus), pre_corpus_len)\n # stem or lemmatize\n # if self.stem_or_lemma_callback:\n # self.corpus = [self.perform_stem_or_lem(doc) for doc in self.corpus]\n # for chaining\n #######################################################\n\n return self", "def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)", "def annotate(self,corpus):\n\n\t\tassert corpus.parsed == True, \"Corpus must already be parsed before entity recognition\"\n\n\t\tfor doc in corpus.documents:\n\t\t\tentityCount = len(doc.entities)\n\t\t\tfor sentence in doc.sentences:\n\t\t\t\twords = [ t.word for t in sentence.tokens ]\n\t\t\t\t\n\t\t\t\tfor i,t in enumerate(sentence.tokens):\n\t\t\t\t\tif not isNumber(t.word):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tsourceEntityID = \"T%d\" % (entityCount+1)\n\t\t\t\t\ttext = doc.text[t.startPos:t.endPos]\n\t\t\t\t\tloc = [i]\n\n\t\t\t\t\te = kindred.Entity('quantity',text,[(t.startPos,t.endPos)],sourceEntityID=sourceEntityID)\n\t\t\t\t\tdoc.addEntity(e)\n\t\t\t\t\tsentence.addEntityAnnotation(e,loc)\n\t\t\t\t\tentityCount += 1", "def ingest_annotations(directory, annotation_type):\n in_golden = annotation_type == \"manual\"\n doc_type = None\n issue_number = None\n bid = None\n annotated_pages = []\n file_prefix = \"page\"\n try:\n bid = directory.split('/')[len(directory.split('/'))-1:][0]\n record = Metadata.objects(bid=bid).first()\n assert record is not None\n except Exception as e:\n bid,issue_number = directory.split('/')[len(directory.split('/'))-2:]\n record = Metadata.objects(bid=bid).first()\n try:\n doc_type = record[\"type_document\"]\n except Exception as e:\n doc_type = None\n logger.warning('The record for %s is not in MongoDB'%bid)\n try:\n page_numbers = {int(os.path.basename(fname).replace(\"page-\",\"\").split('.')[0]):os.path.basename(fname).split('.')[0] \n for fname in glob.glob(\"%s/*.ann\"%directory)}\n except Exception as e:\n page_numbers = {int(os.path.basename(fname).replace(\"image-\",\"\").split('.')[0]):os.path.basename(fname).split('.')[0] \n for fname in glob.glob(\"%s/*.ann\"%directory)}\n file_prefix = \"image\"\n # TODO: handle the exception of document not in the DB\n logger.info(\"Ingesting the annotations from directory \\\"%s\\\"\"%directory)\n if(issue_number != None):\n logger.info(\"Found document %s-%s [type=%s]\"%(bid,issue_number,doc_type))\n else:\n logger.info(\"Found document %s [type=%s]\"%(bid,doc_type))\n try:\n if(doc_type==\"journal\"):\n doc = LBDocument.objects(internal_id=\"%s-%s\"%(bid,issue_number)).first()\n if doc is None:\n doc = LBDocument.objects(internal_id=\"%s-%s\"%(bid, convert_issue_number(issue_number))).first()\n elif(doc_type==\"monograph\"):\n doc 
= LBDocument.objects(bid=bid).first()\n logger.info(\"%s has %i pages\"%(doc.internal_id, len(doc.pages)))\n for page_n in sorted(page_numbers):\n logger.debug(\"Reading in annotations for page %i from file %s/ %s\"%(page_n,directory,page_numbers[page_n]))\n entities_with_continuations = {}\n entities,relations = read_ann_file(page_numbers[page_n],directory)\n fulltext = codecs.open(\"%s/%s.txt\"%(directory,page_number2image_name(page_n, string=file_prefix)),'r', 'utf-8').read()\n line_breaks = find_newlines(fulltext)\n #logger.info(\"Found %i entities, %i relation in %s\"%(len(entities), len(relations), directory))\n doc_id = \"%s-%s-%s\"%(bid, issue_number, page_numbers[page_n])\n try:\n page = next((page for page in doc.pages if page.single_page_file_number==page_n))\n if(page[\"in_golden\"]==True):\n annotated_pages.append(page.id)\n logger.info(\"found %i entities in %s (p. %i)\"%(len(entities),doc_id,page_n))\n logger.info(\"found %i relations in %s (p. %i)\"%(len(relations.keys()),doc_id,page_n))\n \"\"\"\n Parse the `ContainedIN` relations and identify annotations that should be merged together.\n IDs of candidates for merging are stored in a dict(), e.g. {\"T1\":[\"T2\",\"T4\"]}\n \"\"\"\n entities_with_continuations = {}\n if len(relations.keys())>0:\n for relation_key in relations:\n args = relations[relation_key][\"arguments\"]\n if args[0] in entities_with_continuations:\n entities_with_continuations[args[0]].append(args[1])\n else:\n entities_with_continuations[args[0]] = [args[1]]\n logger.debug(\"(%s-%s) entities to be merged: %s\"%(doc_id,page_n,entities_with_continuations))\n \"\"\"\n Create the annotations (`entities` dict). \n Later they will be stored into the MongoDB\n \"\"\"\n for entity in entities:\n entities[entity][\"ingestion_timestamp\"] = datetime.utcnow()\n entities[entity][\"annotation_ingester_version\"] = __version__\n entities[entity][\"entity_type\"] = entities[entity][\"entity_type\"].lower( )\n entities[entity][\"filename\"] = \"%s/%s%s\"%(directory,page_numbers[page_n],\".ann\")\n if(doc_type==\"journal\"):\n entities[entity][\"bid\"] = bid\n entities[entity][\"pageid\"] = doc_id\n elif(doc_type==\"monograph\"):\n entities[entity][\"bid\"] = bid\n entities[entity][\"pageid\"] = \"%s-%s\"%(bid,page_numbers[page_n])\n entities[entity][\"container\"] = entities[entity][\"entity_type\"] in container_annotations\n # ref to page_id (from content_loader) ✓\n for position in entities[entity][\"positions\"]:\n line_number = find_linenumber_for_string(position[\"start\"],position[\"end\"], line_breaks)\n logger.debug(\"%s is found at line %s\"%(entity,line_number))\n position[\"line_n\"] = line_number\n position[\"page_id\"] = page.id\n positions_by_offset = sorted(entities[entity][\"positions\"]\n ,key=lambda position: position['start'])\n entities[entity][\"positions\"] = sorted(positions_by_offset\n , key=lambda position: Page.objects(id=position['page_id']).first().single_page_file_number)\n logger.debug(\"Annotations %s %s\"%(entity,entities[entity]))\n \"\"\"\n Now take the candidates for merging identified above and populate the annotations.\n Still nothing is saved into MongoDB at this stage.\n \"\"\"\n for ann_id in entities_with_continuations:\n try:\n logger.debug(\"Starting to merge SP and SF entities into meta-annotations (%s-%s)\"%(doc_id, page_n))\n logger.debug(\"%s will be merged with %s\"%(ann_id,\"+\".join(entities_with_continuations[ann_id])))\n top_entity_types = 
\"_\".join([entities[ann_id][\"entity_type\"]]+[entities[annid][\"entity_type\"] \n for annid in entities_with_continuations[ann_id]])\n logger.debug(\"%s\"%top_entity_types)\n new_entity = copy.deepcopy(entities)[ann_id] \n #container = True \n new_entity[\"ann_id\"] = \"%s+%s\"%(ann_id,\"+\".join(entities_with_continuations[ann_id]))\n new_entity[\"entity_type\"] = \"meta-annotation\"\n new_entity[\"top_entity_types\"] = top_entity_types\n new_entity[\"top_entities_ids\"] = [ann_id]\n new_entity[\"top_entities_ids\"] += [id for id in entities_with_continuations[ann_id]]\n fname = new_entity[\"filename\"]\n new_entity[\"filename\"] = [fname]\n for to_merge_id in entities_with_continuations[ann_id]:\n to_merge = dict(entities)[to_merge_id]\n new_entity[\"filename\"]+= [to_merge[\"filename\"]]\n new_entity[\"positions\"] = new_entity[\"positions\"] + to_merge[\"positions\"]\n positions_by_offset = sorted(new_entity[\"positions\"]\n ,key=lambda position: position['start'])\n new_entity[\"positions\"] = sorted(positions_by_offset\n ,key=lambda position: Page.objects(id=position['page_id']).first().single_page_file_number)\n new_entity[\"filename\"] = \", \".join(list(set(new_entity[\"filename\"])))\n surface_start = new_entity[\"positions\"][0][\"start\"]\n surface_end = new_entity[\"positions\"][-1][\"end\"]\n new_entity[\"surface\"] = fulltext[surface_start:surface_end]\n entities[new_entity[\"ann_id\"]] = new_entity\n logger.debug(new_entity)\n except Exception as e:\n logger.error(\"The merging of %s in (%s-%s) failed with error\\\"%s\\\"\"%(new_entity[\"ann_id\"],bid,page_n,e))\n \"\"\"\n Now all annotations will be stored into the MongoDB. \n And some specific fields (e.g. `top_entities`) are sorted, and annotations updated \n accordingly in the DB. 
\n \"\"\"\n try:\n annotations = []\n for entity in entities.values():\n annotation = Annotation(**entity)\n annotation.positions = [PagePosition(**position) for position in entity[\"positions\"]]\n annotation.save()\n annotations.append(annotation)\n page.annotations_ids = [] #TODO\n page.annotations_ids = annotations\n page.is_annotated = True\n page.save()\n logger.debug(\"Following annotations were inserted into MongoDB: %s\"%([annotation.id for annotation in annotations]))\n logger.info(\"%i annotations were inserted into MongoDB\"%len(annotations))\n except Exception as e:\n raise e\n containers = [annotation for annotation in annotations if annotation[\"container\"]] \n contained = [annotation for annotation in annotations if not annotation[\"container\"]]\n meta_annotations = [annotation for annotation in annotations if annotation[\"entity_type\"]==\"meta-annotation\"]\n logger.debug(\"meta annotations: %s\"%meta_annotations)\n \"\"\"\n Resolve the top entities in the meta-annotations: replace entity IDs with \n a reference to the annotation in the MongoDB.\n \"\"\"\n for annotation in meta_annotations:\n top_entities_ids = annotation[\"top_entities_ids\"]\n logger.debug('resolving top_entities')\n top_entities = [Annotation.objects(ann_id=ann_id, pageid=annotation.pageid).first() for ann_id in top_entities_ids]\n #top_entities = list([db_conn.annotations.find_one({\"ann_id\":ann_id,\"pageid\":annotation[\"pageid\"]}) for ann_id in top_entities_ids])\n logger.debug(\"Top entities before sorting %s\"%[ann.id for ann in top_entities])\n annotation[\"top_entities\"] = sort_annotations_by_offset(top_entities)\n logger.debug(\"Top entities after sorting %s\"%[ann.id for ann in top_entities])\n annotation[\"top_entities\"] = top_entities\n annotation.save()\n logger.debug(\"Updating meta-annotation: %s\"%annotation.id)\n \"\"\"\n Transform contains relations between entities into references between annotations \n in the MongoDB.\n \"\"\"\n for annotation in sort_annotations_by_offset(containers):\n if(len(annotation[\"positions\"]) > 1):\n start = annotation[\"positions\"][0][\"start\"]\n end = annotation[\"positions\"][len(annotation[\"positions\"])-1][\"end\"]\n else:\n start = annotation[\"positions\"][0][\"start\"]\n end = annotation[\"positions\"][0][\"end\"]\n annotation[\"contains\"] = []\n for contained_annotation in sort_annotations_by_offset(contained):\n if(len(contained_annotation[\"positions\"])>1):\n if(contained_annotation[\"positions\"][0][\"start\"] >= start\n and contained_annotation[\"positions\"][len(contained_annotation[\"positions\"])-1][\"end\"] <= end):\n annotation[\"contains\"].append(contained_annotation)\n logger.debug(\"[%s] Annotation %s (%s) contains %s (%s)\"%(\n doc_id\n ,annotation[\"ann_id\"]\n ,annotation[\"id\"]\n ,contained_annotation[\"ann_id\"]\n ,contained_annotation[\"id\"]))\n annotation.save()\n else:\n if(contained_annotation[\"positions\"][0][\"start\"] >= start\n and contained_annotation[\"positions\"][0][\"end\"] <= end):\n annotation[\"contains\"].append(contained_annotation)\n logger.debug(\"[%s] Annotation %s (%s) contains %s (%s)\"%(\n doc_id\n ,annotation[\"ann_id\"]\n ,annotation[\"id\"]\n ,contained_annotation[\"ann_id\"]\n ,contained_annotation[\"id\"]))\n annotation.save()\n else:\n page.is_annotated = False\n logger.info(\"%s was ignored because it's not in the golden set\"%doc_id)\n except StopIteration as e:\n logger.error(\"The annotations for %s-%s p. 
%i can't be ingested\"%(bid, issue_number, page_n))\n except Exception as e:\n logger.error(\"The annotations for %s-%s can't be ingested. Got error %s\"%(bid, issue_number, e))\n return annotated_pages", "def preproc_doc(document):\n\n # Each document is a list of lines\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n # set a random seed for reproducability\n # since this function is run in parallel, if we hardcode a seed, all\n # documents will have the same permutations. Instead we use the hash of the\n # first sentence as the seed so it is different for each document and it\n # is still reproducible.\n hash_object = hashlib.md5(document[0])\n rng = random.Random(int(hash_object.hexdigest(), 16) % (10**8))\n\n # Each document is composed of a list of sentences. We create paragraphs\n # by keeping together sentences on the same line and adding adjacent sentences\n # if there are fewer than 5 to form the paragraph.\n # The utility functions below expect the document to be split by paragraphs.\n list_of_paragraphs = []\n paragraph = []\n for line in document:\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n for sent in sents:\n tokens = tokenizer.tokenize(sent)\n if tokens:\n paragraph.append(tokens)\n if len(paragraph) > 5:\n list_of_paragraphs.append(paragraph)\n paragraph = []\n\n # In case of any empty paragraphs, remove them.\n list_of_paragraphs = [x for x in list_of_paragraphs if x]\n\n # Convert the list of paragraphs into TrainingInstance object\n # See preprocessing_utils.py for definition\n if FLAGS.format == FORMAT_BINARY:\n instances = create_instances_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n elif FLAGS.format == FORMAT_PARAGRAPH:\n instances = create_paragraph_order_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n\n # Convert token lists into ids and add any needed tokens and padding for BERT\n tf_examples = [\n convert_instance_to_tf_example(tokenizer, instance,\n FLAGS.max_seq_length)[0]\n for instance in instances\n ]\n\n # Serialize TFExample for writing to file.\n tf_examples = [example.SerializeToString() for example in tf_examples]\n\n return tf_examples", "def process(self, doc_data):\n self.doc_data = doc_data\n self.process_text(self.auto_link_messages)\n self.process_text(self.auto_link_xips)\n self.add_type_sizes()\n return self.doc_data", "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def process(self, doc):\n self.doc = doc\n if self.replace_words is True:\n self.replace_words_fun()\n if self.remove_html_tags is True:\n self.remove_html_tags_fun()\n if self.remove_stopwords is True:\n self.remove_stopwords_fun()\n if self.remove_numbers is True:\n self.remove_numbers_fun()\n if self.remove_punctations is True:\n self.remove_punctations_fun() \n if self.lemmatize is True:\n self.lemmatize_fun()\n return self.doc", "def postprocess_docs(self, doc_scores, docs, input_strings, add_eos, prefix, print_docs=False):\n\n def cat_input_and_doc(doc_score, domain, entity_name, doc_title, doc_text, input_string, add_eos, prefix, print_docs=False):\n # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation\n # TODO(piktus): better handling of truncation\n if doc_title.startswith('\"'):\n doc_title = doc_title[1:]\n if 
doc_title.endswith('\"'):\n doc_title = doc_title[:-1]\n if prefix is None:\n prefix = \"\"\n if entity_name is None:\n entity_name = \"*\"\n suffix = self.generator_tokenizer.eos_token if add_eos else \"\"\n out = (\n prefix + domain + self.config.title_sep + entity_name + self.config.title_sep + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string + suffix\n ).replace(\" \", \" \")\n if print_docs:\n logger.info(\"{} {}\".format(doc_score, out))\n return out\n\n rag_input_strings = [\n cat_input_and_doc(\n doc_scores[i][j],\n docs[i][j]['domain'],\n docs[i][j]['entity_name'],\n docs[i][j]['doc']['title'],\n docs[i][j]['doc']['body'],\n input_strings[i],\n add_eos,\n prefix,\n print_docs,\n )\n for i in range(len(docs))\n for j in range(self.n_docs)\n ]\n\n contextualized_inputs = self.generator_tokenizer.batch_encode_plus(\n rag_input_strings,\n max_length=self.config.max_combined_length,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=False,\n ).to(doc_scores.device)\n\n return contextualized_inputs[\"input_ids\"], contextualized_inputs[\"attention_mask\"]", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def run_docs(self, *docs):\n self.docs = docs\n self.run()", "def structure_PBDMS_annotations(documents, kb_data):\n \n doc_annotations = list()\n partial_func = partial(parse_PBDMS_doc, kb_data)\n \n with multiprocessing.Pool(processes=10) as pool:\n doc_annotations = pool.map(partial_func, documents)\n \n return doc_annotations", "def process(self, doc):\n raise multisearch.errors.FeatureNotAvailableError", "def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)", "def process_corpus(args):\n\n fs = open(args.input,'r')\n out = list()\n for line in fs:\n blob = TextBlob(line.strip())\n result_info = dict()\n result_info\n result_info['correct'] = str(blob.correct())\n if args.parse :\n result_info['parse'] = get_parsed_text(blob)\n if args.tokenize:\n result_info['tokenize'] = get_tokenizer_result(blob)\n if args.sentiment:\n result_info['sentiment'] = analyze_sentiment(blob)\n if args.sentence_sentiment:\n result_info['sentence_sentiment'] = analyze_sentence_sentiment(blob)\n if args.noun_phrase:\n result_info['noun_phrase'] = get_noun_phrases(blob)\n if args.pos:\n result_info['pos'] = get_pos_tags(blob)\n\n out.append(result_info)\n print out\n json.dump(out,open('out.json','w'))\n fs.close()\n print '******************************* Execution completed *********************************'" ]
[ "0.6669254", "0.6452818", "0.6272669", "0.6263057", "0.6151627", "0.6133174", "0.6116166", "0.60921097", "0.6062387", "0.6043837", "0.5952495", "0.5947582", "0.5938026", "0.5930227", "0.58644366", "0.5852183", "0.58298236", "0.5801322", "0.57992077", "0.57630277", "0.57474416", "0.57435316", "0.570151", "0.56982166", "0.5677628", "0.5668026", "0.56464124", "0.5608801", "0.5586619", "0.55241585" ]
0.8031649
0
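The `process_spacy` document above uses the spaCy 2.x pipeline API (function components passed to `add_pipe`, pipes fetched by string name). A minimal sketch, under that assumption, of the sentence-boundary trick it relies on; the model name and sample text are placeholders:

import spacy  # assumes spaCy 2.x with an English model installed

def prevent_sentence_boundary_detection(doc):
    # Mark every token as "not a sentence start" so the parser keeps the
    # whole pre-tokenized input as one sentence.
    for token in doc:
        token.is_sent_start = False
    return doc

nlp = spacy.load("en_core_web_sm")  # placeholder model name
nlp.add_pipe(prevent_sentence_boundary_detection, name="prevent-sbd", before="parser")

doc = nlp("One chunk of text. It should stay a single sentence.")
print(len(list(doc.sents)))  # expected: 1, since boundary detection is disabled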
Parses the main corpus and IAA gold standard files and joins them in one WebAnnoProject.
def parse_main_iaa(main_dirp, iaa_dirp, opt_fp): # moderator_id = "gilles" # exclude_moderator = lambda x: moderator_id not in Path(x.path).stem # include_moderator = lambda x: moderator_id in Path(x.path).stem main_project = WebannoProject(main_dirp) # # exclude moderator and trial files which start with two digits # main_anndocs_final = [p for p in main_project.annotation_document_fps # if moderator_id not in Path(p).stem and not Path(p).parents[1].stem[0:1].isdigit()] # # iaa_project = WebannoProject(iaa_dirp) # # exclude all annotators except moderator and trial files which start with two digits # iaa_anndocs_final = [p for p in iaa_project.annotation_document_fps # if moderator_id in Path(p).stem and not Path(p).parents[1].stem[0:1].isdigit()] main_project.annotation_document_fps.extend(main_project.annotation_document_fps) main_project.parse_annotation_project() main_project.dump_pickle(opt_fp) print(f"Written project object pickle to {opt_fp}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(url, inputFile, directory, rss, opml, output, verbose, debug, relevanceAlgorithm):\n\n if (len(argv) < 2):\n print(\n \"Usage: python3 ctirt.py [options] [target files]\\n\\n Use --> ctirt.py --help for more details...\"\n )\n exit(1)\n\n if (verbose and url) or (url and debug):\n print(\"URL is mutually exclusive with verbose and debug\")\n exit(1)\n \n \n \n # INITIALIZE DOCUMENTS LIST\n documents = [] # list of document objects\n\n # OPML FILE INPUT\n\n if opml:\n printLogo()\n print(\"\\033[0;34m\" + \"Parsing provided opml file: \" + \"\\033[0m\" + \"\\033[1m\" + opml + \"\\033[0m\")\n\n rssList = parser.parseOpml(opml)\n\n for rss in rssList:\n print(\"Parsing RSS feed: \" + \"\\033[1m\" + rss + \"\\033[0m\")\n\n feed = parser.parseRss(rss)\n \n if not verbose:\n # progress bar\n progressBar = IncrementalBar('\\tParsing URLs in RSS feed:', max=len(feed.entries), suffix='%(index)d / %(max)d')\n\n for entry in feed.entries:\n document = Document()\n\n document.path = entry.link\n \n document.name, document.text = parser.parseUrl(document.path)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n print(\"\\n\\t\" + \"\\033[0;32m\" + u'\\u2713' + \" Done parsing RSS feed: \" + \"\\033[0m\" + \"\\033[1m\" + rss + \"\\033[0m\")\n # RSS INPUT\n\n elif rss:\n printLogo()\n print(\"Parsing\", rss)\n\n feed = parser.parseRss(rss)\n if not verbose:\n # progress bar\n progressBar = IncrementalBar('Parsing URLs', max=len(feed.entries), suffix='%(index)d / %(max)d')\n\n for entry in feed.entries:\n document = Document()\n\n document.path = entry.link\n \n document.name, document.text = parser.parseUrl(document.path)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n if not verbose:\n progressBar.finish()\n\n print(\"Done.\")\n \n # URL INPUT\n \n elif url:\n printLogo()\n print(\"Parsing...\")\n\n document = Document()\n\n document.path = url\n \n document.name, document.text = parser.parseUrl(url)\n \n document.wordCount = parser.countWords(document.text)\n \n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n print(\"Done.\")\n\n \n # SINGLE FILE INPUT\n\n elif inputFile:\n printLogo()\n print(\"Parsing...\")\n\n document = Document()\n\n document.name = os.path.splitext(inputFile)[0]\n document.path = inputFile\n\n if inputFile.lower().endswith(\".pdf\"): # PDF Parsing\n document.text = parser.parsePdf(inputFile)\n elif inputFile.lower().endswith(\".html\"): # HTML Parsing\n document.text = parser.parseHtml(inputFile)\n\n document.wordCount = parser.countWords(document.text) # Document word count\n\n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n print(\"Done.\")\n\n\n # DIRECTORY INPUT\n\n elif directory:\n printLogo()\n if not verbose:\n # progress bar\n progressBar = IncrementalBar('Parsing', max=len(\n os.listdir(directory)), suffix='%(index)d / %(max)d')\n\n # Loop through files in directory\n for inputFile in os.scandir(directory):\n beginningTime = time.time()\n\n if verbose:\n timeStamp = time.time()\n print(\"***[\" + inputFile.name[0:50] + \"]***\", \"is currently being parsed\",\n \"-->\", (timeStamp - beginningTime), \"seconds have 
elapsed...\")\n\n document = Document()\n\n document.name = os.path.splitext(inputFile.name)[0]\n document.path = inputFile.path\n\n if verbose:\n print(inputFile.name)\n\n if inputFile.name.lower().endswith(\".pdf\"): # PDF Parsing\n document.text = parser.parsePdf(inputFile.path)\n elif inputFile.name.lower().endswith(\".html\"): # HTML Parsing\n document.text = parser.parseHtml(inputFile.path)\n\n document.wordCount = parser.countWords(\n document.text) # Document word count\n\n # Add document object to list, add document wordcount to list\n documents.append(document)\n\n if not verbose:\n progressBar.next()\n else:\n print(\"Done.\")\n \n if not verbose:\n progressBar.finish()\n\n\n # BASIC RELEVANCE CALCULATION\n\n for document in documents:\n document.relevance = relevance.computeBasicRelevance(document.text)\n\n\n # TF-IDF RELEVANCE CALCULATION\n\n if directory and (verbose or debug or relevanceAlgorithm == \"tfidf\"):\n dirWordCount = parser.countDirectoryWords(documents)\n\n wordList = {}\n with open('./assets/wordlist.json') as f:\n jsonWordList = load(f)\n for pair in jsonWordList.items():\n wordList[pair[0]] = float(pair[1])\n\n for document in documents:\n # TODO Figure out how to run - fix arguments (ex. import wordlist), make debug work better by allowing it to work not in verbose\n idfs = relevance.computeIDF(documents, dirWordCount)\n print(\"**************** IDFS ****************\")\n print(idfs)\n tf = relevance.computeTF(wordList, document.wordCount)\n print(\"**************** TF DICT ****************\")\n print(tf)\n\n tfidf = relevance.computeTFIDF(tf, idfs)\n print(\"**************** TF-IDF Values ****************\")\n print(tfidf)\n\n relevanceScore = 0\n\n for word, val in tfidf.items():\n relevanceScore += val\n \n document.tfidf = relevanceScore * 100\n\n\n # OUTPUT SECTION\n\n documents.sort(key=lambda document: document.relevance, reverse=True)\n\n table = []\n tableHeaders = []\n outputData = []\n # print(\"**************** RELEVANCE SCORES ****************\")\n for document in documents:\n outputData.append({'name': document.name[0:30], 'relevance': document.relevance,'path': document.path, 'topTerms': list(document.wordCount.items())[:10]})\n if url or rss or opml: \n table.append([document.name[0:30], document.relevance, document.path])\n tableHeaders = [\"Document\",\"Relevance Score\",\"URL\"]\n elif not verbose:\n table.append([document.name[0:70], document.relevance])\n tableHeaders=[\"Document\",\"Relevance Score\"]\n elif verbose and directory:\n table.append([document.name[0:70], document.relevance, document.tfidf, list(document.wordCount.items())[:10]])\n tableHeaders=[\"Document\",\"Relevance Score\", \"TF-IDF Score\", \"Top Terms\"]\n else:\n table.append([document.name[0:70], document.relevance, list(document.wordCount.items())[:10]])\n tableHeaders=[\"Document\",\"Relevance Score\", \"Top Terms\"]\n\n print(tabulate(table, headers=tableHeaders, tablefmt=\"fancy_grid\"))\n\n # OUTPUT TO FILE\n\n with open(output, 'w', encoding='utf-8') as o:\n dump(outputData, o, indent=3)", "def main(args):\n\tif not os.path.isdir(args.dir):\n\t\tprint \"The specified folder is not a directory.\"\n\t\tsys.exit(1)\n\tNUMBER_OF_FILES = len(os.listdir(args.dir))\n\tif args.num_of_files:\n\t\tNUMBER_OF_FILES = args.num_of_files\n\tprint \"Parsing\", NUMBER_OF_FILES, \"files\"\n\tsql = None\n\tif not args.stdout:\n\t\tsql = sqlite3.connect(args.database)\n\t\tsql.execute(\"\"\"PRAGMA foreign_keys = ON;\"\"\")\n\t\tsql.execute(\"\"\"CREATE TABLE airdates(\n\t\t\tgame 
INTEGER PRIMARY KEY,\n\t\t\tairdate TEXT\n\t\t);\"\"\")\n\t\tsql.execute(\"\"\"CREATE TABLE documents(\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tclue TEXT,\n\t\t\tanswer TEXT\n\t\t);\"\"\")\n\t\tsql.execute(\"\"\"CREATE TABLE categories(\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tcategory TEXT UNIQUE\n\t\t);\"\"\")\n\t\tsql.execute(\"\"\"CREATE TABLE clues(\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tgame INTEGER,\n\t\t\tround INTEGER,\n\t\t\tvalue INTEGER,\n\t\t\tFOREIGN KEY(id) REFERENCES documents(id),\n\t\t\tFOREIGN KEY(game) REFERENCES airdates(game)\n\t\t);\"\"\")\n\t\tsql.execute(\"\"\"CREATE TABLE classifications(\n\t\t\tclue_id INTEGER,\n\t\t\tcategory_id INTEGER,\n\t\t\tFOREIGN KEY(clue_id) REFERENCES clues(id),\n\t\t\tFOREIGN KEY(category_id) REFERENCES categories(id)\n\t\t);\"\"\")\n\tfor i, file_name in enumerate(glob(os.path.join(args.dir, \"*.html\")), 1):\n\t\twith open(os.path.abspath(file_name)) as f:\n\t\t\tparse_game(f, sql, i)\n\tif not args.stdout:\n\t\tsql.commit()\n\tprint \"All done\"", "def main():\n LESSONS_PATH = os.path.join(LESSON_LOCATOR_DATA, LESSON_SETS[0])\n ORIGINAL_LESSONS_PATH = os.path.join(LESSONS_PATH, \"original\")\n ANNOTATED_LESSONS_PATH = os.path.join(LESSONS_PATH, \"annotated\")\n\n if not os.path.exists(ANNOTATED_LESSONS_PATH):\n os.mkdir(ANNOTATED_LESSONS_PATH)\n\n print(\"Scanning original lessons in %s...\" % ORIGINAL_LESSONS_PATH)\n\n for item in os.listdir(ORIGINAL_LESSONS_PATH):\n if item == \".DS_Store\": continue\n\n print(\" found: %s\" % item)\n\n item_path = os.path.join(ORIGINAL_LESSONS_PATH, item)\n\n lesson_number = None\n lesson_description = None\n mobj = re.search(r'^AY\\s+(\\d+)\\s*-\\s*(.+)\\.txt$', item)\n if mobj:\n lesson_number = mobj.group(1)\n lesson_description = mobj.group(2)\n\n print(\" number: %s\" % lesson_number)\n print(\" description: %s\" % lesson_description)\n\n lesson = dict()\n lesson['number'] = lesson_number\n lesson['description'] = lesson_description\n\n fh = open(item_path)\n lesson_raw_text = fh.read()\n fh.close()\n lesson_text = re.split(r'\\n', lesson_raw_text)\n# lesson_raw_text_reencoded = lesson_raw_text.decode('mac-roman').encode('utf-8')\n# lesson_text = re.split(r'\\n', lesson_raw_text_reencoded)\n\n lesson['text'] = lesson_text\n lesson['parsed'] = parseLesson(lesson_text)\n\n if lesson['parsed']['end_of_lesson'] is None:\n print(\" lesson has no 'end of lesson' marker\")\n\n lesson_json = json.dumps(lesson, indent=4)\n annotated_lesson_path = os.path.join(ANNOTATED_LESSONS_PATH, \"ay_%04d.json\" % int(lesson_number))\n fh = open(annotated_lesson_path, \"w\")\n fh.write(lesson_json)\n fh.close()\n\n else:\n print(\"ERROR: File name not understood: %s\" % item)\n\n return 0", "def documents(iati_import, activity, project, activities_globals):\n imported_docs = []\n changes = []\n\n xml_ns = 'http://www.w3.org/XML/1998/namespace'\n first_image = True\n\n for doc_link in activity.findall('document-link'):\n url = ''\n doc_format = ''\n title = ''\n title_language = ''\n category = ''\n language = ''\n\n if 'url' in doc_link.attrib.keys():\n url = doc_link.attrib['url']\n\n # Check if it's the first image\n if url and url.rsplit('.', 1)[1].lower() in VALID_IMAGE_EXTENSIONS and first_image:\n first_image = False\n continue\n\n if 'format' in doc_link.attrib.keys():\n if not len(doc_link.attrib['format']) > 75:\n doc_format = doc_link.attrib['format']\n else:\n add_log(iati_import, 'document_link_format',\n 'format is too long (75 characters allowed)', project)\n\n # 
Check if the format is 'application/http'\n if doc_format == 'application/http':\n continue\n\n title_element = doc_link.find('title')\n if not title_element is None:\n title = get_text(title_element, activities_globals['version'])\n if len(title) > 100:\n add_log(iati_import, 'document_link_title',\n 'title is too long (100 characters allowed)', project,\n IatiImportLog.VALUE_PARTLY_SAVED)\n title = title[:100]\n\n if activities_globals['version'][0] == '1' and \\\n '{%s}lang' % xml_ns in title_element.attrib.keys():\n if not len(title_element.attrib['{%s}lang' % xml_ns]) > 2:\n title_language = title_element.attrib['{%s}lang' % xml_ns]\n else:\n add_log(iati_import, 'document_link_title_language',\n 'language is too long (2 characters allowed)', project)\n elif activities_globals['version'][0] == '2':\n narrative_element = title_element.find('narrative')\n if not narrative_element is None and \\\n '{%s}lang' % xml_ns in narrative_element.attrib.keys():\n if not len(narrative_element.attrib['{%s}lang' % xml_ns]) > 2:\n title_language = narrative_element.attrib['{%s}lang' % xml_ns]\n else:\n add_log(iati_import, 'document_link_title_language',\n 'language is too long (2 characters allowed)', project)\n\n category_element = doc_link.find('category')\n if not category_element is None and 'code' in category_element.attrib.keys():\n if not len(category_element.attrib['code']) > 3:\n category = category_element.attrib['code']\n else:\n add_log(iati_import, 'document_link_category',\n 'category is too long (3 characters allowed)', project)\n\n language_element = doc_link.find('language')\n if not language_element is None and 'code' in language_element.attrib.keys():\n if not len(language_element.attrib['code']) > 2:\n language = language_element.attrib['code']\n else:\n add_log(iati_import, 'document_link_language',\n 'language is too long (2 characters allowed)', project)\n\n doc, created = get_model('rsr', 'projectdocument').objects.get_or_create(\n project=project,\n url=url,\n format=doc_format,\n title=title,\n title_language=title_language,\n category=category,\n language=language\n )\n\n if created:\n changes.append(u'added project document (id: %s): %s' % (str(doc.pk), doc))\n\n imported_docs.append(doc)\n\n for doc_link in project.documents.all():\n if not doc_link in imported_docs:\n changes.append(u'deleted project document (id: %s): %s' %\n (str(doc_link.pk),\n doc_link.__unicode__()))\n doc_link.delete()\n\n return changes", "def read_and_clean_files(clueweb_file, ann_file, data_dir, ann_dir):\n annotation_input = fileinput.FileInput(os.path.join(ann_dir, ann_file), openhook=fileinput.hook_compressed)\n annotation_list = []\n for line in annotation_input:\n\tannotation_list.append(Annotation.parse_annotation(line))\n\n warc_path = os.path.join(data_dir, clueweb_file)\n warc_file = warc.open(warc_path)\n print \"Replacing entity mentions for \", clueweb_file, \":\", ann_file, \"...\"\n start = time.time()\n warc_entry = WarcEntry(warc_path, warc_file, annotation_list)\n cleaned_records = warc_entry.replace_entity_mentions()\n end = time.time()\n print \"Time used: \", end - start\n warc_file.close()\n return cleaned_records", "def analyze_text(self):\n\n # Read stylesheet.\n found_stylesheet = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found_stylesheet = True\n style = self.project.open(filename, 'r')\n self._read_stylesheet(style)\n if not found_stylesheet:\n raise IOError('stylesheet not found')\n\n # Process text data.\n for 
filename in self.project.namelist():\n if filename.endswith('.usx'):\n usx = self.project.open(filename, 'r')\n for text in self._process_usx_file(usx):\n yield text\n # self.exemplars.process(text)\n # self.corpus.write(text + '\\n')", "def main():\n download_insert_title_basics()\n download_insert_title_principals()\n download_insert_name_basics()\n download_insert_title_ratings()\n scrap_keywords()\n create_and_insert_soup()\n return", "def load_known_corpora():\n\n print (\"Loading Corpora\")\n\n # For debugging purposes, comment out the definition of all but say the first\n # corpus, to speed the ingest time.\n shenoute_a22 = Corpus()\n shenoute_a22.annis_corpus_name = \"shenoute.a22\"\n shenoute_a22.title = \"Acephalous Work 22\"\n shenoute_a22.slug = \"acephalous_work_22\"\n shenoute_a22.urn_code = \"shenoute.a22\"\n shenoute_a22.github = \"https://github.com/CopticScriptorium/corpora/tree/master/shenoute-a22\"\n\n patrum = Corpus()\n patrum.annis_corpus_name = \"apophthegmata.patrum\"\n patrum.title = \"Apophthegmata Patrum\"\n patrum.slug = \"ap\"\n patrum.urn_code = \"ap\"\n patrum.github = \"https://github.com/CopticScriptorium/corpora/tree/master/AP\"\n\n saof = Corpus()\n saof.annis_corpus_name = \"shenoute.abraham.our.father\"\n saof.title = \"Abraham Our Father\"\n saof.slug = \"abraham_our_father\"\n saof.urn_code = \"shenoute.abraham\"\n saof.github = \"https://github.com/CopticScriptorium/corpora/tree/master/abraham\"\n\n besa_ap = Corpus()\n besa_ap.annis_corpus_name = \"besa.letters\"\n besa_ap.title = \"Letter to Aphthonia\"\n besa_ap.slug = \"to_aphthonia\"\n besa_ap.urn_code = \"besa.aphthonia\"\n besa_ap.github = \"https://github.com/CopticScriptorium/corpora/tree/master/besa-letters\"\n\n fox = Corpus()\n fox.annis_corpus_name = \"shenoute.fox\"\n fox.title = \"Not Because a Fox Barks\"\n fox.slug = \"not_because_a_fox_barks\"\n fox.urn_code = \"shenoute.fox\"\n fox.github = \"https://github.com/CopticScriptorium/corpora/tree/master/shenoute-fox\"\n\n mark = Corpus()\n mark.annis_corpus_name = \"sahidica.mark\"\n mark.title = \"Gospel of Mark\"\n mark.slug = \"gospel_of_mark\"\n mark.urn_code = \"nt.mark\"\n mark.github = \"https://github.com/CopticScriptorium/corpora/tree/master/bible\"\n\n corinth = Corpus()\n corinth.annis_corpus_name = \"sahidica.1corinthians\"\n corinth.title = \"1 Corinthians\"\n corinth.slug = \"1st_corinthians\"\n corinth.urn_code = \"nt.1cor\"\n corinth.github = \"https://github.com/CopticScriptorium/corpora/tree/master/bible\"\n\n snt = Corpus()\n snt.annis_corpus_name = \"sahidica.nt\"\n snt.title = \"New Testament\"\n snt.slug = \"new-testament\"\n snt.urn_code = \"nt\"\n snt.github = \"https://github.com/CopticScriptorium/corpora/tree/master/bible\"\n\n eager = Corpus()\n eager.annis_corpus_name = \"shenoute.eagerness\"\n eager.title = \"I See Your Eagerness\"\n eager.slug = \"eagernesss\"\n eager.urn_code = \"shenoute.eagerness\"\n eager.github = \"https://github.com/CopticScriptorium/corpora/tree/master/shenoute-eagerness\"\n\n besa_nuns = Corpus()\n besa_nuns.annis_corpus_name = \"besa.letters\"\n besa_nuns.title = \"Letter to Thieving Nuns\"\n besa_nuns.slug = \"to_thieving_nuns\"\n besa_nuns.urn_code = \"besa.thieving\"\n besa_nuns.github = \"https://github.com/CopticScriptorium/corpora/tree/master/besa-letters\"\n\n doc_pap = Corpus()\n doc_pap.annis_corpus_name = \"doc.papyri\"\n doc_pap.title = \"Documentary Papyri\"\n doc_pap.slug = \"papyri\"\n doc_pap.urn_code = \"copticDoc:papyri_info\"\n doc_pap.github = 
\"https://github.com/CopticScriptorium/corpora/tree/master/doc-papyri\"\n\n known_corpora = [shenoute_a22, patrum, saof, besa_ap, fox, mark, corinth, snt, eager, besa_nuns, doc_pap]\n# known_corpora = [shenoute_a22]\n \n for one in known_corpora:\n try:\n Corpus.objects.get(slug__exact=one.slug)\n except Corpus.DoesNotExist:\n one.save()", "def process(self):\n\n linelang = defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)", "def merge_articles(docs_folder):\n\n s = \"\"\n \n for doc in os.listdir(docs_folder):\n try:\n with open(docs_folder + doc ,'r') as f:\n\n lines = f.readlines()\n raw_doc = \"\".join(txt for txt in lines)\n left_idx_headline = [ m.end(0) for m in re.finditer(r\"<HEADLINE>\",raw_doc)]\n right_idx_headline = [ m.start(0) for m in re.finditer(r\"</HEADLINE>\",raw_doc)]\n\n left_idx_text = [ m.end(0) for m in re.finditer(r\"<TEXT>\",raw_doc)]\n right_idx_text = [ m.start(0) for m in re.finditer(r\"</TEXT>\",raw_doc)]\n\n raw_headline = raw_doc[left_idx_headline[0]:right_idx_headline[0]]\n raw_text = raw_doc[left_idx_text[0]:right_idx_text[0]]\n\n left_idx_paragraph_headline = [ m.end(0) for m in re.finditer(r\"<P>\",raw_headline)]\n right_idx_paragraph_headline = [ m.start(0) for m in re.finditer(r\"</P>\",raw_headline)]\n\n left_idx_paragraph_text = [ m.end(0) for m in re.finditer(r\"<P>\",raw_text)]\n right_idx_paragraph_text = [ m.start(0) for m in re.finditer(r\"</P>\",raw_text)]\n\n for i in range(len(left_idx_paragraph_headline)):\n s += raw_headline[left_idx_paragraph_headline[i]:right_idx_paragraph_headline[i]-2] + \".\"\n\n for i in range(len(left_idx_paragraph_text)):\n s += raw_text[left_idx_paragraph_text[i]:right_idx_paragraph_text[i]-1]\n except:\n pass\n\n return s", "def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch 
that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))", "def main():\n\tfl = '/home/rupesh20/ProjectFinal/IITB/conv.txt'\n\tPconv = '/home/rupesh20/ProjectFinal/IITB/prevconv.txt'\n filename = '/home/rupesh20/ProjectFinal/IITB/NEw.txt'\n outfile = '/home/rupesh20/ProjectFinal/IITB/Demo.txt'\n outputfile ='/home/rupesh20/ProjectFinal/IITB/Test.txt'\n\n\t\"\"\" \n\t\tOther comments are for debugging.\n\t\"\"\"\n #filename = '/home/rupesh20/ProjectFinal/final/en_US/en_US.blogs.txt'\n #newfilename = '/home/rupesh20/ProjectFinal/IITB/NEw.txt'\n #file = open(filename,'r')\n #newfile = open(newfilename,'w')\n\n #text = file.read()\n #file.close()\n #words = text.split()\n\t\n #new = []\n #new=lowercase(words,0,new)\n #new=url(new,0)\n #new=unwanted(new)\n #new=DictLook(new)\n\t\t\n\twith open(Pconv) as f:\n \t\tlines = f.readlines()\n\t\"\"\" \n\t\tlines : contains list of strings in english for conversion \n\t\"\"\"\n #Writer(new,newfile)\n #newfile.close()\n\n\t\"\"\" \n\t\tObjects for files are opened\n\t\"\"\"\n\ttxt = open(fl,'w')\n Tobj1=open(outputfile,'w')\n #Ttext1=Tobj1.read()\n #Tobj1.close()1\n Nobj=open(filename,'r')\n Ntext=Nobj.read()\n Nobj.close()\n Nobj1=open(outfile,'r')\n Ntext1=Nobj1.read()\n Nobj1.close()\n\t\"\"\" \n\t\tSentences are Tokenized \n\t\"\"\"\n sentences=sent_tokenize(Ntext)\n testSentences=sent_tokenize(Ntext1)\n \n Csize=0\n\t\"\"\" \n\t\tAfter Tokenize the text in sentences \n\t\tWe have calculated the Ngram \n\t\t1,2 3,4,5 grams\n\t\tGram1 list contains uni-gram\n\t\t||ly Gram2,3,4,5 contains further grams\n\t\t\n\t\"\"\"\n for sent in sentences:\n for j in xrange(1,6):\n Csize= Ngram(sent,j,Csize)\n print Csize \n FreqCount(Gram)\n for sent in testSentences:\n TestNgram(sent,5)\n #print Gram5\n #print Sliced(x)\n \n\t\"\"\"\" \n\t\tCalcScore : function to ecaluate the score using the \n\t\t\t stupid backoff algo\n\t\toutput file : generated after the calcscore module , sent : Score\n\t\t\t\n\t\"\"\"\n\tCalcScore(Csize,Tobj1)\n\n\t\"\"\" \n\t\tProcesscontent : module is for Alignment, S-O-V \n\t\t\t\tdetermization.\n\t\n\t\"\"\"\n\n\t\"\"\" \n\t\tbelow code translate, del list[:]( delete whole list )\n\t\n\t\"\"\"\n\tfor line in lines:\n\t\ttemp = []\n\t\ttemp.append(line)\t\n\t\tprocessContent(temp)\n\t\tfor i in Subj:\n\t\t\tBilingDict(txt,i)\n\t\tfor i in obj:\n\t\t\tBilingDict(txt,i)\n\t\tfor i in verb:\n\t\t\tBilingDict(txt,i)\t\n\t\tdel 
Subj[:]\n\t\tdel verb[:]\n\t\tdel obj[:]\n\t\t\n\t\ttxt.write('\\n')\n\n\t#print Subj[0] \n\t#print verb\n\t#print obj\n\t\"\"\" \n\t\tBilingdict : module contain JSON lookup table.\n\t\t\t\tgenrates a file with HINDI text.\n\n\t\"\"\"\n\t\t\t \n\t#print StupidBackoff(x,Sliced(x),Csize)\n #print CountGram1\n #print CountGram2\n #print CountGram3\n #print CountGram4\n #freq.plot(50)", "def main() -> None:\n\n # Retrieving the wiki URL\n url = get_api_url()\n print(url)\n\n # Creates file if it does not exist\n open(TEXT_FILE, \"a\")\n\n with open(TEXT_FILE, \"r\") as f:\n last_title = f.readline().strip()\n print(\"Starting from:\", last_title)\n\n # Retrieving the pages JSON and extracting page titles\n pages_json = get_pages_json(url, last_title)\n pages = pages_json[\"query\"][\"allpages\"]\n print(\"Pages to be scanned:\", pages)\n\n # Tagging operations\n for page in pages:\n curr_title = page[\"title\"]\n cats_to_add = get_categories(curr_title)\n if cats_to_add:\n print(\"Adding categories\", cats_to_add, \"to '%s'\" % curr_title)\n for cat in cats_to_add:\n add_category(curr_title, \"[[Category:\" + cat + \"]]\")\n\n # Extracting title to continue iterating from next run\n if \"continue\" in pages_json:\n continue_from_title = pages_json[\"continue\"][\"apcontinue\"]\n print(\"Continuing from:\", continue_from_title, \"next run.\")\n else:\n continue_from_title = \"\"\n\n with open(TEXT_FILE, \"w+\") as f:\n f.write(continue_from_title)\n print(\"Wrote\", continue_from_title, \"in\", TEXT_FILE)\n\n print(\"No pages left to be tagged\")", "def main():\n files = init.file_list\n citations = load_citations.load_files(files)\n citations_a = citations[0]\n citations_b = citations[1]\n\n common_citations, num_common = compare_citations.common_citations(citations_a, citations_b)\n save_citations.save_citations(common_citations)", "def build_training_data():\r\n for i in range(len(FILE_NAMES)):\r\n input_text = read_file(FILE_NAMES[i])\r\n list_of_word_lines = limiting_sentence_length(input_text)\r\n data = create_training_data_file(list_of_word_lines, LANGUAGE[i])\r\n write_training_data(data, LANGUAGE[i])\r\n merge_training_data()", "def Main(root_directory):\n filepaths = GetAllFilepaths(root_directory)\n for filepath in filepaths:\n parser = fileparser.CreateParser(filepath)\n if not parser:\n ReportWarning('cannot find a parser for file %s, skipping...' %\n filepath)\n continue\n old_file_contents = ReadFileIntoString(filepath)\n comment_blocks = parser.FindAllCommentBlocks(old_file_contents)\n if not comment_blocks:\n ReportWarning('cannot find any comment blocks in file %s' %\n filepath)\n old_copyright_block = parser.FindCopyrightBlock(comment_blocks)\n if not old_copyright_block:\n ReportWarning('cannot find copyright block in file %s' % filepath)\n (year, holder) = parser.GetCopyrightBlockAttributes(old_copyright_block)\n if holder and not ConfirmAllowedCopyrightHolder(holder):\n ReportWarning(\n 'unrecognized copyright holder \"%s\" in file %s, skipping...' 
% (\n holder, filepath))\n continue\n new_copyright_block = parser.CreateCopyrightBlock(year, holder)\n if old_copyright_block:\n new_file_contents = old_file_contents.replace(\n old_copyright_block, new_copyright_block, 1)\n else:\n new_file_contents = new_copyright_block + old_file_contents\n WriteStringToFile(new_file_contents, filepath)", "def test2_basic_info(self):\n\t\tprint \"\\nTEST 2: Extracting basic info from each ontology in %s folder.\\n=================\" % DATA_FOLDER\n\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\n\t\t\t\t\n\t\t\t\t# divert output to a file temporarily \n\t\t\t\tsaveout = sys.stdout \n\t\t\t\tfsock = open('out.log', 'w') \n\t\t\t\tsys.stdout = fsock \n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\tprintBasicInfo(o)\t\t\t\t\n\t\t\t\t\n\t\t\t\tsys.stdout = saveout\n\t\t\t\tfsock.close()\n\t\t\t\tprint \"Success.\"", "def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")", "def main():\n # %%\n 
CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True)\n fpaths = list( _Config.raw_profiles_path.glob('*.html') )\n print( f'{len(fpaths)} htmls found' )\n # %%\n fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html'\n # %%\n fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html'\n # %%\n fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ]\n # %%\n fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')]\n # %%\n dics = {}\n # %%\n\n for i, fpath in enumerate(fpaths):\n if fpath in dics:\n continue\n\n with fpath.open('rt') as f_in:\n html = f_in.read()\n\n print( f'\\n***{i+1}/{len(fpaths)} {fpath.name}:')\n dic = extract_one( html, fpath )\n dic['linkedin_url'] = f\"https://www.linkedin.com/in/{fpath.name.split('.')[0]}\"\n dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime )\n # pprint(dic['work_stats'])\n dics[fpath] = dic\n\n dics_arr = list(dics.values())\n # %%\n del dics\n # %%\n\n with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out:\n json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 )\n # %%\n with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out:\n yaml.safe_dump( dics_arr, f_out )\n # %%\n df = produce_summary_table( dics_arr )\n df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx',\n index=False)\n # %%", "def mk_eng_txt_files(self, **rewrite):\n\n\n print \"mk_eng_txt_files: RETRIEVING PAGE_LIST..........\"\n page_list = c_m.l_of_l_read(self.page_list_path)\n\n if len(page_list) < 1: # handling empty page_list case\n print \"mk_eng_txt_files: PAGE LIST IS NOT POPULATED, RUN HTML_File_Maker AND Text_Extractor MODULES FIRST\"\n else: # handling page_list for partially transalted sites\n print \"mk_eng_txt_files: IN CASE PAGE LIST ALREADY HAD SOME ENG_TEXT ENTRIES SETTING INITIAL new_page_list TO LAST KNOWN PAGE_LIST VERSION\"\n self.new_page_list = copy(page_list)\n\n\n\n # iterating throug unique text per page txts\n for data_set in page_list:\n self.p_text_f_name = data_set[2]\n print \"mk_eng_txt_files: TRANSLATING TEXT FROM FILE %s\" % self.p_text_f_name\n\n self.eng_p_text_f_name = \"eng_\" + self.p_text_f_name\n self.eng_p_text_f_path = self.text_eng_folder_path + self.eng_p_text_f_name\n\n self.page_text = c_m.simply_read(self.text_folder_path, self.p_text_f_name)\n\n # if page has less than 10 symbols it is not translated\n if len(self.page_text) < 10:\n print \"mk_eng_txt_files: NOT WORTH TRANSLATING, WRITING AS IS AND SKIPPING...\"\n c_m.simply_write(self.page_text, self.eng_p_text_f_path)\n\n elif len(self.page_text) > self.max_page_length:\n print \"mk_eng_txt_files: PAGE TEXT IS TOO LONG DEVIDING TO PARTS, TRANSLATING AND GETTING BACK FULL PAGE TEXT\"\n text_output = self.get_text_parts(**rewrite)\n\n else: # 10 < len(page_text) < 2000\n\n if rewrite[\"rewrite\"]:\n print \"mk_eng_txt_files: TRANSLATING IN REWRITE MODE\"\n text_output = self.get_text()\n\n\n elif not os.path.exists(self.eng_p_text_f_path):\n print \"mk_eng_txt_files: TRANSLATING IN ONLY ONCE MODE\"\n text_output = self.get_text()\n\n else:\n print \"mk_eng_txt_files: SKIPPING FILE, ALREADY TRANSLATED\"\n # continue\n\n # print \"WRITING TRANSLATED OUTPUT TO FILE: \", self.eng_p_text_f_name\n # c_m.simply_write(text_output, self.text_eng_folder_path, self.eng_p_text_f_name) \n data_set.append(self.eng_p_text_f_name) # updating dataset with eng_text file name\n self.new_page_list.append(data_set) # updating page list with 
updated entry\n\n\n print \"mk_eng_txt_files: DONE TRANSLATING SITE %s \" % self.domain\n print \"mk_eng_txt_files: UPDATING PAGE LIST WITH ENG TEXT FILE NAMES\"\n c_m.l_of_l_write(self.new_page_list, self.page_list_path)\n print \"mk_eng_txt_files: SITE TRANSLATION FINISHED, CLOSING CHROME WEBDIRVER\"\n self.loaded_driver.quit()", "def preMain(dictADT):\r\n data_addr = \"..\\\\csv\\\\normal.csv\"\r\n\r\n userPref = dictADT\r\n mainPref(\"base.txt\", userPref)\r\n\r\n sample_path = \"base.txt\"\r\n webing(sample_path, data_addr)\r\n\r\n path_in = \"base.txt\"\r\n path_out = \"..\\\\resultHTML\\\\result.html\"\r\n mover(path_in, path_out)\r\n\r\n baseDel(\"base.txt\")\r\n # baseDel(\"resultHTML\\\\result.html\")\r\n baseDel(\"..\\\\csv\\\\normal.csv\")\r\n return True", "def main():\n\n # Remove the files if existing before creating new ones\n OUT_VOCAB = \"results/vocabs.txt\"\n OUT_SYN = \"results/syn.txt\"\n OUT_EXCP = \"results/exception.txt\"\n for f in [OUT_SYN, OUT_VOCAB, OUT_EXCP]:\n if os.path.isfile(f):\n os.remove(f)\n\n \"\"\"\n Now we do the recursion:\n Step 1: Find the children link;\n Step 2: Add to the url_list;\n Step: Move to next entry of the url_list and Go to Step 1;\n \"\"\"\n with open(DATA, \"r\") as fin:\n url_list = fin.readlines()\n\n # Write down the urls as tuples, 0 as the first level\n url_list = [((PREFIX_URL + subURL).strip(), 0) for subURL in url_list]\n\n with open(OUT_VOCAB, \"w\") as vocab_file, \\\n open(OUT_SYN, \"w\") as syn_file, \\\n open(OUT_EXCP, \"w\") as exp_file:\n cur_index = 0\n while cur_index < len(url_list):\n # Current url of the term, current level number (hierarchy) of the term\n cur_url, cur_hrc = url_list[cur_index]\n try:\n # Get the current sub-url\n time.sleep(0.1)\n r = requests.get(cur_url + \"?\" + API_KEY)\n source_dict = json.loads(r.text)\n\n # Getting the vocabulary and the synonym from this page.\n vocab, syn = parse_words(source_dict=source_dict)\n if cur_hrc + 1 < int(level):\n # Fetch all the children's name and url\n children_urls = fetch_children_url(source_dict=source_dict, hrc=cur_hrc + 1)\n # print children_urls\n url_list = url_list + children_urls # Concatenate the lists\n\n if vocab is not None:\n syn_file.write(json.dumps({vocab: syn}) + \"\\n\")\n vocab_file.write(vocab + \"\\n\")\n\n if cur_index % 100 == 0:\n print \"\\tNow at index # {}, {} in total.\".format(str(cur_index), str(len(url_list)))\n except UnicodeEncodeError as unicode_err:\n exp_file.write(str(unicode_err))\n except KeyError as key_err:\n exp_file.write(str(key_err))\n except:\n pass\n cur_index += 1", "def main():\n with open(\"page_data.yaml\", 'r') as inputstr:\n config_data = yaml.safe_load(inputstr)\n ointf = OutputInterface('template.txt')\n table_data = get_song_artist_matches()\n ofilen = config_data['directory'] + os.sep + 'common_songs.html'\n title = 'Song Titles and Band Name Overlap'\n header = ['No.', 'Artist', 'Peak', 'Date', 'Song/Artist', 'Peak',\n 'Date', 'Song']\n ointf.build_page(ofilen, title, header, fmt_table(table_data))\n ointf.inject(XTRAEDIT)\n ointf.output()", "def test_project(self, doi_dataset, doi_bib, orcid, metadata_parser):\n apply_mock(doi_dataset, doi_bib, orcid, metadata_parser)\n os.chdir(\"input/\")\n _set_args(\"-i\",\"yamls/project.yaml\",\"-o\",\"../out\")\n with HiddenPrints():\n ya2ro.main()\n\n data = yaml.load(\"yamls/project.yaml\", Loader=SafeLoader)\n with open(\"../out/project/index-en.html\") as f:\n web = f.read()\n\n self.assert_data_in_web(data, web)", "def index_file(self, file_name):\n 
self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()", "def main():\n\n global APP_NAME\n\n # Defines Argument Parser and fefines flags and expected inputs\n parser = argparse.ArgumentParser(\n description='Converts specified html files or all html files to \\\n\t\t\tdjango format within a \\n specified directory.'\n )\n # Defines the -f flag, standing for files, to gey file nameof the HTML\n # file to convert\n parser.add_argument(\n 'files',\n metavar='f',\n type=str,\n nargs='*',\n help='provide file names to convert'\n )\n # Defines the -a flag, for defining the APP_NAME, you want the file\n # converted to, for.\n parser.add_argument(\n '-a',\n dest='app_name',\n type=str,\n nargs='?',\n help='provide django app name'\n )\n # Defines the -d flag, standing for directory, which accepts the path\n # to a directory containing the files to be translated\n parser.add_argument(\n '-d',\n dest='base_directory',\n type=str,\n nargs='?',\n help='Provide base directory'\n )\n\n # Parse the Arguments from the user\n args = parser.parse_args()\n\n # Deconstruct the arguments from the parser\n files = args.files\n directory = args.base_directory\n app_name = args.app_name\n\n # If APP_NAME is not passes in as an argument, leave it as ''(empty)\n if app_name is not None:\n APP_NAME = app_name + \"/\"\n\n # If directory is not passed in as an argument, use the current working\n # directory to fetch files\n if 
directory is None:\n directory = os.getcwd()\n\n logging.info(\"Directory : \" + str(directory))\n logging.info(\"app_name : \" + str(app_name))\n\n # Check if the directory passed in as argument already has the directory \n # 'Modified_files', else create it.\n if not os.path.exists(os.path.join(directory, \"Modified_files\")):\n os.mkdir(os.path.join(directory, \"Modified_files\"))\n\n if files != []:\n for file in files:\n processFile(directory, directory + \"/\" + file, file)\n\n else:\n # If no file was passed in as input, then extract all files in the \n # directory passed in, with extension '.html'\n for file in os.listdir(directory):\n if file.endswith(\".html\"):\n processFile(directory, directory + \"/\" + file, file)", "def main(base_path):\n current = os.getcwd()\n try:\n if not(os.path.exists(base_path)):\n ans = 'y'\n if p_out:\n print(\"Do you want to create \" + base_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n pass\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n else:\n m_path = os.path.join(base_path, 'nzmath/manual')\n if os.path.exists(m_path):\n ans = 'y'\n if p_out:\n print(\"Do you want to remove \" + m_path + \"?(y/n)\")\n ans = sys.stdin.read(1)\n print(\"\")\n if ans in ('y', 'Y'):\n for root, dirs, files in os.walk(m_path, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n elif ans in ('n', 'N'):\n raise NoneOutput\n else:\n raise InputError\n dirname = os.path.join(base_path, 'nzmath/manual/modules')\n if not(os.path.exists(dirname)):\n os.makedirs(dirname)\n os.chdir(os.path.join(base_path, 'nzmath/manual/'))\n csspage = convertHPURL('manual/default.css')\n if p_out:\n print(\"get css from \" + csspage)\n retryConnection(urllib.request.urlretrieve, csspage, 'default.css')\n while ad_list:\n files = ad_list.pop()\n MyWikiParser(files).feeds()\n if p_out:\n print(\"\\n\" + \"All process is done!\" + \"\\n\")\n print(\"Ok, now created nzmath-current manual located to\")\n print(os.path.join(base_path, \"nzmath\"))\n print(\"if you check difference between nzmath-cvs manual, with GNU diff,\")\n print(\"$ diff -ubBr /tmp/nzmath/manual {your-nzmathcvs-repo}/manual\")\n print(\"or you check only new version files,\")\n print(\"$ diff -r --brief /tmp/nzmath/manual {your-nzmathcvs-repo}/manual .\")\n except NoneOutput:\n if p_out:\n print('end.')\n except InputError:\n print(\"Error: Invalid input!\")\n except LookupError:\n print(\"Error: Maybe, Japanese encodings(ex.euc_jp) is not supported.\")\n except:\n if p_out:\n print(\"Check \" + base_path + \" (dir? truly path? 
and so on.)\")\n print(\"Delete \" + base_path + \" and try again.\")\n print(\"(Maybe, caused by problem of network connection)\\n\")\n print(sys.exc_info()[0])\n os.chdir(current)", "def parse_and_analyse_corenlp_coref(input_dir = 'CoreNLP_coref_anno/dev', gold_annotations_folder = '../../../data/baseline/dev'):\n\tmentions = []\n\n\n\twith open('coref_analyse_output.txt', 'w') as out_file:\n\n\t\tfor file_name in os.listdir(input_dir):\n\t\t\tif re.match(r'(.+)\\.xml', file_name)!= None:\n\t\t\t\tokr_graph = load_graph_from_file(gold_annotations_folder + '/'+ re.match(r'(.+)\\.xml', file_name).group(1)[:-4]+'.xml')\n\n\t\t\t\ttree = ET.parse(input_dir + '/' + file_name)\n\t\t\t\tdocument = tree.getroot()[0]\n\t\t\t\tsentence_wise_predicted_mentions = defaultdict(list)\n\t\t\t\tsentence_wise_gold_mentions = defaultdict(list)\n\t\t\t\tpredicted_coref_dict = defaultdict(list)\n\t\t\t\tgold_coref_dict = defaultdict(list)\n\n\t\t\t\tcoref_node = document.find('coreference')\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\t\t\tfor mention in coref_chain:\n\t\t\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\t\t\ttext = mention[4].text\n\t\t\t\t\t\tsentence_wise_predicted_mentions[sent_num].append({\"indices\":range(start, end),\"coref\":coref_id+1, \"text\":text})\n\t\t\t\t\t\tpredicted_coref_dict[coref_id+1].append({\"indices\":range(start, end), \"s_num\":sent_num, \"text\":text })\n\n\n\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t\tfor entity in okr_graph.entities.values():\n\t\t\t\t\tfor mention in entity.mentions.values():\n\t\t\t\t\t\tsentence_wise_gold_mentions[mention.sentence_id].append({\"indices\":mention.indices,\"coref\":entity.id, 'text':mention.terms})\n\n\t\t\t\tprint'###'+ file_name + '\\n'\t\n\t\t\t\tfor sentence_id, sentence in enumerate(okr_graph.sentences.values()):\n\t\t\t\t\tprint 'Sentence: ', ' '.join(sentence) \n\t\t\t\t\tprint 'Predicted entities: ', [element['text'] for element in sentence_wise_predicted_mentions[sentence_id+1]]\n\t\t\t\t\tprint 'Gold entities: ', [element['text'] for element in sentence_wise_gold_mentions[sentence_id+1]]\n\t\t\t\t\tprint ' '\n\t\t\t\n\t\t\t\tprint \"Not printing singletons\"\n\t\t\t\tprint('\\nThe predicted clusters: ')\n\t\t\t\tfor cluster_id, cluster in enumerate(predicted_coref_dict.values()):\n\t\t\t\t\tprint('Cluster id: ', cluster_id +1)\n\t\t\t\t\tprint([[okr_graph.sentences[mention['s_num']][index] for index in mention['indices']]for mention in predicted_coref_dict[cluster_id+1]] )\n\n\t\t\t\tprint('\\n The Gold clusters:')\t\n\t\t\t\tfor entity in okr_graph.entities.values():\n\t\t\t\t\tprint('cluster_id: ', entity.id )\n\t\t\t\t\tprint([mention.terms for mention in entity.mentions.values()])\n\n\t\t\t\tprint '**********'", "def main():\n\n for lang, family, host, db, footer, summary in projects:\n wiki = wikipedia.Site(lang, family)\n conn = MySQLdb.connect(host=host, db=db, read_default_file='~/.my.cnf', use_unicode=True)\n cursor = conn.cursor()\n cursor.execute(\"SELECT page_title from page where page_namespace=0 and page_title regexp \\\".*[–-].*\\\";\")\n row = cursor.fetchone()\n endashes = sets.Set()\n hyphens = sets.Set()\n while row:\n pagetitle = re.sub(ur\"_\", ur\" \", unicode(row[0], \"utf-8\"))\n if re.search(ur\"–\", pagetitle) and not re.search(ur\"[-\\(\\)]\", pagetitle): #descartamos las que tienen paréntesis, (cación) (desambiguación)...\n endashes.add(pagetitle)\n if not re.search(ur\"–\", 
pagetitle) and re.search(ur\"-\", pagetitle):\n hyphens.add(pagetitle)\n row = cursor.fetchone() \n cursor.close()\n conn.close()\n\n print len(endashes), \"endashes\"\n\n for pagetitle in endashes:\n footer2=footer\n pagetitle_ = re.sub(ur\"–\", ur\"-\", pagetitle)\n if pagetitle_ not in hyphens:\n #creamos\n redirect = wikipedia.Page(wiki, pagetitle_)\n if not redirect.exists():\n time.sleep(3)\n target = wikipedia.Page(wiki, pagetitle)\n if target.exists():\n if target.isRedirectPage():\n footer2=\"\"\n target = target.getRedirectTarget()\n if target.exists() and not target.isRedirectPage():\n redirect.put(ur\"#REDIRECT [[%s]]%s\" % (target.title(), footer2), u\"BOT - %s [[%s]]\" % (summary, target.title()))\n print ur\"#REDIRECT [[%s]]%s\" % (target.title(), footer2)\n else:\n pass #demasiadas redirecciones anidadas\n else:\n redirect.put(ur\"#REDIRECT [[%s]]%s\" % (target.title(), footer2), u\"BOT - %s [[%s]]\" % (summary, target.title()))\n print ur\"#REDIRECT [[%s]]%s\" % (target.title(), footer2)", "def derive_project(\n session_directory, corpus_filter, session_filter, pre_select=1, post_select=0\n):\n # Change to the session directory\n with change_dir(session_directory):\n # Get the parent directory of the session directory\n parent_directory = os.path.dirname(session_directory)\n\n # Check if 'project.xml' exists in the parent directory\n if \"project.xml\" not in os.listdir(parent_directory):\n raise FileNotFoundError(\"project.xml not found in parent project directory\")\n\n # Determine excluded 'Pre' files based on the pre-select option\n if pre_select == 1:\n # Exclude 'Pre' files with a single digit\n exclude_pre_list = [\n e for e in os.listdir(session_directory) if re.search(r\"Pre\\s?\\d\", e)\n ]\n if pre_select == 2:\n # Exclude 'Pre' files with a single digit and rename files with ' 2' suffix\n multi_pre_list = [\n e for e in os.listdir(session_directory) if re.search(r\"Pre\\s?\\d\", e)\n ]\n exclude_pre_list = [e.replace(\" 2\", \"\") for e in multi_pre_list]\n\n # Determine excluded 'Post' files based on the post-select option\n if post_select == 1:\n # Exclude 'Post II' files\n exclude_post_list = [\n e for e in os.listdir(session_directory) if re.search(r\"Post\\s?II\", e)\n ]\n if post_select == 2:\n # Exclude 'Post II' files and remove ' II' suffix\n multi_post_list = [\n e for e in os.listdir(session_directory) if re.search(r\"Post\\s?II\", e)\n ]\n exclude_post_list = [e.replace(\" II\", \"\") for e in multi_post_list]\n # Create corpus folders\n for corpus in corpus_filter:\n try:\n os.mkdir(os.path.join(parent_directory, corpus))\n except FileExistsError:\n print(f\"{corpus} directory already exists. Adding to folder\")\n # Organize all files\n for e in os.listdir(session_directory):\n assert os.path.isfile(e), \"Error: Subdirectory found.\"\n for corpus in corpus_filter:\n # Don't include excluded Pre files specified by pre-select option\n if pre_select > 0:\n if e in exclude_pre_list:\n continue\n # Don't include excluded Post files specified by pre-select option\n if post_select > 0:\n if e in exclude_post_list:\n continue\n # Copy the file to the appropriate corpus folder if it matches the filters\n if fnmatch.fnmatch(e, \"*\" + corpus + \"*\"):\n for key in session_filter:\n if fnmatch.fnmatch(e, \"*\" + key + \"*\"):\n shutil.copy(e, os.path.join(parent_directory, corpus, e))\n\n else:\n continue\n else:\n continue\n\n return" ]
[ "0.56196046", "0.5577734", "0.5525874", "0.5449749", "0.53875864", "0.53493977", "0.53235525", "0.5311306", "0.5266425", "0.5258258", "0.52553433", "0.5223228", "0.5213364", "0.51755184", "0.51462036", "0.5135373", "0.513461", "0.512963", "0.5127664", "0.51267636", "0.51189923", "0.5118192", "0.5117418", "0.5115215", "0.5092166", "0.5076811", "0.50628084", "0.5062126", "0.5060258", "0.50514185" ]
0.6385642
0
Update the last times attributes of the AddressStatus and of all the processes running on it.
def update_times(self, remote_time, local_time):
    self.remote_time = remote_time
    self.local_time = local_time
    for process in self.processes.values():
        process.update_times(self.address_name, remote_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _UpdateProcessingStatus(self, pid, process_status, used_memory):", "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def status( self ):\n duration = datetime.datetime.now() - self.startTime\n status = {\n 'start': self.startTime.isoformat(),\n 'now': datetime.datetime.now().isoformat(),\n 'duration': duration.total_seconds(),\n 'bookmark': 0,\n 'events': 0,\n 'cumulative_rate': 0,\n 'processes': [],\n 'state': {\n 'id': self.state,\n 'description': definitions.STATE_STRING[self.state]\n }\n }\n\n # Sending pipes to processes which are not running or shutting down\n # will lead to errors and deadlocks. Loop through to detect errors.\n if self.state == definitions.STATE_RUNNING:\n # Loop through all processes and just check we're running properly\n for proxy in self.processes:\n if not proxy.process.is_alive():\n self.logger.info( 'Process {0} is dead.'.format( proxy.name ))\n self.state = definitions.STATE_ERROR\n break\n\n if proxy.request( 'status' )['state'] == definitions.STATE_ERROR:\n self.logger.info( 'Process {0} state is {1}.'.format(\n proxy.name,\n definitions.STATE_STRING[ definitions.STATE_ERROR ]\n ))\n\n self.state = definitions.STATE_ERROR\n break\n\n # Now do the actual status checks\n if self.state == definitions.STATE_RUNNING:\n # Loop through processes in order\n for proxy in self.processes:\n response = proxy.request('status')\n\n proc = {\n 'name': proxy.name,\n 'pid': proxy.process.pid,\n 'count': response['count'],\n 'sleep': response['sleep']\n }\n\n status['events'] = proc['count']\n status['processes'].append( proc )\n\n if 'bookmark' in response:\n status['bookmark'] = response['bookmark']\n\n status['cumulative_rate'] = round(\n status['events'] / duration.total_seconds(), 2)\n\n return status", "def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes", "def update_status(self):\n\n # first get the instances we need to check\n monitor_jobs = {}\n for _, job_node in self.get_executions_iterator():\n if job_node.is_job:\n for job_instance in job_node.instances:\n if not job_instance.simulate:\n if job_instance.host in monitor_jobs:\n monitor_jobs[job_instance.host]['names'].append(\n job_instance.name)\n else:\n monitor_jobs[job_instance.host] = {\n 'config': job_instance.monitor_config,\n 'type': job_instance.monitor_type,\n 'workdir': job_instance.workdir,\n 'names': [job_instance.name],\n 'period': job_instance.monitor_period\n }\n else:\n job_instance.set_status('COMPLETED')\n\n # nothing to do if we don't have nothing to monitor\n if not monitor_jobs:\n return\n\n # then look for the status of the instances through its name\n states = 
self.jobs_requester.request(monitor_jobs, self.logger)\n\n # finally set job status\n for inst_name, state in states.iteritems():\n self.job_instances_map[inst_name].set_status(state)\n\n # We wait to slow down the loop\n sys.stdout.flush() # necessary to output work properly with sleep\n time.sleep(LOOP_PERIOD)", "def update_balances(self):\n self.semaphore.acquire(timeout=2)\n try:\n last_block = get_last_block(self.node_address)\n for address in self.addresses.keys():\n self.addresses[address]['balance'] = \\\n last_block.current_state_balances.get(address, self.addresses[address]['balance'])\n finally:\n self.semaphore.release()", "def update(self):\n\n self._state = get_balance(self.addresses)", "def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))", "def update(self):\n if self.process:\n retcode = self.process.poll()\n # Windows exit code\n if retcode is None:\n # logging.debug(\"Update {}, Process: {}, RUNNING\".format(self.hash,self.process))\n self.status = \"Running\"\n else:\n # Add more handling for irregular retcodes\n # See i.e. http://www.symantec.com/connect/articles/windows-system-error-codes-exit-codes-description\n # logging.debug(\"Update {}, Process: {}, DONE\".format(self.hash,self.process))\n self.run_status = \"Finished\"\n self.finish_time = datetime.datetime.now()\n else:\n # This process has not been started]\n raise\n pass", "def update(self, status):\n\n for name, c in self.children.items():\n c.update(status.child(name))", "def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")", "def update_from_latest_data(self) -> None:\n data = self.coordinator.data[self.entity_description.uid]\n\n self._attr_is_on = bool(data[\"status\"])\n\n next_run: str | None\n if data.get(\"nextRun\") is None:\n next_run = None\n else:\n next_run = datetime.strptime(\n f\"{data['nextRun']} {data['startTime']}\",\n \"%Y-%m-%d %H:%M\",\n ).isoformat()\n\n self._attr_extra_state_attributes.update(\n {\n ATTR_ID: self.entity_description.uid,\n ATTR_NEXT_RUN: next_run,\n ATTR_SOAK: data.get(\"soak\"),\n ATTR_STATUS: RUN_STATE_MAP[data[\"status\"]],\n ATTR_ZONES: [z for z in data[\"wateringTimes\"] if z[\"active\"]],\n }\n )", "def update_attr_par(self):\n\n # Retrieve all current values\n self.all_values_temp = nx.get_node_attributes(self.G, 'value')\n\n # Calculate all new values\n new_values_list = Parallel(n_jobs=2)(delayed(self.single_node_update)(i) \\\n for i in range(self.n_v))\n\n # # Set list to dict as needed for node update\n # new_values_dict = {}\n # for i, value in enumerate(new_values_list):\n # new_values_dict[i] = {'value': value}\n #\n # # Update node value\n # nx.set_node_attributes(self.G, new_values_dict)", "def updateProcess(self, machine, process):\n\n stamp = time.time() - self.initTime\n if machine in self.activity.keys():\n if ((\"processes\" in self.activity[machine].keys()) and \n (process in 
self.activity[machine][\"processes\"].keys())):\n self.activity[machine][\"processes\"][process].append(stamp)\n else:\n self.activity[machine][\"processes\"] = {process : [stamp]}\n else:\n self.activity[machine] = {\"filtered activity\" : [],\n \"raw activity\" : [],\n \"time\" : [],\n \"processes\" : {process : [stamp]}}", "def __updateStreamStatus(self):\n while(True):\n for server,streams in self._streamsByServer.items():\n activeStreams = server.getActiveStreams()\n # Update each streams state\n for stream in streams:\n stream.lock.acquire()\n stream.setStreamState(server,Stream.STATE.DOWN)\n if (stream.name in activeStreams):\n stream.setStreamState(server,Stream.STATE.UP)\n stream.setStreamAddress(server,activeStreams[stream.name])\n stream.lock.release()\n time.sleep(StreamManager.SECS_BETWEEN_STATUS_CHECKS)", "def refresh_status(self):\n\n pass", "def updateRcloneJobStatus():\n global jobIds, jobStatusGauge\n\n # Check if the jobs are running, update the variables\n for jobName, jobId in jobIds.items():\n jobIsRunning = getRcloneJobRunning(jobId)\n jobIds[jobName] = jobId if jobIsRunning else None\n jobStatusGauge.labels(rclone_job=jobName).set(1 if jobIsRunning else 0)", "def _determineProcessStatus(self, procs):\n beforePids = set(self._deviceStats.pids)\n afterPidToProcessStats = {}\n pStatsWArgsAndSums, pStatsWoArgs = self._splitPStatMatchers()\n for pid, (name, psargs) in procs:\n pStats = self._deviceStats._pidToProcess.get(pid)\n if pStats:\n # We saw the process before, so there's a good\n # chance that it's the same.\n if pStats.match(name, psargs):\n # Yep, it's the same process\n log.debug(\"Found process %d on %s, matching %s %s with MD5\",\n pid, pStats._config.name, name, psargs)\n log.debug(\"%s found existing stat %s %s for pid %s - using MD5\", self._devId, pStats._config.name,\n pStats._config.originalName, pid)\n afterPidToProcessStats[pid] = pStats\n continue\n\n elif pStats.match(name, psargs, useMd5Digest=False):\n # In this case, our raw SNMP data from the\n # remote agent got futzed\n # It's the same process. 
Yay!\n log.debug(\"%s - Found process %d on %s, matching %s %s without MD5\",\n self._devId, pid, pStats._config.name, name, psargs)\n afterPidToProcessStats[pid] = pStats\n continue\n\n # Search for the first match in our list of regexes\n # that have arguments AND an MD5-sum argument matching.\n # Explicitly *IGNORE* any matchers not modeled by zenmodeler\n for pStats in pStatsWArgsAndSums:\n if pStats.match(name, psargs):\n log.debug(\"%s Found process %d on %s %s\",\n self._devId, pid, pStats._config.originalName, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n else:\n # Now look for the first match in our list of regexes\n # that don't have arguments.\n for pStats in pStatsWoArgs:\n if pStats.match(name, psargs, useMd5Digest=False):\n log.debug(\"Found process %d on %s\",\n pid, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n\n afterPids = set(afterPidToProcessStats)\n afterByConfig = reverseDict(afterPidToProcessStats)\n newPids = afterPids - beforePids\n deadPids = beforePids - afterPids\n\n restarted = {}\n for pid in deadPids:\n procStats = self._deviceStats._pidToProcess[pid]\n procStats.discardPid(pid)\n if procStats in afterByConfig:\n ZenProcessTask.RESTARTED += 1\n pConfig = procStats._config\n if pConfig.restart:\n restarted[procStats] = pConfig\n\n # Now that we've found all of the stragglers, check to see\n # what really is missing or not.\n missing = []\n for procStat in self._deviceStats.processStats:\n if procStat not in afterByConfig:\n missing.append(procStat._config)\n\n # For historical reasons, return the beforeByConfig\n beforeByConfig = reverseDict(self._deviceStats._pidToProcess)\n\n return (afterByConfig, afterPidToProcessStats,\n beforeByConfig, newPids, restarted, deadPids,\n missing)", "def update(self, sequence, status_codes, lock):\n if lock is not None:\n lock.acquire()\n\n seq_length = sequence.length\n self._requests_count['main_driver'] += seq_length\n seq_definition = sequence.definition\n seq_hash = sequence.hex_definition\n\n if seq_hash not in self._sequence_statuses:\n self._sequence_statuses[seq_hash] = SequenceStatusCodes(seq_length)\n\n # keep counter before looping over a changing dictionary\n num_test_cases = self.num_test_cases() + 1\n for code in status_codes:\n relative_timestamp = code.timestamp - self._start_time\n if code.status_code not in self._sequence_statuses[seq_hash].request_statuses:\n self._sequence_statuses[seq_hash].request_statuses[code.status_code] = []\n new_req_status = RequestExecutionStatus(\n relative_timestamp, code.request_hex, code.status_code, code.is_fully_valid, code.sequence_failure, num_test_cases=num_test_cases)\n self._sequence_statuses[seq_hash].request_statuses[code.status_code].append(new_req_status)\n\n running_time = int((int(time.time()*10**6) - self._start_time)/ 10**6)\n INTERVAL = 10 # minutes\n if running_time > self.log_counter*INTERVAL*60:\n from utils import logger\n logger.copy_stats(self.log_counter)\n self.log_counter += 1\n\n if lock is not None:\n lock.release()", "def set_offset_for_processes(self):\n processes = self.get_processes()\n if (len(processes) == 0):\n print \"Not enough servers up yet. 
Cannot synchronize clocks.\"\n return \"Cannot synchronize clocks yet.\"\n servers = list(processes.itervalues())\n\n local_time = time.time()\n times = [server.get_time_in_seconds() for server in servers]\n avg_time = (sum(times) + local_time)/(len(times) + 1.0)\n\n self.offset = avg_time - local_time\n for s, t in zip(servers, times):\n s.set_offset(avg_time - t)\n\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n return \"Clocks synchronized.\"", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def update(self, service):\n self.service.update(service)\n if service['running']:\n self.status = None, time.time()\n else:\n self.status = service['exit_code'], time.time()", "def pending_address_changes(self, pending_address_changes):\n\n self._pending_address_changes = pending_address_changes", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def _update(self):\n candidates = _find_running_exe(path.join(self.run_dir, \"osiris\"))\n\n try:\n if not candidates: # No process running found\n self.processes = None\n # Try to find a job in queue\n jobs = _get_grid_jobs()\n if not jobs: # Either no qstat or empty list\n self.running_mode = \"\"\n else:\n script_path = path.abspath(path.join(self.run_dir, \"start.sh\"))\n valid_jobs = list(filter(lambda j: j[\"script\"] == script_path, jobs))\n if valid_jobs:\n if len(valid_jobs) > 1:\n logger.warning(\"More than one grid job was found for the run.\")\n self.job = valid_jobs[0]\n self.running_mode = \"grid\"\n else: # No queued job\n self.running_mode = \"\"\n\n else:\n self.processes = list(map(psutil.Process, candidates))\n self.running_mode = \"local\"\n\n except 
psutil.NoSuchProcess:\n # If the processes have died before processing was completed.\n self.processes = None\n self.running_mode = \"\"", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n 
method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def update_status(self):\n num_nbrs = len(self.neighbors)\n if not 2 <= num_nbrs <= 3:\n self.status = 0\n elif num_nbrs == 3:\n self.status = 1", "def performance_stats(self):\n current_status = psutil.STATUS_DEAD\n try:\n current_status = self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n self.process_manager.handle_status_change(self.process_index, round(self.ioloop.time(), 2), current_status)\n\n if current_status != psutil.STATUS_DEAD:\n self.ioloop.call_later(0.5, self.performance_stats)", "def _update_attrs(self) -> None:\n self._attr_is_locked = self._lock.is_locked\n self._attr_is_jammed = self._lock.is_jammed\n # Only update changed_by if we get a valid value. This way a previous\n # value will stay intact if the latest log message isn't related to a\n # lock state change.\n if changed_by := self._lock.last_changed_by(self._lock_data.logs):\n self._attr_changed_by = changed_by" ]
[ "0.63074714", "0.61598605", "0.5967854", "0.587815", "0.5851434", "0.5803371", "0.57687116", "0.5440648", "0.5423083", "0.5397243", "0.5386455", "0.5369842", "0.5331419", "0.5302193", "0.52979624", "0.5289811", "0.52506447", "0.5244542", "0.52427137", "0.5232442", "0.5212697", "0.52122176", "0.5189281", "0.5178618", "0.51692206", "0.516691", "0.51450217", "0.5131513", "0.5113795", "0.51126397" ]
0.6196417
1
Add a new process to the process list.
def add_process(self, process): self.processes[process.namespec()] = process
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_process(self):\n process_id = str(self.processBox.currentText())\n\n arguments = {}\n\n for row in range(0, self.processTableWidget.rowCount()):\n p_id = \"\"\n val = None\n\n if self.processTableWidget.item(row, 0):\n p_id = self.processTableWidget.item(row, 0).text()\n if self.processTableWidget.item(row, 2):\n val = self.processTableWidget.item(row, 2).text()\n if len(val) > 0:\n try:\n val = json.loads(val)\n except json.JSONDecodeError:\n pass\n else:\n val = None\n if p_id != \"\":\n if val:\n arguments[p_id] = val\n\n self.processgraph = self.processgraph.add_process(process_id, arguments)\n # Refresh process graph in GUI\n self.reload_processgraph_view()", "def add_process(self, model_id, n_cores, n_time, s_time):\n p = Process(n_cores=n_cores, time_needed=n_time, model_id=model_id, start_time=s_time)\n self.process_list.append(p)", "async def add_process(self, ctx, process, name):\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f\"The process {process} is already being displayed\")\n elif name in PROCESSES.values():\n await ctx.send(f\"The process name {name} is already being displayed\")\n\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been added\")", "def register_proc(self, pid: int):\n self.processes.add(pid)", "def add_process(self, type, name=None, config=None):\n assert name is not None, 'must specify name for now'\n node = Process(type=type, name=name, config=config)\n self.procs[name] = node\n return node", "def addProcessObject(self, process, env, uid=None, gid=None):\n name = process.getName()\n self.processes[name] = (process, env, uid, gid)\n self.delay[name] = self.minRestartDelay\n if self.running:\n self.startProcess(name)", "def addProcess(self, name, args, uid=None, gid=None, env={}):\n class SimpleProcessObject(object):\n\n def starting(self):\n pass\n\n def stopped(self):\n pass\n\n def getName(self):\n return name\n\n def getCommandLine(self):\n return args\n\n def getFileDescriptors(self):\n return []\n\n self.addProcessObject(SimpleProcessObject(), env, uid, gid)", "def _RegisterProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n if process.pid in self._processes_per_pid:\n raise KeyError(\n 'Already managing process: {0!s} (PID: {1:d})'.format(\n process.name, process.pid))\n\n self._processes_per_pid[process.pid] = process", "def alloc_proc(self, process, delta_t):\n\t\tself._process_list.append(process)", "def addProcess(self, name, proc_config):\n if self.processes.has_key(name):\n raise KeyError(\"remove %s first\" % name)\n p = self.engineProtocol()\n p.service = self\n p.name = name\n proc_config.processProtocol = p\n self.processes[name] = proc_config\n if self.running:\n self.startProcess(name)\n return p.deferred", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def graph_add_process(self, process_id, args) -> 'ImageCollection':\n graph = {\n 'process_id': process_id,\n 'args': args\n }\n\n return RESTProcesses(graph, self.connection)", "def appendProcess(self, pid: int, numberOfVariables: int, processTable, diagnostics) -> int:\n self.memory[pid] = []\n\n for _i in range(numberOfVariables):\n self.memory[pid].append(MemoryItem())\n\n return 0", "def addTask(self, task):\n self.tasklist.append(task)", "def add_node(self, managed_process_pb):\n node = ProcessNode(managed_process_pb)\n\n if node.name not in self.nodes:\n self.nodes[node.name] = node\n self.logger.info('Created node for [{}]'.format(node.name))\n else:\n self.logger.error(\n 'Detected request to add a managed process using the name [{}] which is already taken'.format(\n node.name))\n raise DuplicateManagedProcessName(\n 'Cannot have more than one managed process with name [{}]'.format(node.name))\n\n return node", "def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])", "def add_child(self, pid):\n self._children_ids.append(pid)\n self._sort_children()", "def addProcessGroup(self, name):\r\n self._update('addProcessGroup')\r\n\r\n for config in self.supervisord.options.process_group_configs:\r\n if config.name == name:\r\n result = self.supervisord.add_process_group(config)\r\n if not result:\r\n raise RPCError(Faults.ALREADY_ADDED, name)\r\n return True\r\n raise RPCError(Faults.BAD_NAME, name)", "def add(self, p):\n self._pumps.add(p)", "def add_task(self, task):\n # assert issubclass(type(task), Predictor) # not necessary..\n try:\n assert task.processes <= self.processes\n if self.folder: task.folder = self.folder # override with this \n if self.save_all_tasks: task.save = True\n if self.verbose >= 2: task.verbose = True\n self.tasks.append(task)\n if self.verbose: print '[Parallel] added task', task.name, 'with', task.processes, 'proces{}.'.format('ses' if self.processes > 1 else '')\n except AttributeError:\n raise Exception('No processes found!')", "def add_page(self,pid):\n pgs = self.get_pages_list()\n if len(pgs) == 1 and pgs[0] == '':\n pgs[0] = str(pid)\n else:\n pgs.append(str(pid))\n self.put_pages_list(pgs)", "def _register_process(self, process_instance, name):\n # Add process instance to container's process dict\n if name in self.procs_by_name:\n log.warn(\"Process name already registered in container: %s\" % name)\n self.procs_by_name[name] = process_instance\n self.procs[process_instance.id] = 
process_instance\n\n # Add Process to resource registry\n # Note: In general the Process resource should be created by the CEI PD, but not all processes are CEI\n # processes. How to deal with this?\n process_instance.errcause = \"registering\"\n\n if process_instance._proc_type != IMMEDIATE_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n proc_obj = Process(name=process_instance.id, label=name, proctype=process_instance._proc_type)\n proc_id, _ = self.container.resource_registry.create(proc_obj)\n process_instance._proc_res_id = proc_id\n\n # Associate process with container resource\n self.container.resource_registry.create_association(self.cc_id, \"hasProcess\", proc_id)\n else:\n process_instance._proc_res_id = None\n\n # Process type specific registration\n # TODO: Factor out into type specific handler functions\n if process_instance._proc_type == SERVICE_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n # Registration of SERVICE process: in resource registry\n service_list, _ = self.container.resource_registry.find_resources(restype=\"Service\", name=process_instance.name, id_only=True)\n if service_list:\n process_instance._proc_svc_id = service_list[0]\n if len(service_list) > 1:\n log.warn(\"More than 1 Service resource found with name %s: %s\", process_instance.name, service_list)\n else:\n # We are starting the first process of a service instance\n # TODO: This should be created by the HA Service agent in the future\n svc_obj = Service(name=process_instance.name, exchange_name=process_instance._proc_listen_name, state=ServiceStateEnum.READY)\n process_instance._proc_svc_id, _ = self.container.resource_registry.create(svc_obj)\n\n # Create association to service definition resource\n svcdef_list, _ = self.container.resource_registry.find_resources(restype=\"ServiceDefinition\",\n name=process_instance.name, id_only=True)\n if svcdef_list:\n if len(svcdef_list) > 1:\n log.warn(\"More than 1 ServiceDefinition resource found with name %s: %s\", process_instance.name, svcdef_list)\n self.container.resource_registry.create_association(process_instance._proc_svc_id,\n \"hasServiceDefinition\", svcdef_list[0])\n else:\n log.error(\"Cannot find ServiceDefinition resource for %s\", process_instance.name)\n\n self.container.resource_registry.create_association(process_instance._proc_svc_id, \"hasProcess\", proc_id)\n\n elif process_instance._proc_type == AGENT_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.DIRECTORY):\n # Registration of AGENT process: in Directory\n caps = process_instance.get_capabilities()\n self.container.directory.register(\"/Agents\", process_instance.id,\n **dict(name=process_instance._proc_name,\n container=process_instance.container.id,\n resource_id=process_instance.resource_id,\n agent_id=process_instance.agent_id,\n def_id=process_instance.agent_def_id,\n capabilities=caps))\n\n self._call_proc_state_changed(process_instance, ProcessStateEnum.RUNNING)", "def graph_add_process(self, process_id, args) -> 'ImageCollection':\n graph = {\n 'process_id': process_id,\n\n }\n\n for key, value in args.items():\n graph[key] = value\n\n #graph = {\n # 'process_id': process_id,\n # 'args': args\n #}\n\n return RestImagery(graph, self.session)", "def start_processing(self, task, task_name, ignore_SIGINT=False) -> None:\n self.poll_process_done()\n print(definitions.PRINT_CODES[0] + blue(\"Processes in queue before add: \"), blue(len(self.process_queue)))\n\n def 
preexec_function():\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n if ignore_SIGINT:\n sp = subprocess.Popen(task, preexec_fn=preexec_function)\n else:\n sp = subprocess.Popen(task)\n\n self.process_queue.append((task_name, sp))\n\n print(definitions.PRINT_CODES[0] + blue(\"Processes in queue after add: \"), blue(len(self.process_queue)))", "def add_post(self, post: Post) -> None:\n self.post_process.append(post)", "def add_from_proc(self, proc_dir):\n for dir_name in sorted(os.listdir(proc_dir)):\n if re.match(r'^[0-9]+$', dir_name):\n self.add_from_piddir(os.path.join(proc_dir, dir_name))", "def new_process() -> Process:\n return multiprocessing.Process()", "def addMonitoring(process):\n import FWCore.ParameterSet.Config as cms\n \n process.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\",\n jobReportOutputOnly = cms.untracked.bool(True)\n )\n process.Timing = cms.Service(\"Timing\",\n summaryOnly = cms.untracked.bool(True)\n )\n \n return process", "def add(self, proc: ImageProcessor):\n self.chain.append(proc)\n return self", "def addSlavePid(self, pid):\n if self._logger is not None:\n self._logger.debug('Adding slave PID ' + str(pid))\n if not pid in self._all_processes_pid: # Make sure we don't add twice a PID\n self._all_processes_pid += [pid] # Add" ]
[ "0.76126385", "0.7507758", "0.7315577", "0.72722274", "0.72574365", "0.71727246", "0.68514043", "0.6843939", "0.6840627", "0.6770633", "0.6737064", "0.67065763", "0.6432417", "0.6367946", "0.63575804", "0.6237238", "0.61873573", "0.61138415", "0.61019224", "0.6021345", "0.60172594", "0.58842874", "0.5881574", "0.58118963", "0.5750207", "0.5741674", "0.5721125", "0.5711494", "0.5704384", "0.56955653" ]
0.83901864
0
Return the process running on the address. Here, 'running' means that the process state is in Supervisor RUNNING_STATES.
def running_processes(self): return [process for process in self.processes.values() if process.running_on(self.address_name)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def running(self):\n info = self.info()\n return info['running']", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def get_running_status(self):\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n return dict_processor\n else:\n return False", "def running_state(self) -> int | None:\n return self.cluster.get(\"running_state\")", "def get_process(self, pid):\n return self.processes.get(pid, None)", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def is_process_running(self, name):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Checking to see if the process {} is \"\n \"running\".format(log_tag, name))\n return self.get_pids(name) is not None", "def get_process_by_port(port):\n pcons = [proc for proc in psutil.net_connections() if proc.laddr.port == port and proc.status == \"LISTEN\"]\n if pcons:\n pid = pcons[0].pid\n if not pid:\n raise j.exceptions.Runtime(\"No pid found maybe permission denied on the process\")\n return psutil.Process(pid)", "def _get_running_node(self, node_name):\n nodes = self._driver.list_nodes()\n node = [i for i in nodes if (i.name == node_name and i.state != 'terminated')]\n return (len(node) > 0 and node or None)", "def pid_is_running(pid):\n try:\n os.kill(pid, 0)\n\n except OSError:\n return\n\n else:\n return pid", "def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs", "def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def port(self):\n if self._state == JobState.RUNNING:\n return self._process.port\n return None", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def getPidRunningStatus(self, seg):\n (postmasterPidFileExists, tempFileExists, lockFileExists, netstatPortActive, 
pidValue) = \\\n gp.chk_local_db_running(seg.getSegmentDataDirectory(), seg.getSegmentPort())\n\n return {\n 'postmasterPidFileExists' : postmasterPidFileExists,\n 'tempFileExists' : tempFileExists,\n 'lockFileExists' : lockFileExists,\n 'netstatPortActive' : netstatPortActive,\n 'pidValue' : pidValue\n }", "def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False", "def get_overcloud_nodes_running_process(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n oc_nodes_running_process = oc_procs_df.query('PROCESS==\"{}\"'.format(\n process))['overcloud_node'].unique()\n return oc_nodes_running_process", "def is_running(program):\n return program in get_running()", "def pid(self):\n return self._query_status()['pid']", "def get_running_pris(self):\n try:\n running_pris_list = []\n output = self.ssh.exec_command(self.check_running_kombu_dialer_command)\n for line in output[1].readlines():\n line = line.split()\n if self.server in line and \"-g\" in line:\n running_pris_list.append(\n int(\n line[line.index(\"-g\")+1][2:]\n )\n )\n return running_pris_list\n except Exception as err:\n self.error_logger.error(err.message + \" PRITester::get_running_pris\")\n return None", "def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid == pid)\n return process.is_alive()", "def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def _QueryProcessStatus(self, process):\n process_is_alive = process.is_alive()\n if not process_is_alive:\n return None\n\n rpc_client = self._rpc_clients_per_pid.get(process.pid, None)\n return rpc_client.CallFunction()", "def is_running(self):\n return self.type_id == STATE_RUNNING", "def status(self):\n pid = self.pid()\n if pid is None or not pid_exists(pid):\n return False\n\n process = Process(pid)\n try:\n for connection in process.connections():\n if connection.status == 'LISTEN' and \\\n connection.laddr[1] == self.port:\n return True\n except AccessDenied:\n return False\n\n return False", "def is_running(self):\n\t\treturn self._running", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? -BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)" ]
[ "0.61827284", "0.605477", "0.5984106", "0.59605074", "0.5830705", "0.5814387", "0.57667416", "0.57487637", "0.57090694", "0.5694411", "0.5676516", "0.5661446", "0.5649374", "0.5617451", "0.56060237", "0.55911523", "0.55796784", "0.5576243", "0.55739254", "0.55647177", "0.5558564", "0.5558167", "0.5543049", "0.5539878", "0.55385584", "0.55162203", "0.54997295", "0.5493566", "0.548994", "0.54804546" ]
0.6297126
0
Return the process running on the address and having a pid. Different from running_processes_on because it excludes the states STARTING and BACKOFF
def pid_processes(self): return [(process.namespec(), process.infos[self.address_name]['pid']) for process in self.processes.values() if process.pid_running_on(self.address_name)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_process(self, pid):\n return self.processes.get(pid, None)", "def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def get_process_by_port(port):\n pcons = [proc for proc in psutil.net_connections() if proc.laddr.port == port and proc.status == \"LISTEN\"]\n if pcons:\n pid = pcons[0].pid\n if not pid:\n raise j.exceptions.Runtime(\"No pid found maybe permission denied on the process\")\n return psutil.Process(pid)", "def pid(self):\n return self._process.pid", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def pid(self):\n return self._get_process_id()", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? -BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def pid(self):\n return self._query_status()['pid']", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def pid_is_running(pid):\n try:\n os.kill(pid, 0)\n\n except OSError:\n return\n\n else:\n return pid", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = 
''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. 
\"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def get_pid_processor(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get info about processor\n # that PID was scheduled on last time\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n proc_stat = stat_file.readline().strip().split(' ')\n return int(proc_stat[39])\n except EnvironmentError:\n return -1", "def _pid(self, name):\n return self.pid_lookup[name]", "def get_my_process():\n return get_process_object(os.getpid())", "def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)", "def pfind(pid):\n for p in list_foreach(\"allproc\", \"p_list\"):\n if p['p_pid'].cast(gdb.lookup_type(\"int\")) == pid:\n return p\n raise gdb.error(\"No process with pid {} exists\".format(pid))", "def status(pid_file):\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid" ]
[ "0.7015587", "0.670613", "0.6585871", "0.6583493", "0.64844155", "0.64499414", "0.6438885", "0.64362067", "0.6434716", "0.63813657", "0.63745505", "0.63590276", "0.63359684", "0.6330875", "0.63202155", "0.62983465", "0.62884915", "0.62884915", "0.6279087", "0.62753826", "0.6270416", "0.6269101", "0.62552744", "0.6242815", "0.62151974", "0.62145275", "0.62132734", "0.61312455", "0.6125759", "0.6122484" ]
0.70801574
0
Return the loading of the address, by summing the declared loading of the processes running on that address
def loading(self): loading = sum(process.rules.expected_loading for process in self.running_processes()) self.logger.debug('address={} loading={}'. format(self.address_name, loading)) return loading
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_load(self):\n nn = [p.n for p in self._procs]\n return sum(nn), min(nn)", "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def _sample_load(proc):\n return 0.01 * _for_process_and_descendants(\n psutil.Process.get_cpu_percent,\n proc,\n )", "def get_idumpload(self):\n return self.read_register(4102, 1, 3)", "def get_elf_load_base_addr(afile):\n # Using cached result if possible\n if afile in g_elf_load_base_addr_db:\n return g_elf_load_base_addr_db[afile]\n addr = get_entry_addr(afile)\n verbose(afile + \" Entry point address is: \" + hex(addr), LEVEL_1)\n alignment = get_elf_load_alignment(afile)\n #verbose(afile + \" LOAD alignment is: \" + str(alignment), LEVEL_1)\n base_addr = addr & ~(alignment - 1) ### align to LOAD alignment, 65536 by default\n g_elf_load_base_addr_db[afile] = base_addr\n return base_addr", "def totalEffectiveLoad(self):\n return sum(s.effectiveLoad() for s in self.dispatcher.statuses)", "def get_cpu_load (processor_number=0):\n\ttry:\n\t\tf = open(\"/proc/stat\", \"r\")\n\t\ttmp = f.readlines(2000)\n\t\tf.close()\n\texcept:\n\t\tprint _(\"Failed to open /proc/stat\")\n\t\treturn None\n\tif processor_number == 0 : sufix = ''\n\telse: sufix = str(processor_number -1)\n\tline = tmp[processor_number]\n\n\tif line.startswith(\"cpu%s\"% (sufix)):\n\t\tcuse = float( line.split()[1] )\n\t\tcn = float( line.split()[2] )\n\t\tcsys = float( line.split()[3])\n\t\tif sufix == '':\n\t\t\tload = cuse + cn\n\t\telse:\n\t\t\tload = cuse + csys + cn\n\t\t#load = int(load / .update_interval)\n\t\treturn load\n\treturn None", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def loadbyproc(path, parameter, nproc):\n vals = []\n for iproc in range(nproc):\n vals += [loadbin(path, iproc, parameter)]\n return vals", "def _get_address_calculation(segment, index, file_name):\n\n if segment == \"constant\": # Temp starts at 5\n load_bytecode = [f\"@{index}\", \"D=A\"]\n\n elif segment == \"temp\":\n load_bytecode = [f\"@{int(index) + 5}\", \"D=A\"]\n\n elif segment == \"static\":\n variable_name = file_name + \".\" + index\n load_bytecode = [f\"@{variable_name}\", \"D=A\"]\n\n elif segment == \"pointer\":\n if index == \"0\":\n register = \"THIS\"\n else:\n register = \"THAT\"\n\n load_bytecode = [f\"@{register}\", \"D=A\"]\n\n else:\n load_bytecode = [f\"@{VirtualMachineLibrary._get_symbolic_symbol(segment)}\", \"D=M\", f\"@{index}\", \"D=D+A\"]\n\n full_address_bytecode = load_bytecode + [\"@R13\", \"M=D\"]\n return full_address_bytecode", "def nonentry(runtime_addr):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n binary_addr, _ = movemanager.r2b_checked(runtime_addr)\n 
assert memorymanager.is_data_loaded_at_binary_addr(binary_addr)\n\n trace.cpu.traced_entry_points.add(binary_addr)", "def load(self):\n import string\n import numpy.distutils.proc as numpy_proc\n results = self.load_list()\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%6s: %1.2f,\" % (name[-6:], res['load_1'])\n print s,\n if not ((i+1) % 5):\n print", "def getAddress(self, ins_addr, image_base, image_name = \"\"):\r\n for image in self.loaded_images:\r\n if (image_name == \"\" or image.name == image_name) and image.contains(ins_addr):\r\n return (image_base + image.get_offset(ins_addr), True)\r\n \r\n return (ins_addr, False)", "def get_address(self, addresses, expected_loading):\n self.logger.trace('addresses={} expectedLoading={}'.format(\n addresses, expected_loading))\n # returns the most loaded remote from list that is capable of\n # handling the loading\n loading_validities = self.get_loading_and_validity(addresses,\n expected_loading)\n sorted_addresses = self.sort_valid_by_loading(loading_validities)\n return sorted_addresses[-1][0] if sorted_addresses else None", "def get_min_addr(self):\n\n out = None\n for segment in self.segments:\n if out is None or segment.min_addr < out:\n out = segment.min_addr\n\n if out is None:\n for section in self.sections:\n if out is None or section.min_addr < out:\n out = section.min_addr\n\n if out is None:\n return self.rebase_addr\n else:\n return out + self.rebase_addr", "def get_address(self, addresses, expected_loading):\n self.logger.trace('addresses={} expectedLoading={}'.format(\n addresses, expected_loading))\n # returns the less loaded remote from list that is capable of handling\n # the loading\n loading_validities = self.get_loading_and_validity(\n addresses, expected_loading)\n sorted_addresses = self.sort_valid_by_loading(loading_validities)\n return sorted_addresses[0][0] if sorted_addresses else None", "def setKernelLoadAddress(self):\n\t\tself.kernelloadaddress = self.settings.getKeyValue('kernel.load.address')\n\t\treturn None", "def is_loading_valid(self, address, expected_loading):\n if address in self.context.addresses.keys():\n status = self.context.addresses[address]\n self.logger.trace('address {} state={}'.format(\n address, status.state_string()))\n if status.state == AddressStates.RUNNING:\n loading = status.loading()\n self.logger.debug('address={} loading={} expected_loading={}'.format(\n address, loading, expected_loading))\n return (loading + expected_loading < 100, loading)\n self.logger.debug('address {} not RUNNING'.format(address))\n return (False, 0)", "def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))", "def get_loading_and_validity(self, addresses, expected_loading):\n if '*' in addresses:\n addresses = self.supvisors.address_mapper.addresses\n loading_validities = {address: self.is_loading_valid(address,\n expected_loading)\n for address in addresses}\n self.logger.trace('loading_validities={}'.format(loading_validities))\n return loading_validities", "def get_elf_load_alignment(afile):\n default_load_alignment = 2**16\n objdump_prog = get_config_value(\"objdump\")\n if not objdump_prog:\n objdump_prog = \"objdump\"\n cmd = objdump_prog + \" -p \" + cmd_quote(afile) + \" | grep align | grep LOAD || true\"\n output = get_shell_cmd_output(cmd)\n lines = output.splitlines()\n if not lines:\n return default_load_alignment\n line = lines[0]\n tokens = 
line.split(\" align \")\n if len(tokens) < 2:\n return default_load_alignment\n alignment = tokens[1]\n verbose(afile + \" LOAD alignment is: \" + alignment, LEVEL_1)\n return eval(alignment)", "def readelf_get_info(op):\n ret = {}\n (so, se) = run_command([\"readelf\", \"--file-header\", \"--program-headers\", op])\n match = re.search(r'LOAD\\s+\\S+\\s+(\\S+)\\s+\\S+\\s+(\\S+)\\s+\\S+\\s+RWE', so, re.MULTILINE)\n if match:\n ret[\"base\"] = int(match.group(1), 16)\n ret[\"size\"] = int(match.group(2), 16)\n else:\n raise RuntimeError(\"could not read first PT_LOAD from executable '%s'\" % (op))\n match = re.search(r'Entry\\spoint\\saddress:\\s+(\\S+)', so, re.MULTILINE)\n if match:\n ret[\"entry\"] = int(match.group(1), 16) - ret[\"base\"]\n else:\n raise RuntimeError(\"could not read entry point from executable '%s'\" % (op))\n return ret", "def get_total_n_cpu(self) -> int:", "def get_binary_start_address(target_binary):\n obj_dump = subprocess.Popen([\"objdump\", \"-f\", target_binary],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n results = obj_dump.stdout.read().decode()\n start_address = results.strip()[-10:]\n return start_address", "def load_avg():\n \n with open(Path.proc_loadavg()) as f:\n line = f.readline()\n \n load_avgs = [float(x) for x in line.split()[:3]]\n \n return load_avgs", "def alloc_address(self):\n\t\taddress_num = randint(0, ADDR_SPACE)\n\t\twhile address_num in self.used is True:\n\t\t\taddress_num = randint(0, ADDR_SPACE)\n\t\tself.used.add(address_num)\n\t\treturn address_num", "def get_load_avg():\n \n with open('/proc/loadavg') as f:\n line = f.readline()\n \n return [float(x) for x in line.split()[:3]]", "def get_avg_load(verbose=False):\n output = run(\"top -d0.5 -n4 | grep Cpu\", quiet=True)\n\n # Strip formatting control characters (top output can have a lot of these)\n output = (output.replace('\\x1b(B','')\n .replace('\\x1b[m','')\n .replace('\\x1b[K','')\n .replace('\\x1b[39;49m',''))\n\n output = output.splitlines()\n\n loads = []\n for i in xrange(len(output)):\n # Top output tends to look like\n # Cpu(s): 2.9%us, 0.0%sy, 0.0%ni, ... OR\n # Cpu(s): 2.9% us, 0.0% sy, 0.0% ni, ... OR\n # %Cpu(s): 2.9 us, 0.0 sy, 0.0 ni, ...\n # We use a regex to match the floating point value for percentage load\n regex = re.compile(\n \"\"\"\n .*Cpu\\(s\\): # any chars before \"Cpu(s):\"\n \\s* # any amount of whitespace\n (\\d*.?\\d*) # any digits, <= 1 period, any digits (i.e. any positive float)\n \\s* # any amount of whitespace\n %? # <= 1 percent symbol (some versions of top just have one \"%\" on this line, before \"Cpu(s)\"\n \\s* # any amount of whitespace\n us # total system load appears to be marked \"us\"\n \"\"\", re.VERBOSE)\n\n matches = regex.findall(output[i])\n #print(repr(output[i]))\n if (len(matches) == 1):\n load = float(matches[0])\n loads.append(load)\n else:\n print(\"Error: On host = {Host}, unable to match total cpu load in string\\n{Output}\"\n .format(Host = env.host, Output = output[i]))\n\n # Throw out the first record of CPU load because it always seems to spike\n # briefly after the command is issued.\n loads = loads[1:]\n avg_load = None\n if len(loads) != 0:\n avg_load = sum(loads)/float(len(loads))\n else:\n print(\"Error: On host = {Host}, len(loads) == 0\"\n .format(Host = env.host))\n\n if (verbose):\n print(\"{Host:4} | Average load: {Load:3.2f}%\".format(Host=env.host, Load=avg_load))\n\n return avg_load", "def LocalAddress(self) -> _n_5_t_0:" ]
[ "0.62994903", "0.5876818", "0.577103", "0.5671219", "0.56705534", "0.56662524", "0.55867136", "0.55307513", "0.54727614", "0.5392582", "0.5365475", "0.533915", "0.5335333", "0.5271789", "0.52094007", "0.5200593", "0.519389", "0.5176355", "0.51122624", "0.51087445", "0.5093616", "0.5085855", "0.5041698", "0.50197256", "0.50098157", "0.4999754", "0.49942482", "0.49912903", "0.499126", "0.49855635" ]
0.7182065
0
Returns two lists with the ids of grec_seq_rec and seq_reserva
def get_seq_lists(dataframe_bookings): seq_rec = dataframe_bookings.select('operative_incoming').collect() seq_reserva = dataframe_bookings.select('booking_id').collect() seq_rec = [val[0] for val in list(seq_rec)] seq_reserva = [val[0] for val in list(seq_reserva)] return seq_rec, seq_reserva
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seq_xref_ids(entry):\n\n xref_ids = []\n exon_data = exons(entry)\n for ids in xref_data(entry).values():\n for exon in exon_data:\n for xref_id in ids:\n key = \"{xref_id}-{gene_id}-{chr}:{start}..{stop}\".format(\n xref_id=xref_id,\n gene_id=primary_id(entry),\n chr=exon.chromosome_name,\n start=exon.primary_start,\n stop=exon.primary_end,\n )\n xref_ids.append((key, exon))\n\n return xref_ids", "def get_seq_and_id(fasta_file, promoter_seq, promoter_ids, threshold, scores_file, delimiter):\n\n map_txt = \"DDB_DDB_G/DDB-GeneID-UniProt.txt\"\n df = pd.read_csv(map_txt, sep=\"\\t\")\n ddb_id = list(df['DDBDDB ID'].as_matrix())\n ddb_g_id = list(df['DDB_G ID'].as_matrix())\n\n all_valid_records = get_data_target.get_ids(scores_file, delimiter, 'ID')\n print(all_valid_records)\n sequences = []\n record_ids = []\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n record_id = str(record.id)\n end = record_id.find('|')\n record_id_short = record_id\n if end != -1:\n record_id_short = record_id[:end]\n print(record_id_short)\n try:\n ddbg_record_id_short = ddb_g_id[ddb_id.index(record_id_short)]\n except ValueError:\n ddbg_record_id_short = record_id_short\n if ddbg_record_id_short in all_valid_records:\n record_ids.append(ddbg_record_id_short)\n seq = str(record.seq)[-threshold:]\n sequences.append(seq)\n data_record_ids = pd.DataFrame({\"record_id\": record_ids})\n data_sequences = pd.DataFrame({\"record_sequence\": sequences})\n data_record_ids.to_csv(promoter_ids, index=False, header=False)\n data_sequences.to_csv(promoter_seq, index=False, header=False)", "def getIDs():", "def _id_seq(self):\n return list(self.keys())", "def create_recordid_list(rec_ids):\n rec_list = []\n for row in rec_ids:\n rec_list.append(row[0])\n return rec_list", "def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]", "def get_ids(self) -> List[str]:", "def subseqs_ids(subsequences, sequence):\n return [1 if subsequence in sequence else 0 for subsequence in subsequences]", "def get_recordIds(self):\n record_ids = []\n for item in self.order_items:\n record_ids.append(item.get_recordId())\n \n return record_ids", "def ids(self):\n return frozenset([seq.id for seq in self])", "def sequence_to_list_ids(sequence, vocab):\n pass", "def get_reservations_ids(self, instance_ids=None):\n reservations = self.__get_reservations(instance_ids)\n reservations_ids = []\n for reservation in reservations:\n reservations_ids.append(reservation.id.encode(\"latin-1\"))\n\n return reservations_ids", "def get_ids(self,tokens, tokenizer, max_seq_length):\n token_ids = tokenizer.convert_tokens_to_ids(tokens,)\n input_ids = token_ids + [0] * (max_seq_length-len(token_ids))\n return input_ids", "def get_seq(self): # -> list[Unknown]:\n ...", "def get_training_seqs(self):\r\n # Rdp requires unique sequence IDs without whitespace. Can't\r\n # trust user IDs to not have whitespace, so we replace all\r\n # whitespace with an underscore. 
Classification may fail if\r\n # the replacement method generates a name collision.\r\n for seq_id, node in self.sequence_nodes.iteritems():\r\n seq = self.sequences.get(seq_id)\r\n if seq is not None:\r\n lineage = node.get_lineage()\r\n rdp_id = '%s %s' % (\r\n re.sub('\\s',\r\n '_',\r\n seq_id),\r\n ';'.join(lineage))\r\n yield rdp_id, seq", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def getLigandResIds(ligchemid:str, struct: Structure)->List[Residue]:\n \"\"\"*ligchemids are of type https://www.rcsb.org/ligand/IDS\"\"\"\n ligandResidues: List[Residue] = list(filter(lambda x: x.get_resname() == ligchemid, list( struct.get_residues() )))\n return ligandResidues", "def obtain_DyndbProtein_id_list(dyn_id):\n model=DyndbModel.objects.select_related(\"id_protein\",\"id_complex_molecule\").get(dyndbdynamics__id=dyn_id)\n pdbid=model.pdbid\n prot_li_gpcr=[]\n dprot_li_all=[]\n dprot_li_all_info=[]\n if model.id_protein:\n dprot=model.id_protein\n dprot_li_all=[dprot]\n (prot_li_gpcr,dprot_li_all_info)=retrieve_prot_info(dprot,prot_li_gpcr,dprot_li_all_info)\n else:\n dprot_li_all=DyndbProtein.objects.select_related(\"receptor_id_protein\").filter(dyndbcomplexprotein__id_complex_exp__dyndbcomplexmolecule=model.id_complex_molecule.id)\n for dprot in dprot_li_all:\n (prot_li_gpcr,dprot_li_all_info)=retrieve_prot_info(dprot,prot_li_gpcr,dprot_li_all_info)\n return (prot_li_gpcr, dprot_li_all, dprot_li_all_info,pdbid)", "def genAckList(self):\n\t\tresult = []\n\t\tfor p in self.rSegs:\n\t\t\tresult.append(p.segNo)\n\t\treturn result", "def findsegments(id1, seq1, id2, seq2, minlen):\n\n segments = \"\"\n\n # Initialize list of corresponding residues.\n correspondances = []\n for res in seq1:\n correspondances.append([])\n \n # Main loop.\n for i in range(len(seq1)-minlen):\n seg1 = seq1[i:i+minlen]\n for j in range(len(seq2)-minlen):\n if j not in correspondances[i]:\n seg2 = seq2[j:j+minlen]\n if seg1 == seg2:\n # Look if the segment is longer than minlen.\n segments_equal = True\n prev1 = seg1\n prev2 = seg2\n extend = 1\n while segments_equal == True:\n i_end = i+minlen+extend\n j_end = j+minlen+extend\n ext1 = seq1[i:i_end]\n ext2 = seq2[j:j_end]\n if i_end > len(seq1) or j_end > len(seq2):\n seqend = True\n else:\n seqend = False\n if ext1 != ext2 or seqend == True:\n segments_equal = False\n segments += \"{} \".format(prev1)\n segments += \"{} [{}, {}] \".format(id1, i, i_end-2)\n segments += \" \"\n segments += \"{} [{}, {}] \".format(id2, j, j_end-2)\n segments += \"\\n\"\n # Add residues to correspondance list.\n for k in range(minlen+extend-1):\n l = i+k\n m = j+k\n correspondances[l].append(m)\n prev1 = ext1\n prev2 = ext2\n extend += 1\n\n return segments", "def test_seqids_from_otu_to_seqid(self):\r\n otu_to_seqid = {'0': ['ABC_0', 'DEF_1'], 'x': ['GHI_2']}\r\n self.assertEqual(seqids_from_otu_to_seqid(otu_to_seqid),\r\n set(['ABC_0', 'DEF_1', 'GHI_2']))", "def get_villager_ids(g):\n return [id for id in g.keys()\n if g[id] in ('v', 'b', 's', 'c')]", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n 
instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def get_registration_ids_and_times(self):\n tables = self._get_class_edit_tables(self.soup)\n if not tables:\n return []\n tbody = tables[0].tbody\n if not tbody:\n return []\n regs = []\n for tr in tbody.find_all('tr'):\n regs.append(self._get_reg_id_and_time(tr))\n return regs", "def flowgram_id_to_seq_id_map(seqs):\r\n result = {}\r\n for id_, seq in seqs:\r\n fields = id_.split()\r\n seq_id = id_\r\n flowgram_id = fields[1]\r\n result[flowgram_id] = seq_id\r\n return result", "def get_child_ids(self, job_specifier, project=None, status=None):\n if project is None:\n project = self._project\n id_master = self.get_job_id(project=project, job_specifier=job_specifier)\n if id_master is None:\n return []\n else:\n if status is not None:\n id_lst = self._job_table[\n (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values\n else:\n id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values\n return sorted(id_lst)", "def _rc_seq(self):\n logger.debug(\"Extracting sequences on the reverse strand\")\n sequences_rc = []\n table = str.maketrans({'a': 't', 'c': 'g', 'g': 'c', 't': 'a',\n 'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'})\n for sequence in self.sequences:\n sequences_rc.append(sequence.translate(table)[::-1])\n self.sequences_rc = sequences_rc", "def ids(self):\n return (x[\"_id\"] for x in self.document._meta.collection.find(self.spec, fields = (\"_id\",)))", "def get_genera_sequences_from(self, table):\n log.info(\"Joining the sequences of all the scaffolds with the same genus\")\n if not self.table_exists(table):\n raise ValueError(\"The database does not have table {0}\".format(table))\n # Get all the scaffolds assigned\n sql_command = \"\"\"SELECT {0}.scaffold, {0}.genus, {1}.sequence\n FROM {0}\n INNER JOIN {1}\n WHERE {0}.scaffold={1}.scaffold\n \"\"\".format(table, self.ScaffoldsTable)\n genus2sequence_dict = dict() # dictionary of sequences indexed by genus\n assigned_scaffolds = set()\n cursor = self.execute(sql_command)\n record = cursor.fetchone()\n while record:\n genus = record[\"genus\"]\n if not genus in genus2sequence_dict:\n genus2sequence_dict[genus] = [record[\"sequence\"]]\n else:\n genus2sequence_dict[genus].append(record[\"sequence\"])\n assigned_scaffolds.add(record[\"scaffold\"])\n record = cursor.fetchone()\n # join all sequences\n for genus in genus2sequence_dict:\n genus2sequence_dict[genus] = \"\".join(genus2sequence_dict[genus])\n return genus2sequence_dict, assigned_scaffolds" ]
[ "0.6269399", "0.61774325", "0.60897326", "0.5778329", "0.5776383", "0.5763285", "0.5730644", "0.5721721", "0.57183284", "0.568632", "0.5685907", "0.56623137", "0.55932146", "0.5542651", "0.5516142", "0.5515765", "0.5473292", "0.5469163", "0.54488885", "0.5432289", "0.5427406", "0.53961676", "0.5367193", "0.53545386", "0.53257567", "0.53158116", "0.5269151", "0.5259547", "0.5253886", "0.5237307" ]
0.6646673
0
It calculates the subquery for the field Tax_Sales_Transfer_pricing
def sub_tax_sales_transfer_pricing(manager, df_fields, seq_recs, seq_reservas): # df_hotel = manager.get_dataframe(tables['dwc_bok_t_canco_hotel']) # df_circuit = manager.get_dataframe(tables['dwc_bok_t_canco_hotel_circuit']) # df_other = manager.get_dataframe(tables['dwc_bok_t_canco_other']) # df_transfer = manager.get_dataframe(tables['dwc_bok_t_canco_transfer']) # df_endow = manager.get_dataframe(tables['dwc_bok_t_canco_endowments']) # df_extra = manager.get_dataframe(tables['dwc_bok_t_canco_extra']) df_aux = df_fields.select("operative_incoming", "booking_id") df_hotel = sub_tax_sales_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux) df_circuit = sub_tax_sales_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux) df_other = sub_tax_sales_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux) df_transfer = sub_tax_sales_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux) df_endow = sub_tax_sales_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux) df_extra = sub_tax_sales_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux) df_impuesto_canal = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union( df_extra) df_impuesto_canal = df_impuesto_canal.groupBy("seq_rec", "seq_reserva") \ .agg({'impuesto_canal': 'sum'}).withColumnRenamed("SUM(impuesto_canal)", "Tax_Sales_Transfer_pricing") df_fields = df_fields.join(df_impuesto_canal, [df_fields.operative_incoming == df_impuesto_canal.seq_rec, df_fields.booking_id == df_impuesto_canal.seq_reserva], 'left_outer').drop(df_impuesto_canal.seq_rec).drop(df_impuesto_canal.seq_reserva) df_fields = df_fields.na.fill({"Tax_Sales_Transfer_pricing": 0}) df_fields = df_fields.withColumn("Tax_Sales_Transfer_pricing", udf_round_ccy(df_fields.Tax_Sales_Transfer_pricing, df_fields.booking_currency)) del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canal return df_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub_tax_cost_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_cost_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_cost_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_cost_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_cost_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_cost_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_cost_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\", udf_round_ccy(df_fields.Tax_Cost_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def sub_tax_cost_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = 
df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.lst_price,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def sub_tax_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, 
df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price2 = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.list_price2,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n new_price = price\n if line.lot_id and line.product_id.tracking in ['lot','serial']:\n lot_id = self.env['stock.production.lot'].search([('name', '=', line.lot_id), ('product_id', '=', line.product_id.id)])\n if lot_id.tax_ids.filtered(lambda tax: tax.amount_type == 'based_on_margin'):\n if lot_id.cost_price:\n new_price -= lot_id.cost_price\n sh_tax = line.tax_id.filtered(lambda tax: tax.amount_type =='based_on_margin').compute_all(new_price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n taxes = line.tax_id.filtered(lambda tax: tax.amount_type !='based_on_margin').compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n print(taxes)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])) + sum(t.get('amount', 0.0) for t in sh_tax.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if self.env.context.get('import_file', False) and not self.env.user.user_has_groups('account.group_account_manager'):\n 
line.tax_id.invalidate_cache(['invoice_repartition_line_ids'], [line.tax_id.id])", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def sub_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\",\n \"creation_date\", \"booking_currency\")\n\n df_hotel = sub_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", udf_round_ccy(df_fields.Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, 
transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def calculate_taxes(self, proforma, technologies):\n tax_calcs = copy.deepcopy(proforma)\n # 1) Redistribute capital cost according to the DER's MACRS value to get depreciation\n for der_inst in technologies:\n tax_contribution = der_inst.tax_contribution(self.macrs_depreciation,\n tax_calcs.index, self.start_year)\n if tax_contribution is not None:\n tax_calcs = pd.concat([tax_calcs, tax_contribution], axis=1)\n # 2) calculate yearly_net (taking into account the taxable contribution of each technology\n # asset)\n yearly_net = tax_calcs.sum(axis=1)\n tax_calcs['Taxable Yearly Net'] = yearly_net\n\n # 3) Calculate State tax based on the net cash flows in each year\n tax_calcs['State Tax Burden'] = yearly_net * -self.state_tax_rate\n\n # 4) Calculate Federal tax based on the net cash flow in each year minus State taxes\n # from that year\n yearly_net_post_state_tax = yearly_net + tax_calcs['State Tax Burden']\n tax_calcs['Federal Tax Burden'] = yearly_net_post_state_tax * -self.federal_tax_rate\n\n # 5) Add the overall tax burden (= state tax + federal tax) to proforma\n tax_calcs['Overall Tax Burden'] = tax_calcs['State Tax Burden'] + tax_calcs['Federal Tax Burden']\n proforma['State Tax Burden'] = tax_calcs['State Tax Burden']\n proforma['Federal Tax Burden'] = tax_calcs['Federal Tax Burden']\n proforma['Overall Tax Burden'] = tax_calcs['Overall Tax Burden']\n self.tax_calculations = tax_calcs\n return proforma", "def sub_total():\n return sum(SAVE_PRICE)", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def tax(subtotal, discount):\n return (subtotal - discount) * 0.12", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def tax_calculator(tax, cost):\n return float(tax * cost)", "def _get_toal_sp_(obj):\n \n fTotalSP = 0.0\n for item in obj.order_line:\n fTotalSP += item.price_subtotal\n \n return fTotalSP", "def _compute_amount(self):\n for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 
'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })", "def _compute_gasto_subtotal(self):\n for sub in self:\n sub.recurring_total = sum(\n line.gasto for line in sub.animales_ids)", "def calculate_total_price(total, taxes):\n total_price = total + taxes\n return total_price", "def get_quote_taxation(self):\n if ProductInfo.taxation:\n total, discount = self.get_total_quote_price(), self.get_quote_discount()\n return (total - discount) * 0.09\n else:\n return 0", "def get_final_quote_price(self):\n total, discount, taxation = self.get_total_quote_price(), self.get_quote_discount(), self.get_quote_taxation()\n return (total - discount) + taxation", "def subtotal(balance,selected_product):\n balance = balance + ((selected_product[\"price\"]))\n return balance", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100", "def basket_total_before_discounts_incl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_incl_tax\"))\n return result[\"total\"]", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total", "def compute_all(self, price_unit, currency=None, quantity=1.0, product=None, partner=None):\n if len(self) == 0:\n company_id = self.env.user.company_id\n else:\n company_id = self[0].company_id\n if not currency:\n currency = company_id.currency_id\n taxes = []\n # By default, for each tax, tax amount will first be computed\n # and rounded at the 'Account' decimal precision for each\n # PO/SO/invoice line and then these rounded amounts will be\n # summed, leading to the total amount for that tax. But, if the\n # company has tax_calculation_rounding_method = round_globally,\n # we still follow the same method, but we use a much larger\n # precision when we round the tax amount for each line (we use\n # the 'Account' decimal precision + 5), and that way it's like\n # rounding after the sum of the tax amounts of each line\n prec = currency.decimal_places\n\n # In some cases, it is necessary to force/prevent the rounding of the tax and the total\n # amounts. For example, in SO/PO line, we don't want to round the price unit at the\n # precision of the currency.\n # The context key 'round' allows to force the standard behavior.\n round_tax = False if company_id.tax_calculation_rounding_method == 'round_globally' else True\n round_total = True\n if 'round' in self.env.context:\n round_tax = bool(self.env.context['round'])\n round_total = bool(self.env.context['round'])\n\n if not round_tax:\n prec += 5\n\n base_values = self.env.context.get('base_values')\n if not base_values:\n total_excluded = total_included = base = round(price_unit * quantity, prec)\n else:\n total_excluded, total_included, base = base_values\n\n # Sorting key is mandatory in this case. When no key is provided, sorted() will perform a\n # search. 
However, the search method is overridden in account.tax in order to add a domain\n # depending on the context. This domain might filter out some taxes from self, e.g. in the\n # case of group taxes.\n for tax in self.sorted(key=lambda r: r.sequence):\n # Allow forcing price_include/include_base_amount through the context for the reconciliation widget.\n # See task 24014.\n price_include = self._context.get('force_price_include', tax.price_include)\n\n if tax.amount_type == 'group':\n children = tax.children_tax_ids.with_context(base_values=(total_excluded, total_included, base))\n ret = children.compute_all(price_unit, currency, quantity, product, partner)\n total_excluded = ret['total_excluded']\n base = ret['base'] if tax.include_base_amount else base\n total_included = ret['total_included']\n tax_amount = total_included - total_excluded\n taxes += ret['taxes']\n continue\n\n tax_amount = tax._compute_amount(base, price_unit, quantity, product, partner)\n if not round_tax:\n tax_amount = round(tax_amount, prec)\n else:\n tax_amount = currency.round(tax_amount)\n\n if price_include:\n total_excluded -= tax_amount\n base -= tax_amount\n else:\n total_included += tax_amount\n\n # Keep base amount used for the current tax\n tax_base = base\n\n if tax.include_base_amount:\n base += tax_amount\n\n taxes.append({\n 'id': tax.id,\n 'name': tax.with_context(**{'lang': partner.lang} if partner else {}).name,\n 'amount': tax_amount,\n 'base': tax_base,\n 'sequence': tax.sequence,\n 'account_id': tax.account_id.id,\n 'refund_account_id': tax.refund_account_id.id,\n 'analytic': tax.analytic,\n 'price_include': tax.price_include,\n 'tax_exigibility': tax.tax_exigibility,\n })\n\n return {\n 'taxes': sorted(taxes, key=lambda k: k['sequence']),\n 'total_excluded': currency.round(total_excluded) if round_total else total_excluded,\n 'total_included': currency.round(total_included) if round_total else total_included,\n 'base': base,\n }", "def get_price_including_tax(article):\n price_with_tax = article.select(\"tr\")\n return price_with_tax[3].td.text", "def fee_VS_tx_value(df):\n\n total_fees = df['Tx fees (USD)']\n tx_vol_USD = df['Tx Volume (USD)']\n result = total_fees.div(tx_vol_USD)\n result.name = 'Tx Fees / Tx Volume'\n return out(SETTINGS, df, result)" ]
[ "0.6498725", "0.637397", "0.63043004", "0.62905616", "0.6242464", "0.59541345", "0.5881588", "0.5839122", "0.5801799", "0.57798594", "0.5711908", "0.5685891", "0.5622643", "0.55404156", "0.5537151", "0.5537049", "0.5476859", "0.5469033", "0.5468134", "0.5453045", "0.54431385", "0.543735", "0.54269195", "0.542448", "0.53557813", "0.5330717", "0.5325669", "0.5321068", "0.53150374", "0.5311057" ]
0.66625917
0
It calculates the subquery for the field Tax_Cost_Transfer_pricing
def sub_tax_cost_transfer_pricing(manager, df_fields, seq_recs, seq_reservas): # df_hotel = manager.get_dataframe(tables["dwc_bok_t_canco_hotel"]) # df_circuit = manager.get_dataframe(tables["dwc_bok_t_canco_hotel_circuit"]) # df_other = manager.get_dataframe(tables["dwc_bok_t_canco_other"]) # df_transfer = manager.get_dataframe(tables["dwc_bok_t_canco_transfer"]) # df_endow = manager.get_dataframe(tables["dwc_bok_t_canco_endowments"]) # df_extra = manager.get_dataframe(tables["dwc_bok_t_canco_extra"]) df_aux = df_fields.select("operative_incoming", "booking_id", "invoicing_company", "creation_date", "booking_currency") df_hotel = sub_tax_cost_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux) df_circuit = sub_tax_cost_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux) df_other = sub_tax_cost_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux) df_transfer = sub_tax_cost_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux) df_endow = sub_tax_cost_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux) df_extra = sub_tax_cost_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux) df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union( df_extra) df_impuesto_canco = df_impuesto_canco.groupBy("operative_incoming", "booking_id") \ .agg({'impuesto_canco': 'sum'}).withColumnRenamed("SUM(impuesto_canco)", "impuesto_canco") df_impuesto_canco = df_impuesto_canco.withColumnRenamed("operative_incoming", "seq_rec") \ .withColumnRenamed("booking_id", "seq_res") # add impuesto_canco df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec, df_fields.booking_id == df_impuesto_canco.seq_res], 'left_outer').drop("seq_rec", "seq_res") df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas) df_addcanco = df_addcanco.withColumnRenamed("operative_incoming", "seq_rec") \ .withColumnRenamed("booking_id", "seq_res") # add add_impuesto_canco df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec, df_fields.booking_id == df_addcanco.seq_res], "left_outer").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res) df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0}) df_fields = df_fields.withColumn("Tax_Cost_Transfer_pricing", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \ .drop("impuesto_canco", "add_impuesto_canco") df_fields = df_fields.withColumn("Tax_Cost_Transfer_pricing", udf_round_ccy(df_fields.Tax_Cost_Transfer_pricing, df_fields.booking_currency)) del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux return df_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub_tax_cost_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def sub_tax_sales_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables['dwc_bok_t_canco_hotel'])\n # df_circuit = manager.get_dataframe(tables['dwc_bok_t_canco_hotel_circuit'])\n # df_other = manager.get_dataframe(tables['dwc_bok_t_canco_other'])\n # df_transfer = manager.get_dataframe(tables['dwc_bok_t_canco_transfer'])\n # df_endow = manager.get_dataframe(tables['dwc_bok_t_canco_endowments'])\n # df_extra = manager.get_dataframe(tables['dwc_bok_t_canco_extra'])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\")\n\n df_hotel = sub_tax_sales_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = 
sub_tax_sales_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_sales_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_sales_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_sales_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_sales_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canal = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canal = df_impuesto_canal.groupBy(\"seq_rec\", \"seq_reserva\") \\\n .agg({'impuesto_canal': 'sum'}).withColumnRenamed(\"SUM(impuesto_canal)\", \"Tax_Sales_Transfer_pricing\")\n\n df_fields = df_fields.join(df_impuesto_canal, [df_fields.operative_incoming == df_impuesto_canal.seq_rec,\n df_fields.booking_id == df_impuesto_canal.seq_reserva],\n 'left_outer').drop(df_impuesto_canal.seq_rec).drop(df_impuesto_canal.seq_reserva)\n\n df_fields = df_fields.na.fill({\"Tax_Sales_Transfer_pricing\": 0})\n\n df_fields = df_fields.withColumn(\"Tax_Sales_Transfer_pricing\",\n udf_round_ccy(df_fields.Tax_Sales_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canal\n\n return df_fields", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.lst_price,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price2 = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.list_price2,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def sub_tax_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = 
df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n new_price = price\n if line.lot_id and line.product_id.tracking in ['lot','serial']:\n lot_id = self.env['stock.production.lot'].search([('name', '=', line.lot_id), ('product_id', '=', line.product_id.id)])\n if lot_id.tax_ids.filtered(lambda tax: tax.amount_type == 'based_on_margin'):\n if lot_id.cost_price:\n new_price -= lot_id.cost_price\n sh_tax = line.tax_id.filtered(lambda tax: tax.amount_type =='based_on_margin').compute_all(new_price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n taxes = line.tax_id.filtered(lambda tax: tax.amount_type !='based_on_margin').compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n print(taxes)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])) + sum(t.get('amount', 0.0) for t in sh_tax.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if self.env.context.get('import_file', False) and not self.env.user.user_has_groups('account.group_account_manager'):\n line.tax_id.invalidate_cache(['invoice_repartition_line_ids'], [line.tax_id.id])", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def tax_calculator(tax, cost):\n return float(tax * cost)", "def calculate_taxes(self, proforma, technologies):\n tax_calcs = copy.deepcopy(proforma)\n # 1) Redistribute capital cost according to the DER's MACRS value to get depreciation\n for der_inst in technologies:\n tax_contribution = der_inst.tax_contribution(self.macrs_depreciation,\n tax_calcs.index, self.start_year)\n if tax_contribution is not None:\n tax_calcs = pd.concat([tax_calcs, tax_contribution], axis=1)\n # 2) calculate yearly_net (taking into account 
the taxable contribution of each technology\n # asset)\n yearly_net = tax_calcs.sum(axis=1)\n tax_calcs['Taxable Yearly Net'] = yearly_net\n\n # 3) Calculate State tax based on the net cash flows in each year\n tax_calcs['State Tax Burden'] = yearly_net * -self.state_tax_rate\n\n # 4) Calculate Federal tax based on the net cash flow in each year minus State taxes\n # from that year\n yearly_net_post_state_tax = yearly_net + tax_calcs['State Tax Burden']\n tax_calcs['Federal Tax Burden'] = yearly_net_post_state_tax * -self.federal_tax_rate\n\n # 5) Add the overall tax burden (= state tax + federal tax) to proforma\n tax_calcs['Overall Tax Burden'] = tax_calcs['State Tax Burden'] + tax_calcs['Federal Tax Burden']\n proforma['State Tax Burden'] = tax_calcs['State Tax Burden']\n proforma['Federal Tax Burden'] = tax_calcs['Federal Tax Burden']\n proforma['Overall Tax Burden'] = tax_calcs['Overall Tax Burden']\n self.tax_calculations = tax_calcs\n return proforma", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def sub_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\",\n \"creation_date\", \"booking_currency\")\n\n df_hotel = sub_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, 
[df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", udf_round_ccy(df_fields.Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def _compute_gasto_subtotal(self):\n for sub in self:\n sub.recurring_total = sum(\n line.gasto for line in sub.animales_ids)", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def _compute_amount(self):\n for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })", "def get_final_quote_price(self):\n total, discount, taxation = self.get_total_quote_price(), self.get_quote_discount(), self.get_quote_taxation()\n return (total - discount) + taxation", "def tax(subtotal, discount):\n return (subtotal - discount) * 0.12", "def transport_cost_per_t(self):\n return safe_divide(self.reseller.operating_expenses(), self.quantity_fieldside)", "def _compute_calculate_cost(self):\n for order in self:\n amount_calculate_cost = 0.0\n for line in order.order_line:\n amount_calculate_cost += (line.product_id.standard_price * line.product_uom_qty)\n order.update({\n 'amount_calculate_cost': amount_calculate_cost\n })", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def trading_cost(self) -> float:\n return self.__trading_cost", "def get_quote_taxation(self):\n 
if ProductInfo.taxation:\n total, discount = self.get_total_quote_price(), self.get_quote_discount()\n return (total - discount) * 0.09\n else:\n return 0", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100", "def _get_toal_cp_(obj):\n \n fTotal = 0.0\n for item in obj.order_line:\n fTotal += item.purchase_price * item.product_uom_qty\n \n return fTotal", "def sub_total():\n return sum(SAVE_PRICE)", "def subtotal(balance,selected_product):\n balance = balance + ((selected_product[\"price\"]))\n return balance", "def total_discount_incl_tax(self):\n discount = D(\"0.00\")\n for line in self.lines.all():\n discount += line.discount_incl_tax\n return discount", "def _compute_amount_subtotal(self):\n for lines in self:\n lines.price_subtotal = lines.price_unit * lines.order_qty" ]
[ "0.65499413", "0.6401581", "0.62985355", "0.6264737", "0.6263573", "0.6209262", "0.5951682", "0.59322137", "0.5912796", "0.58500355", "0.5839168", "0.5828151", "0.57621986", "0.56807303", "0.5652692", "0.5645219", "0.5632515", "0.5612633", "0.55991054", "0.5577054", "0.5562695", "0.55541265", "0.5542165", "0.5540884", "0.5527066", "0.5523361", "0.5505042", "0.5454349", "0.5431109", "0.5428763" ]
0.6695558
0
It calculates the subquery for the field Tax_Transfer_pricing_EUR
def sub_tax_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas): # df_hotel = manager.get_dataframe(tables["dwc_bok_t_canco_hotel"]) # df_circuit = manager.get_dataframe(tables["dwc_bok_t_canco_hotel_circuit"]) # df_other = manager.get_dataframe(tables["dwc_bok_t_canco_other"]) # df_transfer = manager.get_dataframe(tables["dwc_bok_t_canco_transfer"]) # df_endow = manager.get_dataframe(tables["dwc_bok_t_canco_endowments"]) # df_extra = manager.get_dataframe(tables["dwc_bok_t_canco_extra"]) df_aux = df_fields.select("operative_incoming", "booking_id", "invoicing_company", "creation_date", "booking_currency") df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux) df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux) df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux) df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux) df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux) df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux) df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union( df_extra) df_impuesto_canco = df_impuesto_canco.groupBy("operative_incoming", "booking_id") \ .agg({'impuesto_canco': 'sum'}).withColumnRenamed("SUM(impuesto_canco)", "impuesto_canco") df_impuesto_canco = df_impuesto_canco.withColumnRenamed("operative_incoming", "seq_rec") \ .withColumnRenamed("booking_id", "seq_res") df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec, df_fields.booking_id == df_impuesto_canco.seq_res], 'left_outer').drop("seq_rec", "seq_res") df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR) df_addcanco = df_addcanco.withColumnRenamed("operative_incoming", "seq_rec") \ .withColumnRenamed("booking_id", "seq_res") # add add_impuesto_canco df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec, df_fields.booking_id == df_addcanco.seq_res], "left_outer").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res) df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0}) df_fields = df_fields.withColumn("Tax_Transfer_pricing_EUR", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \ .drop("impuesto_canco", "add_impuesto_canco") del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco return df_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub_tax_cost_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def sub_tax_cost_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = 
sub_tax_cost_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_cost_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_cost_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_cost_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_cost_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_cost_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\", udf_round_ccy(df_fields.Tax_Cost_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def sub_tax_sales_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables['dwc_bok_t_canco_hotel'])\n # df_circuit = manager.get_dataframe(tables['dwc_bok_t_canco_hotel_circuit'])\n # df_other = manager.get_dataframe(tables['dwc_bok_t_canco_other'])\n # df_transfer = manager.get_dataframe(tables['dwc_bok_t_canco_transfer'])\n # df_endow = manager.get_dataframe(tables['dwc_bok_t_canco_endowments'])\n # df_extra = manager.get_dataframe(tables['dwc_bok_t_canco_extra'])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\")\n\n df_hotel = sub_tax_sales_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_sales_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_sales_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_sales_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_sales_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_sales_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canal = 
df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canal = df_impuesto_canal.groupBy(\"seq_rec\", \"seq_reserva\") \\\n .agg({'impuesto_canal': 'sum'}).withColumnRenamed(\"SUM(impuesto_canal)\", \"Tax_Sales_Transfer_pricing\")\n\n df_fields = df_fields.join(df_impuesto_canal, [df_fields.operative_incoming == df_impuesto_canal.seq_rec,\n df_fields.booking_id == df_impuesto_canal.seq_reserva],\n 'left_outer').drop(df_impuesto_canal.seq_rec).drop(df_impuesto_canal.seq_reserva)\n\n df_fields = df_fields.na.fill({\"Tax_Sales_Transfer_pricing\": 0})\n\n df_fields = df_fields.withColumn(\"Tax_Sales_Transfer_pricing\",\n udf_round_ccy(df_fields.Tax_Sales_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canal\n\n return df_fields", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.lst_price,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price2 = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.list_price2,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def get_final_quote_price(self):\n total, discount, taxation = self.get_total_quote_price(), self.get_quote_discount(), self.get_quote_taxation()\n return (total - discount) + taxation", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n new_price = price\n if line.lot_id and line.product_id.tracking in ['lot','serial']:\n lot_id = self.env['stock.production.lot'].search([('name', '=', line.lot_id), ('product_id', '=', line.product_id.id)])\n if lot_id.tax_ids.filtered(lambda tax: tax.amount_type == 'based_on_margin'):\n if lot_id.cost_price:\n new_price -= lot_id.cost_price\n sh_tax = line.tax_id.filtered(lambda tax: tax.amount_type =='based_on_margin').compute_all(new_price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n taxes = line.tax_id.filtered(lambda tax: tax.amount_type !='based_on_margin').compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n print(taxes)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])) + sum(t.get('amount', 0.0) for t in sh_tax.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if self.env.context.get('import_file', False) and not self.env.user.user_has_groups('account.group_account_manager'):\n line.tax_id.invalidate_cache(['invoice_repartition_line_ids'], 
[line.tax_id.id])", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def _get_price_unit(self):\n self.ensure_one()\n if self.subcontract_line_id and self.product_id.id == self.subcontract_line_id.product_id.id:\n line = self.subcontract_line_id\n order = line.order_id\n price_unit = line.price_unit\n if line.taxes_id:\n price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=1.0)['total_excluded']\n if line.product_uom.id != line.product_id.uom_id.id:\n price_unit *= line.product_uom.factor / line.product_id.uom_id.factor\n if order.currency_id != order.company_id.currency_id:\n price_unit = order.currency_id._convert(\n price_unit, order.company_id.currency_id, order.company_id, self.date, round=False)\n return price_unit\n return super(StockMove, self)._get_price_unit()", "def somme(self) -> Numeric:\n return query_sum(\n self.offre_set.filter(valide=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def sub_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\",\n \"creation_date\", \"booking_currency\")\n\n df_hotel = sub_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = 
df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", udf_round_ccy(df_fields.Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def calculate_taxes(self, proforma, technologies):\n tax_calcs = copy.deepcopy(proforma)\n # 1) Redistribute capital cost according to the DER's MACRS value to get depreciation\n for der_inst in technologies:\n tax_contribution = der_inst.tax_contribution(self.macrs_depreciation,\n tax_calcs.index, self.start_year)\n if tax_contribution is not None:\n tax_calcs = pd.concat([tax_calcs, tax_contribution], axis=1)\n # 2) calculate yearly_net (taking into account the taxable contribution of each technology\n # asset)\n yearly_net = tax_calcs.sum(axis=1)\n tax_calcs['Taxable Yearly Net'] = yearly_net\n\n # 3) Calculate State tax based on the net cash flows in each year\n tax_calcs['State Tax Burden'] = yearly_net * -self.state_tax_rate\n\n # 4) Calculate Federal tax based on the net cash flow in each year minus State taxes\n # from that year\n yearly_net_post_state_tax = yearly_net + tax_calcs['State Tax Burden']\n tax_calcs['Federal Tax Burden'] = yearly_net_post_state_tax * -self.federal_tax_rate\n\n # 5) Add the overall tax burden (= state tax + federal tax) to proforma\n tax_calcs['Overall Tax Burden'] = tax_calcs['State Tax Burden'] + tax_calcs['Federal Tax Burden']\n proforma['State Tax Burden'] = tax_calcs['State Tax Burden']\n proforma['Federal Tax Burden'] = tax_calcs['Federal Tax Burden']\n proforma['Overall Tax Burden'] = tax_calcs['Overall Tax Burden']\n self.tax_calculations = tax_calcs\n return proforma", "def sub_total():\n return sum(SAVE_PRICE)", "def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):\n\n self.ensure_one()\n\n if self.amount_type != 'margin':\n return super(AccountTax, self)._compute_amount(\n base_amount,\n price_unit,\n quantity=quantity,\n product=product,\n partner=partner\n )\n\n return base_amount - (base_amount / (1 + self.amount / 100))", "def _get_price_unit(self):\n self.ensure_one()\n if self.purchase_line_id and self.product_id.id == self.purchase_line_id.product_id.id:\n price_unit_prec = self.env['decimal.precision'].precision_get('Product Price')\n line = self.purchase_line_id\n order = line.order_id\n price_unit = line.price_unit\n if line.taxes_id:\n qty = line.product_qty or 1\n price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=qty)['total_void']\n # price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=1.0)['total_excluded']\n price_unit = float_round(price_unit / qty, precision_digits=price_unit_prec)\n if 
line.product_uom.id != line.product_id.uom_id.id:\n price_unit *= line.product_uom.factor / line.product_id.uom_id.factor\n if order.currency_id != order.company_id.currency_id:\n # The date must be today, and not the date of the move since the move move is still\n # in assigned state. However, the move date is the scheduled date until move is\n # done, then date of actual move processing. See:\n # https://github.com/odoo/odoo/blob/2f789b6863407e63f90b3a2d4cc3be09815f7002/addons/stock/models/stock_move.py#L36\n price_unit = order.currency_id._convert(\n price_unit, order.company_id.currency_id, order.company_id, self.picking_id.fecha_tipo_cambio, round=False)\n return price_unit\n return super(StockMove, self)._get_price_unit()", "def subtotal(balance,selected_product):\n balance = balance + ((selected_product[\"price\"]))\n return balance", "def get_quote_taxation(self):\n if ProductInfo.taxation:\n total, discount = self.get_total_quote_price(), self.get_quote_discount()\n return (total - discount) * 0.09\n else:\n return 0", "def fee_VS_tx_value(df):\n\n total_fees = df['Tx fees (USD)']\n tx_vol_USD = df['Tx Volume (USD)']\n result = total_fees.div(tx_vol_USD)\n result.name = 'Tx Fees / Tx Volume'\n return out(SETTINGS, df, result)", "def _compute_amount(self):\n for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def calculate_total_price(total, taxes):\n total_price = total + taxes\n return total_price", "def compute_fee_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n result = 0\n # Check if the session has min threshold and max threshold to get the right value for result\n if supplier_item.get('has_session_fee') and supplier_item.get(\n 'has_minimum_billing_threshold') and supplier_item.get('has_max_session_fee'):\n if supplier_item.get('min_billing_amount', 0) > supplier_item.get('session_fee', 0):\n result = supplier_item.get('min_billing_amount', 0)\n elif supplier_item.get('max_session_fee') > supplier_item['session_fee'] > supplier_item[\n 'min_billing_amount']:\n result = supplier_item.get('session_fee', 0)\n elif supplier_item.get('session_fee', 0) > 
supplier_item.get('max_session_fee'):\n result = supplier_item.get('max_session_fee')\n # Check for min threshold only to get the min bill\n elif supplier_item.get('has_session_fee') and supplier_item.get('has_minimum_billing_threshold'):\n if supplier_item.get('min_billing_amount') > supplier_item.get('session_fee'):\n result = supplier_item.get('min_billing_amount')\n elif supplier_item.get('session_fee') > supplier_item.get('min_billing_amount'):\n result = supplier_item.get('session_fee')\n return result", "def somme(self) -> Numeric:\n return query_sum(self.offres(), \"prix\", output_field=models.DecimalField())", "def get_price_including_tax(article):\n price_with_tax = article.select(\"tr\")\n return price_with_tax[3].td.text", "def _calc_line_base_price(self, cr, uid, line, context=None):\n return line.price_unit", "def get_total(self):\n\n base_price = 5\n total = (1 + int(self.tax)) * int(self.qty) * base_price\n\n return total" ]
[ "0.65058285", "0.6245366", "0.62209433", "0.60142535", "0.601361", "0.588141", "0.5866098", "0.5865709", "0.5844462", "0.5841258", "0.5803155", "0.5662047", "0.56258506", "0.5612775", "0.5599036", "0.55835426", "0.556036", "0.5547852", "0.5484853", "0.5466989", "0.545998", "0.54422355", "0.5426503", "0.54262185", "0.5426117", "0.54002625", "0.53967816", "0.537218", "0.53602844", "0.53496873" ]
0.6462657
1
It calculates the subquery for the field Tax_Cost_Transfer_pricing_EUR
def sub_tax_cost_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas): # df_hotel = manager.get_dataframe(tables["dwc_bok_t_canco_hotel"]) # df_circuit = manager.get_dataframe(tables["dwc_bok_t_canco_hotel_circuit"]) # df_other = manager.get_dataframe(tables["dwc_bok_t_canco_other"]) # df_transfer = manager.get_dataframe(tables["dwc_bok_t_canco_transfer"]) # df_endow = manager.get_dataframe(tables["dwc_bok_t_canco_endowments"]) # df_extra = manager.get_dataframe(tables["dwc_bok_t_canco_extra"]) df_aux = df_fields.select("operative_incoming", "booking_id", "invoicing_company", "creation_date", "booking_currency") df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux) df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux) df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux) df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux) df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux) df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux) df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union( df_extra) df_impuesto_canco = df_impuesto_canco.groupBy("operative_incoming", "booking_id") \ .agg({'impuesto_canco': 'sum'}).withColumnRenamed("SUM(impuesto_canco)", "impuesto_canco") df_impuesto_canco = df_impuesto_canco.withColumnRenamed("operative_incoming", "seq_rec") \ .withColumnRenamed("booking_id", "seq_res") df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec, df_fields.booking_id == df_impuesto_canco.seq_res], 'left_outer').drop("seq_rec", "seq_res") df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR) df_addcanco = df_addcanco.withColumnRenamed("operative_incoming", "seq_rec") \ .withColumnRenamed("booking_id", "seq_res") # add add_impuesto_canco df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec, df_fields.booking_id == df_addcanco.seq_res], "left_outer").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res) df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0}) df_fields = df_fields.withColumn("Tax_Cost_Transfer_pricing_EUR", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \ .drop("impuesto_canco", "add_impuesto_canco") del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco return df_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub_tax_cost_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_cost_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_cost_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_cost_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_cost_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_cost_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_cost_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Tax_Cost_Transfer_pricing\", udf_round_ccy(df_fields.Tax_Cost_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def sub_tax_transfer_pricing_eur(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = 
df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\", \"creation_date\",\n \"booking_currency\")\n\n df_hotel = sub_tax_transfer_pricing_eur_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_transfer_pricing_eur_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_transfer_pricing_eur_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_transfer_pricing_eur_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_transfer_pricing_eur_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_transfer_pricing_eur_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas, EUR)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Tax_Transfer_pricing_EUR\",\n df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco\n\n return df_fields", "def sub_tax_sales_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables['dwc_bok_t_canco_hotel'])\n # df_circuit = manager.get_dataframe(tables['dwc_bok_t_canco_hotel_circuit'])\n # df_other = manager.get_dataframe(tables['dwc_bok_t_canco_other'])\n # df_transfer = manager.get_dataframe(tables['dwc_bok_t_canco_transfer'])\n # df_endow = manager.get_dataframe(tables['dwc_bok_t_canco_endowments'])\n # df_extra = manager.get_dataframe(tables['dwc_bok_t_canco_extra'])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\")\n\n df_hotel = sub_tax_sales_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_tax_sales_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_tax_sales_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_tax_sales_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_tax_sales_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_tax_sales_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canal = 
df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canal = df_impuesto_canal.groupBy(\"seq_rec\", \"seq_reserva\") \\\n .agg({'impuesto_canal': 'sum'}).withColumnRenamed(\"SUM(impuesto_canal)\", \"Tax_Sales_Transfer_pricing\")\n\n df_fields = df_fields.join(df_impuesto_canal, [df_fields.operative_incoming == df_impuesto_canal.seq_rec,\n df_fields.booking_id == df_impuesto_canal.seq_reserva],\n 'left_outer').drop(df_impuesto_canal.seq_rec).drop(df_impuesto_canal.seq_reserva)\n\n df_fields = df_fields.na.fill({\"Tax_Sales_Transfer_pricing\": 0})\n\n df_fields = df_fields.withColumn(\"Tax_Sales_Transfer_pricing\",\n udf_round_ccy(df_fields.Tax_Sales_Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canal\n\n return df_fields", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.lst_price,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def _compute_taxed_lst_price2(self):\n company_id = self._context.get(\n 'company_id', self.env.user.company_id.id)\n for product in self:\n product.taxed_lst_price2 = product.taxes_id.filtered(\n lambda x: x.company_id.id == company_id).compute_all(\n product.list_price2,\n self.env.user.company_id.currency_id,\n product=product)['total_included']", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def get_final_quote_price(self):\n total, discount, taxation = self.get_total_quote_price(), self.get_quote_discount(), self.get_quote_taxation()\n return (total - discount) + taxation", "def calculate_tax(subtotal):\n return \"TAX: \"+format_usd(0.0875*subtotal)", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n new_price = price\n if line.lot_id and line.product_id.tracking in ['lot','serial']:\n lot_id = self.env['stock.production.lot'].search([('name', '=', line.lot_id), ('product_id', '=', line.product_id.id)])\n if lot_id.tax_ids.filtered(lambda tax: tax.amount_type == 'based_on_margin'):\n if lot_id.cost_price:\n new_price -= lot_id.cost_price\n sh_tax = line.tax_id.filtered(lambda tax: tax.amount_type =='based_on_margin').compute_all(new_price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n taxes = line.tax_id.filtered(lambda tax: tax.amount_type !='based_on_margin').compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n print(taxes)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])) + sum(t.get('amount', 0.0) for t in sh_tax.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if self.env.context.get('import_file', False) and not 
self.env.user.user_has_groups('account.group_account_manager'):\n line.tax_id.invalidate_cache(['invoice_repartition_line_ids'], [line.tax_id.id])", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def calculate_taxes(self, proforma, technologies):\n tax_calcs = copy.deepcopy(proforma)\n # 1) Redistribute capital cost according to the DER's MACRS value to get depreciation\n for der_inst in technologies:\n tax_contribution = der_inst.tax_contribution(self.macrs_depreciation,\n tax_calcs.index, self.start_year)\n if tax_contribution is not None:\n tax_calcs = pd.concat([tax_calcs, tax_contribution], axis=1)\n # 2) calculate yearly_net (taking into account the taxable contribution of each technology\n # asset)\n yearly_net = tax_calcs.sum(axis=1)\n tax_calcs['Taxable Yearly Net'] = yearly_net\n\n # 3) Calculate State tax based on the net cash flows in each year\n tax_calcs['State Tax Burden'] = yearly_net * -self.state_tax_rate\n\n # 4) Calculate Federal tax based on the net cash flow in each year minus State taxes\n # from that year\n yearly_net_post_state_tax = yearly_net + tax_calcs['State Tax Burden']\n tax_calcs['Federal Tax Burden'] = yearly_net_post_state_tax * -self.federal_tax_rate\n\n # 5) Add the overall tax burden (= state tax + federal tax) to proforma\n tax_calcs['Overall Tax Burden'] = tax_calcs['State Tax Burden'] + tax_calcs['Federal Tax Burden']\n proforma['State Tax Burden'] = tax_calcs['State Tax Burden']\n proforma['Federal Tax Burden'] = tax_calcs['Federal Tax Burden']\n proforma['Overall Tax Burden'] = tax_calcs['Overall Tax Burden']\n self.tax_calculations = tax_calcs\n return proforma", "def transport_cost_per_t(self):\n return safe_divide(self.reseller.operating_expenses(), self.quantity_fieldside)", "def _get_price_unit(self):\n self.ensure_one()\n if self.subcontract_line_id and self.product_id.id == self.subcontract_line_id.product_id.id:\n line = self.subcontract_line_id\n order = line.order_id\n price_unit = line.price_unit\n if line.taxes_id:\n price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=1.0)['total_excluded']\n if line.product_uom.id != line.product_id.uom_id.id:\n price_unit *= line.product_uom.factor / line.product_id.uom_id.factor\n if order.currency_id != order.company_id.currency_id:\n price_unit = order.currency_id._convert(\n price_unit, order.company_id.currency_id, order.company_id, self.date, round=False)\n return price_unit\n return super(StockMove, self)._get_price_unit()", "def sub_transfer_pricing(manager, df_fields, seq_recs, seq_reservas):\n # df_hotel = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel\"])\n # df_circuit = manager.get_dataframe(tables[\"dwc_bok_t_canco_hotel_circuit\"])\n # df_other = manager.get_dataframe(tables[\"dwc_bok_t_canco_other\"])\n # df_transfer = manager.get_dataframe(tables[\"dwc_bok_t_canco_transfer\"])\n # df_endow = manager.get_dataframe(tables[\"dwc_bok_t_canco_endowments\"])\n # df_extra = manager.get_dataframe(tables[\"dwc_bok_t_canco_extra\"])\n\n df_aux = df_fields.select(\"operative_incoming\", \"booking_id\", \"invoicing_company\",\n 
\"creation_date\", \"booking_currency\")\n\n df_hotel = sub_transfer_pricing_aux(manager, df_hotelt, seq_recs, seq_reservas, df_aux)\n df_circuit = sub_transfer_pricing_aux(manager, df_circuitt, seq_recs, seq_reservas, df_aux)\n df_other = sub_transfer_pricing_aux(manager, df_othert, seq_recs, seq_reservas, df_aux)\n df_transfer = sub_transfer_pricing_aux(manager, df_transfert, seq_recs, seq_reservas, df_aux)\n df_endow = sub_transfer_pricing_aux(manager, df_endowt, seq_recs, seq_reservas, df_aux)\n df_extra = sub_transfer_pricing_aux_extra(manager, df_extrat, seq_recs, seq_reservas, df_aux)\n\n df_impuesto_canco = df_hotel.union(df_circuit).union(df_other).union(df_transfer).union(df_endow).union(\n df_extra)\n\n df_impuesto_canco = df_impuesto_canco.groupBy(\"operative_incoming\", \"booking_id\") \\\n .agg({'impuesto_canco': 'sum'}).withColumnRenamed(\"SUM(impuesto_canco)\", \"impuesto_canco\")\n\n df_impuesto_canco = df_impuesto_canco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add impuesto_canco\n df_fields = df_fields.join(df_impuesto_canco, [df_fields.operative_incoming == df_impuesto_canco.seq_rec,\n df_fields.booking_id == df_impuesto_canco.seq_res],\n 'left_outer').drop(\"seq_rec\", \"seq_res\")\n\n df_addcanco = sub_transfer_pricing_aux_add_canco(manager, df_fields, seq_recs, seq_reservas)\n\n df_addcanco = df_addcanco.withColumnRenamed(\"operative_incoming\", \"seq_rec\") \\\n .withColumnRenamed(\"booking_id\", \"seq_res\")\n\n # add add_impuesto_canco\n df_fields = df_fields.join(df_addcanco, [df_fields.operative_incoming == df_addcanco.seq_rec,\n df_fields.booking_id == df_addcanco.seq_res],\n \"left_outer\").drop(df_addcanco.seq_rec).drop(df_addcanco.seq_res)\n\n df_fields = df_fields.na.fill({'impuesto_canco': 0, 'add_impuesto_canco': 0})\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", df_fields.impuesto_canco + df_fields.add_impuesto_canco) \\\n .drop(\"impuesto_canco\", \"add_impuesto_canco\")\n\n df_fields = df_fields.withColumn(\"Transfer_pricing\", udf_round_ccy(df_fields.Transfer_pricing,\n df_fields.booking_currency))\n\n del df_hotel, df_circuit, df_other, df_transfer, df_endow, df_extra, df_impuesto_canco, df_addcanco, df_aux\n\n return df_fields", "def tax_calculator(tax, cost):\n return float(tax * cost)", "def trading_cost(self) -> float:\n return self.__trading_cost", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def sub_total():\n return sum(SAVE_PRICE)", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def calculate_total_price(total, taxes):\n total_price = total + taxes\n return total_price", "def _compute_amount(self):\n for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })", "def 
compute_fee_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n result = 0\n # Check if the session has min threshold and max threshold to get the right value for result\n if supplier_item.get('has_session_fee') and supplier_item.get(\n 'has_minimum_billing_threshold') and supplier_item.get('has_max_session_fee'):\n if supplier_item.get('min_billing_amount', 0) > supplier_item.get('session_fee', 0):\n result = supplier_item.get('min_billing_amount', 0)\n elif supplier_item.get('max_session_fee') > supplier_item['session_fee'] > supplier_item[\n 'min_billing_amount']:\n result = supplier_item.get('session_fee', 0)\n elif supplier_item.get('session_fee', 0) > supplier_item.get('max_session_fee'):\n result = supplier_item.get('max_session_fee')\n # Check for min threshold only to get the min bill\n elif supplier_item.get('has_session_fee') and supplier_item.get('has_minimum_billing_threshold'):\n if supplier_item.get('min_billing_amount') > supplier_item.get('session_fee'):\n result = supplier_item.get('min_billing_amount')\n elif supplier_item.get('session_fee') > supplier_item.get('min_billing_amount'):\n result = supplier_item.get('session_fee')\n return result", "def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):\n\n self.ensure_one()\n\n if self.amount_type != 'margin':\n return super(AccountTax, self)._compute_amount(\n base_amount,\n price_unit,\n quantity=quantity,\n product=product,\n partner=partner\n )\n\n return base_amount - (base_amount / (1 + self.amount / 100))", "def _get_price_unit(self):\n self.ensure_one()\n if self.purchase_line_id and self.product_id.id == self.purchase_line_id.product_id.id:\n price_unit_prec = self.env['decimal.precision'].precision_get('Product Price')\n line = self.purchase_line_id\n order = line.order_id\n price_unit = line.price_unit\n if line.taxes_id:\n qty = line.product_qty or 1\n price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=qty)['total_void']\n # price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=1.0)['total_excluded']\n price_unit = float_round(price_unit / qty, precision_digits=price_unit_prec)\n if line.product_uom.id != line.product_id.uom_id.id:\n price_unit *= line.product_uom.factor / line.product_id.uom_id.factor\n if order.currency_id != order.company_id.currency_id:\n # The date must be today, and not the date of the move since the move move is still\n # in assigned state. However, the move date is the scheduled date until move is\n # done, then date of actual move processing. 
See:\n # https://github.com/odoo/odoo/blob/2f789b6863407e63f90b3a2d4cc3be09815f7002/addons/stock/models/stock_move.py#L36\n price_unit = order.currency_id._convert(\n price_unit, order.company_id.currency_id, order.company_id, self.picking_id.fecha_tipo_cambio, round=False)\n return price_unit\n return super(StockMove, self)._get_price_unit()", "def subtotal(balance,selected_product):\n balance = balance + ((selected_product[\"price\"]))\n return balance", "def _get_toal_cp_(obj):\n \n fTotal = 0.0\n for item in obj.order_line:\n fTotal += item.purchase_price * item.product_uom_qty\n \n return fTotal", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def calculate_price(self):\n\n cargo_weight = self.cargo.weight\n tax_rate = Decimal(0.18)\n\n untaxed_total = Decimal(cargo_weight) * Decimal(self.price_per_unit_weight)\n\n total_price = (untaxed_total * tax_rate) + untaxed_total\n\n return total_price", "def _compute_amount_subtotal(self):\n for lines in self:\n lines.price_subtotal = lines.price_unit * lines.order_qty" ]
[ "0.64423364", "0.6376252", "0.6182043", "0.61586916", "0.6076942", "0.6071446", "0.59395057", "0.5931084", "0.586201", "0.5850323", "0.5799269", "0.57859534", "0.5713411", "0.5647553", "0.5607687", "0.5596589", "0.5572917", "0.5561368", "0.5530456", "0.5508553", "0.5498966", "0.54938394", "0.5492412", "0.5464626", "0.54608977", "0.5452935", "0.545083", "0.54312783", "0.5430803", "0.5415995" ]
0.66247785
0
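The record above pairs its query with a PySpark routine whose core pattern is: build per-product tax DataFrames, union them, aggregate impuesto_canco per (operative_incoming, booking_id), left-join the sums back onto the main DataFrame, fill missing taxes with 0, and derive the output column. Below is a minimal, self-contained sketch of that pattern, not the project's actual code: the local SparkSession, the toy DataFrames and values, and the names df_tax / df_out are illustrative assumptions, the helper functions (sub_tax_transfer_pricing_eur_aux, df_hotelt, the add_impuesto_canco branch) are omitted for brevity, while the column names operative_incoming, booking_id, impuesto_canco, seq_rec, seq_res and Tax_Cost_Transfer_pricing_EUR are taken from the record.

from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.master("local[1]").appName("tax_pattern_sketch").getOrCreate()

# Main booking-level DataFrame (stand-in for df_fields).
df_fields = spark.createDataFrame(
    [(1, 100), (1, 101), (2, 200)],
    ["operative_incoming", "booking_id"])

# Two per-product tax DataFrames (stand-ins for the hotel/circuit/... results).
df_hotel = spark.createDataFrame(
    [(1, 100, 5.0)], ["operative_incoming", "booking_id", "impuesto_canco"])
df_extra = spark.createDataFrame(
    [(1, 100, 2.5), (2, 200, 1.0)], ["operative_incoming", "booking_id", "impuesto_canco"])

# union -> groupBy/sum, then rename the keys so the join below can drop them cleanly.
df_tax = (df_hotel.union(df_extra)
          .groupBy("operative_incoming", "booking_id")
          .agg(F.sum("impuesto_canco").alias("impuesto_canco"))
          .withColumnRenamed("operative_incoming", "seq_rec")
          .withColumnRenamed("booking_id", "seq_res"))

# Left-join the aggregated taxes back, give tax-free bookings a 0, derive the output column.
df_out = (df_fields.join(df_tax,
                         [df_fields.operative_incoming == df_tax.seq_rec,
                          df_fields.booking_id == df_tax.seq_res],
                         "left_outer")
          .drop("seq_rec", "seq_res")
          .na.fill({"impuesto_canco": 0})
          .withColumn("Tax_Cost_Transfer_pricing_EUR", F.col("impuesto_canco"))
          .drop("impuesto_canco"))

df_out.show()

In the original document the same join/fill step is repeated for a second component (add_impuesto_canco) and the final column is the sum of both; the sketch keeps a single component to stay short.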
This function constructs the integrator so that it is compatible with the casadi environment, for the equations of the model and the objective function with a variable time step.
def integrator_model(self): xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \ = self.DAE_system() dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u), 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)} opts = {'tf': self.tf / self.nk} # interval length F = integrator('F', 'idas', dae, opts) # model = functools.partial(solver, np.zeros(np.shape(xa))) return F
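For context on how the construction above is typically driven, here is a minimal CasADi sketch of the same idea: a tiny hand-written DAE (one differential state, one algebraic state, one control) packed into the {'x', 'z', 'p', 'ode', 'alg'} dictionary and passed to integrator('F', 'idas', dae, opts) with the interval length in the 'tf' option, exactly as the document does (CasADi 3.x syntax; newer releases also accept explicit t0/tf arguments). The toy equations, the tf value and the initial values are assumptions for illustration only and do not come from the dataset's DAE_system; only the symbol names xd, xa, u and the integrator call mirror the record.

import casadi as ca

# Toy DAE: one differential state, one algebraic state, one control/parameter.
xd = ca.SX.sym('xd')          # differential state
xa = ca.SX.sym('xa')          # algebraic state
u = ca.SX.sym('u')            # control, held constant over the interval

dae = {'x': xd, 'z': xa, 'p': u,
       'ode': -xd + u,        # dxd/dt = f(xd, u)   (made-up dynamics)
       'alg': xa - xd**2}     # 0 = g(xd, xa)       (made-up algebraic constraint)

opts = {'tf': 0.1}            # interval length, analogous to tf / nk above
F = ca.integrator('F', 'idas', dae, opts)

# One integration step from xd(0) = 1, with a guess for xa and u = 0.5.
res = F(x0=1.0, z0=1.0, p=0.5)
print(res['xf'], res['zf'])

The returned object F is then called once per control interval, which is why the document divides the horizon tf by the number of intervals nk when setting the option.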
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integrator_model(self):\n\n xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \\\n = self.DAE_system()\n ODEeq_ = vertcat(*ODEeq)\n\n self.ODEeq = Function('f', [xd, u], [vertcat(*ODEeq)], ['x0', 'p'], ['xdot'])\n\n dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),\n 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}\n opts = {'tf': self.tf / self.nk} # interval length\n F = integrator('F', 'idas', dae, opts)\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return F", "def integrate_casadi(self, problem, y0, t_eval, mass_matrix=None):\n options = {\n \"grid\": t_eval,\n \"reltol\": self.rtol,\n \"abstol\": self.atol,\n \"output_t0\": True,\n }\n options.update(self.extra_options)\n if self.method == \"idas\":\n options[\"calc_ic\"] = True\n\n # set up and solve\n integrator = casadi.integrator(\"F\", self.method, problem, options)\n try:\n # Try solving\n len_rhs = problem[\"x\"].size()[0]\n y0_diff, y0_alg = np.split(y0, [len_rhs])\n sol = integrator(x0=y0_diff, z0=y0_alg)\n y_values = np.concatenate([sol[\"xf\"].full(), sol[\"zf\"].full()])\n return pybamm.Solution(t_eval, y_values, None, None, \"final time\")\n except RuntimeError as e:\n # If it doesn't work raise error\n raise pybamm.SolverError(e.args[0])", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, nsteps=10, timestep=1 * simtk.unit.femtoseconds):\n\n super(HMCIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB * temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow Context updating here, outside of inner loop only.\n #\n self.addUpdateContextState()\n\n #\n # Draw new velocity.\n #\n self.addComputePerDof(\"v\", \"sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Store old position and energy.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n\n #\n # Inner symplectic steps using velocity Verlet.\n #\n for step in range(nsteps):\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Accept/reject step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, 
states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)#, {'error_on_fail':False})\n\n return solver", "def integrate(self, t):", "def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n 
if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)", "def __init__(self,head_offset=0,aquifer_type='unconfined',domain_center=0+0j,\r\n domain_radius=1,H = None,variables=[],priors=[],observations=[]):\r\n \r\n import numpy as np\r\n \r\n # Set potential scaling variables\r\n self.head_offset = head_offset\r\n self.aquifer_type = aquifer_type\r\n self.H = H\r\n \r\n # Set domain scaling variables\r\n self.domain_center = domain_center\r\n self.domain_radius = domain_radius\r\n \r\n if not np.isscalar(self.domain_center):\r\n self.domain_center = self.domain_center[0] + 1j*self.domain_center[1]\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n # Define a list for Analytic Elements\r\n self.elementlist = []\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n self.observations = observations\r\n \r\n # This function scrapes the model and its elements for unknown variables,\r\n # then gives this instance three new variables:\r\n # self.num_params Number of unknown variables\r\n # self.params List of unknown variables\r\n # self.param_names List of names of unknown variables\r\n # self.priors List of prior dictionaries for unknow variables\r\n self.take_parameter_inventory()\r\n \r\n self.linear_solver = False\r\n \r\n # Pre-allocate the function matrix and parameter vector for the linear solver\r\n self.matrix_solver = []\r\n self.params_vector = []", "def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n 
stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd = True,\n movingDomain=False):\n #\n #set the objects describing the method and boundary conditions\n #\n self.movingDomain=movingDomain\n self.tLast_mesh=None\n #\n self.name=name\n self.sd=sd\n self.Hess=False\n self.lowmem=True\n self.timeTerm=True#allow turning off the time derivative\n #self.lowmem=False\n self.testIsTrial=True\n self.phiTrialIsTrial=True\n self.u = uDict\n self.ua = {}#analytical solutions\n self.phi = phiDict\n self.dphi={}\n for ck,phi in phiDict.iteritems():\n if coefficients.potential.has_key(ck):\n for cj in coefficients.potential[ck].keys():\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n else:\n self.dphi[(ck,ck)] = FiniteElementFunction(phi.femSpace)\n #check for nonlinearities in the diffusion coefficient that don't match the potential\n for ci,ckDict in coefficients.diffusion.iteritems():\n #for ck,cjDict in coefficients.diffusion.iteritems(): #cek: bug?\n for ck,cjDict in ckDict.iteritems():\n for cj in cjDict.keys():\n if not self.dphi.has_key((ck,cj)):\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n self.matType = matType\n #try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1,coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n ## Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now\n self.fluxBoundaryConditions=fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n #determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n #cek come back\n if self.stabilization != None:\n for ci in range(self.nc):\n if coefficients.mass.has_key(ci):\n for flag in coefficients.mass[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.advection.has_key(ci):\n for flag in coefficients.advection[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.diffusion.has_key(ci):\n for diffusionDict in coefficients.diffusion[ci].values():\n for flag in diffusionDict.values():\n if flag != 'constant':\n self.stabilizationIsNonlinear=True\n if coefficients.potential.has_key(ci):\n for flag in coefficients.potential[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.reaction.has_key(ci):\n for flag in coefficients.reaction[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n 
if coefficients.hamiltonian.has_key(ci):\n for flag in coefficients.hamiltonian[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n #determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or\n (numericalFluxType != None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n #calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self,self.nFreeVDOF_global)\n #\n #build the quadrature point dictionaries from the input (this\n #is just for convenience so that the input doesn't have to be\n #complete)\n #\n elementQuadratureDict={}\n elemQuadIsDict = isinstance(elementQuadrature,dict)\n if elemQuadIsDict: #set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization != None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature\n if self.shockCapturing != None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if elementQuadrature.has_key(('numDiff',ci,ci)):\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature\n if massLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n if reactionLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n elementBoundaryQuadratureDict={}\n if isinstance(elementBoundaryQuadrature,dict): #set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if elementBoundaryQuadrature.has_key(I):\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n 
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n #mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints,self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global\n #\n #Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*\n self.mesh.nElementBoundaries_element*\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n\n #\n #storage dictionaries\n self.scalars_element = set()\n #\n #simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q={}\n self.ebq={}\n self.ebq_global={}\n self.ebqe={}\n self.phi_ip={}\n #mesh\n self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')\n self.q['det(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['abs(det(J))'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['J'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.q['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['g'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n self.ebqe['inverse(J)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['hat(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['bar(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['sqrt(det(g))'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.ebqe[('n')] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n #shape\n self.q[('v',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w',0)] = self.q[('v',0)]\n self.q[('grad(v)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)',0)] = 
self.q[('grad(v)',0)]\n self.q[('grad(w)*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)*dV_f',0)] = self.q[('grad(w)*dV',0)]\n #todo get rid of dV_{f,a}, etc\n self.q[('w*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w*dV_m',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','grad(w)*dV','grad(w)*dV_f','w*dV','w*dV_m']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.q[key_ci] = self.q[key_0]\n #ELLAM weights stiffness, body integrals by dt\n for ci in range(self.nc):\n self.q[('dt*grad(w)*dV',ci)]= numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[ci],self.nSpace_global),'d')\n #\n self.ebqe[('v',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n self.ebqe[('w',0)] = self.ebqe[('v',0)]\n self.ebqe[('grad(v)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.ebqe[('w*dS_f',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','w*dS_f']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.ebqe[key_ci] = self.ebqe[key_0]\n\n for ci in range(self.nc):\n self.q[('u',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('grad(u)',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n #f\n for ci in self.coefficients.advection.keys():\n self.q[('f',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.q[('df',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n self.ebqe[('f',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.ebqe[('df',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n\n #a, linear dispersion single component\n\n for ci,ckDict in self.coefficients.diffusion.iteritems():\n for ck,cjDict in ckDict.iteritems():\n for flag in cjDict.values():\n assert flag == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n\n if self.coefficients.sdInfo != None and (ci,ck) in self.coefficients.sdInfo.keys():\n self.q[('a',ci,ck)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 
'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n\n else:\n self.q[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n #dense storage\n self.q[('grad(w)*dV_a',ci,ck)] = self.q[('grad(w)*dV_f',ci)]\n self.q[('dt*grad(w)*dV_a',ci,ck)]= self.q[('dt*grad(w)*dV',ci)]\n #ci,ckDict\n #linear potential only for now, need to change for e.g., Buckley Leverett\n for ck in self.phi.keys():\n self.phi[ck].dof[:]=self.u[ck].dof\n self.q[('grad(phi)',ck)] = self.q[('grad(u)',ck)]\n for key in self.dphi.keys():\n self.dphi[key].dof.fill(1.0)\n self.q[('dphi',key[0],key[1])] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n\n\n# if self.coefficients.diffusion.has_key(0):\n# for ck,flag in self.coefficients.diffusion[0][0].iteritems():\n# assert self.coefficients.diffusion[0][0][ck] == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n# if self.coefficients.sdInfo != None and (0,0) in self.coefficients.sdInfo.keys():\n# self.q[('a',0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.q[('da',0,0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n\n# else:\n# self.q[('a',0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.q[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# 
self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# #\n# self.phi[0].dof[:]=self.u[0].dof\n# self.dphi[(0,0)].dof.fill(1.0)\n# self.q[('grad(phi)',0)] = self.q[('grad(u)',0)]\n# self.q[('dphi',0,0)] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n# self.q[('grad(w)*dV_a',0,0)] = self.q[('grad(w)*dV_f',0)]\n# self.q[('dt*grad(w)*dV_a',0,0)]= self.q[('dt*grad(w)*dV',0)]\n\n #r 'constant' ie not a function of solution but go ahead and include dr for now\n for ci,cjDict in self.coefficients.reaction.iteritems():\n self.q[('r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dr',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('w*dV_r',ci)] = self.q[('w*dV',ci)]\n self.q[('dt*w*dV_r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.ebqe[('r',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #m\n for ci,cjDict in self.coefficients.mass.iteritems():\n self.q[('m',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dm',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('mt',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_last',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp',ci)] = self.q[('m',ci)]\n self.q[('cfl',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('numDiff',ci,ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.ebqe[('m',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n for cj in cjDict.keys():\n self.ebqe[('dm',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n\n ###ellam specific options with defauls here\n self.ellamDiscretization = ELLAMtools.ELLAMdiscretization(self,options)\n\n #\n self.needEBQ = options.needEBQ #could need for analytical velocity evaluation with RT0,BDM\n\n #beg normal stuff allocating things\n self.points_elementBoundaryQuadrature= set()\n self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature= set()\n self.tensors_elementBoundaryQuadrature= set()\n\n if self.needEBQ:\n for k in ['x','hat(x)']:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq['n'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n self.ebq['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),'d')\n #allocate the metric tensor\n self.ebq['g'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n log(memory(\"element boundary 
quadrature\",\"LADRellam\"),level=4)\n ebq_keys = ['sqrt(det(g))']\n ebq_keys.extend([('u',ci) for ci in range(self.nc)])\n for k in ebq_keys:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #test and trial info\n self.ebq[('w',0)] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[0]),'d')\n for ci in range(1,self.nc):\n self.ebq[('w',ci)] = self.ebq[('w',0)]\n for ci in range(self.nc):\n self.ebq[('v',ci)] = self.ebq[('w',0)]\n\n #ebq_global info\n self.ebq_global['x'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq_global['n'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n #\n # allocate residual and Jacobian storage\n #\n self.elementResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementSpatialResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementJacobian = {}\n for ci in range(self.nc):\n self.elementJacobian[ci]={}\n for cj in range(self.nc):\n if cj in self.coefficients.stencil[ci]:\n self.elementJacobian[ci][cj] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci],\n self.nDOF_trial_element[cj]),\n 'd')\n #\n self.fluxJacobian_exterior = {}\n for ci in range(self.nc):\n self.fluxJacobian_exterior[ci]={}\n for cj in self.coefficients.stencil[ci]:\n self.fluxJacobian_exterior[ci][cj] = numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[cj]),\n 'd')\n\n #\n #\n #\n #\n log(memory(\"element and element boundary Jacobians\",\"OneLevelTransport\"),level=4)\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')\n self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')\n self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n #identify the internal nodes this is ought to be in mesh\n ##\\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global,i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')\n for nI,n in enumerate(self.internalNodes):\n self.internalNodesArray[nI]=n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\",2)\n self.updateLocal2Global()\n log(\"Building time integration object\",2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\"OneLevelTransport\"),level=4)\n 
#mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options != None:\n self.timeIntegration.setFromOptions(options)\n log(memory(\"TimeIntegration\",\"OneLevelTransport\"),level=4)\n log(\"Calculating numerical quadrature formulas\",2)\n self.calculateQuadrature()\n #lay out components/equations contiguously for now\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]\n self.stride = [1 for ci in range(self.nc)]\n #use contiguous layout of components for parallel, requires weak DBC's\n comm = Comm.get()\n self.comm=comm\n if comm.size() > 1:\n assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,\"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [ci]\n self.stride = [self.nc for ci in range(self.nc)]\n #\n log(memory(\"stride+offset\",\"OneLevelTransport\"),level=4)\n if numericalFluxType != None:\n if options == None or options.periodicDirichletConditions == None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n #set penalty terms\n #cek todo move into numerical flux initialization\n if self.ebq_global.has_key('penalty'):\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n #penalty term\n #cek move to Numerical flux initialization\n if self.ebqe.has_key('penalty'):\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power\n log(memory(\"numericalFlux\",\"OneLevelTransport\"),level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n #use post processing tools to get conservative fluxes, None by default\n import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n log(memory(\"velocity postprocessor\",\"OneLevelTransport\"),level=4)\n #helper for writing out data storage\n import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n #TODO get rid of this\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n if 
self.coefficients.advection.has_key(ci):\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux,'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux,'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {}\n for ci in range(self.nc):\n self.numericalFlux.isDOFBoundary[ci]= numpy.zeros(self.ebqe[('u',ci)].shape,'i')\n if not hasattr(self.numericalFlux,'ebqe'):\n self.numericalFlux.ebqe = {}\n for ci in range(self.nc):\n self.numericalFlux.ebqe[('u',ci)]= numpy.zeros(self.ebqe[('u',ci)].shape,'d')", "def integrate(self, dt):\n f = lambda t, y: self.Cm * (np.dot(self.phi, self.activation(y)) + np.dot(self.phi_input, self.activation(self.input)) - self.Gm * y)\n self.Vm = rk4(self.t, self.Vm, self.t + dt, f)", "def __init__(self,delta, advect, diffuse, maxvel, dim=1):\n\n print 'Initializing RK4 integrator';\n self.dx = delta;\n self.cflConstant = 0.5;\n if(dim == 3):\n print 'In 3D mode';\n self.dfdt = advect[0];\n self.diffusion = diffuse[0];\n self.integrate = self.integrate3D;\n self.maxvelocity = maxvel;\n\n elif(dim == 2):\n print 'In 2D mode';\n self.dfdt = advect[0];\n self.diffusion = diffuse[0];\n self.integrate = self.integrate2D;\n self.maxvelocity = maxvel;\n\n elif (dim == 1):\n print 'In 1D mode';\n self.dfdt = advect[0];\n self.diffusion = diffuse[0];\n self.integrate = self.integrate1D;\n self.maxvelocity = maxvel;\n print 'Done';", "def C(self,t,K,c0):\n #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('lsoda')\n #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('vode', method='bdf', order=15)\n \n # if we have any negative times we assume they occur before the \n # reaction starts hence all negative times are assigned concentration \n # c0\n \n ## could switch to something like ode15s that the oiginal matlab code \n ## uses - can odeint cope with equations as stiff as we need?\n ## to use integrate.ode need order of arguments in dc_dt to switch\n \n #r = scipy.integrate.ode(self.dc_dt)\n #r = r.set_integrator('vode', method='bdf', order=15,nsteps=3000)\n #r = r.set_initial_value(c0)\n #r = r.set_f_params((K,))\n #r.integrate(t)\n \n static_times = t[t<0]\n dynamic_times = t[t>=0]\n\n static_C = np.array([c0 for _ in static_times])\n\n # odeint always takes the first time point as t0\n # our t0 is always 0 (removing t0 occures before we integrate)\n # so if the first time point is not 0 we add it \n \n if not dynamic_times.any() or dynamic_times[0]:\n #fancy indexing returns a copy so we can do this\n dynamic_times = np.hstack([[0],dynamic_times]) \n dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))[1:]\n else:\n dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))\n \n if static_C.any():\n return np.vstack([static_C,dynamic_C])\n else:\n return dynamic_C", "def j(U, Q, mesh, T, num_steps, params):\n \n # Define parameters for cost functional\n alpha = params[\"alpha\"]\n u_d = params[\"u_d\"]\n \n # Compute integrals with time\n I1 = 0\n I3 = 0\n \n t = 0\n dt = T/num_steps\n for i in range(num_steps + 1):\n I1_int = assemble((U[i] - u_d[i])*(U[i] - u_d[i])*dx(mesh))\n I3_int = assemble(Q[i]*Q[i]*dx(mesh))\n \n if i == 0 or i == num_steps:\n I1_int *= 0.5\n I3_int *= 0.5\n \n I1 += I1_int\n I3 += I3_int\n \n t += dt\n \n \n I1 *= dt\n I3 *= dt*alpha/2\n \n # Compute end time integral\n \n print(\"Cost Functional Data\")\n print(\"I1: {}\".format(I1))\n print(\"I3: 
{}\".format(I3))\n print()\n \n return I1 + I3", "def __init__(\n self,\n biorbd_model,\n dynamics_type,\n number_shooting_points,\n phase_time,\n X_init,\n U_init,\n X_bounds,\n U_bounds,\n objective_functions=ObjectiveList(),\n constraints=ConstraintList(),\n parameters=ParameterList(),\n external_forces=(),\n ode_solver=OdeSolver.RK,\n nb_integration_steps=5,\n control_type=ControlType.CONSTANT,\n all_generalized_mapping=None,\n q_mapping=None,\n q_dot_mapping=None,\n tau_mapping=None,\n plot_mappings=None,\n state_transitions=StateTransitionList(),\n nb_threads=1,\n use_SX=False,\n ):\n\n if isinstance(biorbd_model, str):\n biorbd_model = [biorbd.Model(biorbd_model)]\n elif isinstance(biorbd_model, biorbd.biorbd.Model):\n biorbd_model = [biorbd_model]\n elif isinstance(biorbd_model, (list, tuple)):\n biorbd_model = [biorbd.Model(m) if isinstance(m, str) else m for m in biorbd_model]\n else:\n raise RuntimeError(\"biorbd_model must either be a string or an instance of biorbd.Model()\")\n self.version = {\"casadi\": casadi.__version__, \"biorbd\": biorbd.__version__, \"biorbd_optim\": __version__}\n self.nb_phases = len(biorbd_model)\n\n biorbd_model_path = [m.path().relativePath().to_string() for m in biorbd_model]\n self.original_values = {\n \"biorbd_model\": biorbd_model_path,\n \"dynamics_type\": dynamics_type,\n \"number_shooting_points\": number_shooting_points,\n \"phase_time\": phase_time,\n \"X_init\": X_init,\n \"U_init\": U_init,\n \"X_bounds\": X_bounds,\n \"U_bounds\": U_bounds,\n \"objective_functions\": ObjectiveList(),\n \"constraints\": ConstraintList(),\n \"parameters\": ParameterList(),\n \"external_forces\": external_forces,\n \"ode_solver\": ode_solver,\n \"nb_integration_steps\": nb_integration_steps,\n \"control_type\": control_type,\n \"all_generalized_mapping\": all_generalized_mapping,\n \"q_mapping\": q_mapping,\n \"q_dot_mapping\": q_dot_mapping,\n \"tau_mapping\": tau_mapping,\n \"plot_mappings\": plot_mappings,\n \"state_transitions\": state_transitions,\n \"nb_threads\": nb_threads,\n \"use_SX\": use_SX,\n }\n\n # Check integrity of arguments\n if not isinstance(nb_threads, int) or isinstance(nb_threads, bool) or nb_threads < 1:\n raise RuntimeError(\"nb_threads should be a positive integer greater or equal than 1\")\n\n if isinstance(dynamics_type, DynamicsTypeOption):\n dynamics_type_tp = DynamicsTypeList()\n dynamics_type_tp.add(dynamics_type)\n dynamics_type = dynamics_type_tp\n elif not isinstance(dynamics_type, DynamicsTypeList):\n raise RuntimeError(\"dynamics_type should be a DynamicsTypeOption or a DynamicsTypeList\")\n\n ns = number_shooting_points\n if not isinstance(ns, int) or ns < 2:\n if isinstance(ns, (tuple, list)):\n if sum([True for i in ns if not isinstance(i, int) and not isinstance(i, bool)]) != 0:\n raise RuntimeError(\n \"number_shooting_points should be a positive integer (or a list of) greater or equal than 2\"\n )\n else:\n raise RuntimeError(\n \"number_shooting_points should be a positive integer (or a list of) greater or equal than 2\"\n )\n nstep = nb_integration_steps\n if not isinstance(nstep, int) or isinstance(nstep, bool) or nstep < 1:\n raise RuntimeError(\"nb_integration_steps should be a positive integer greater or equal than 1\")\n\n if not isinstance(phase_time, (int, float)):\n if isinstance(phase_time, (tuple, list)):\n if sum([True for i in phase_time if not isinstance(i, (int, float))]) != 0:\n raise RuntimeError(\"phase_time should be a number or a list of number\")\n else:\n raise RuntimeError(\"phase_time 
should be a number or a list of number\")\n\n if isinstance(X_init, InitialConditionsOption):\n X_init_tp = InitialConditionsList()\n X_init_tp.add(X_init)\n X_init = X_init_tp\n elif not isinstance(X_init, InitialConditionsList):\n raise RuntimeError(\"X_init should be built from a InitialConditionsOption or InitialConditionsList\")\n\n if isinstance(U_init, InitialConditionsOption):\n U_init_tp = InitialConditionsList()\n U_init_tp.add(U_init)\n U_init = U_init_tp\n elif not isinstance(U_init, InitialConditionsList):\n raise RuntimeError(\"U_init should be built from a InitialConditionsOption or InitialConditionsList\")\n\n if isinstance(X_bounds, BoundsOption):\n X_bounds_tp = BoundsList()\n X_bounds_tp.add(X_bounds)\n X_bounds = X_bounds_tp\n elif not isinstance(X_bounds, BoundsList):\n raise RuntimeError(\"X_bounds should be built from a BoundOption or a BoundsList\")\n\n if isinstance(U_bounds, BoundsOption):\n U_bounds_tp = BoundsList()\n U_bounds_tp.add(U_bounds)\n U_bounds = U_bounds_tp\n elif not isinstance(U_bounds, BoundsList):\n raise RuntimeError(\"U_bounds should be built from a BoundOption or a BoundsList\")\n\n if isinstance(objective_functions, ObjectiveOption):\n objective_functions_tp = ObjectiveList()\n objective_functions_tp.add(objective_functions)\n objective_functions = objective_functions_tp\n elif not isinstance(objective_functions, ObjectiveList):\n raise RuntimeError(\"objective_functions should be built from an ObjectiveOption or ObjectiveList\")\n\n if isinstance(constraints, ConstraintOption):\n constraints_tp = ConstraintList()\n constraints_tp.add(constraints)\n constraints = constraints_tp\n elif not isinstance(constraints, ConstraintList):\n raise RuntimeError(\"constraints should be built from an ConstraintOption or ConstraintList\")\n\n if not isinstance(parameters, ParameterList):\n raise RuntimeError(\"parameters should be built from an ParameterList\")\n\n if not isinstance(state_transitions, StateTransitionList):\n raise RuntimeError(\"state_transitions should be built from an StateTransitionList\")\n\n if not isinstance(ode_solver, OdeSolver):\n raise RuntimeError(\"ode_solver should be built an instance of OdeSolver\")\n\n if not isinstance(use_SX, bool):\n raise RuntimeError(\"use_SX should be a bool\")\n\n # Declare optimization variables\n self.J = []\n self.g = []\n self.g_bounds = []\n self.V = []\n self.V_bounds = Bounds(interpolation=InterpolationType.CONSTANT)\n self.V_init = InitialConditions(interpolation=InterpolationType.CONSTANT)\n self.param_to_optimize = {}\n\n # nlp is the core of a phase\n self.nlp = [{} for _ in range(self.nb_phases)]\n self.__add_to_nlp(\"model\", biorbd_model, False)\n self.__add_to_nlp(\"phase_idx\", [i for i in range(self.nb_phases)], False)\n\n # Type of CasADi graph\n if use_SX:\n self.CX = SX\n else:\n self.CX = MX\n\n # Define some aliases\n self.__add_to_nlp(\"ns\", number_shooting_points, False)\n for nlp in self.nlp:\n if nlp[\"ns\"] < 1:\n raise RuntimeError(\"Number of shooting points must be at least 1\")\n self.initial_phase_time = phase_time\n phase_time, initial_time_guess, time_min, time_max = self.__init_phase_time(\n phase_time, objective_functions, constraints\n )\n self.__add_to_nlp(\"tf\", phase_time, False)\n self.__add_to_nlp(\"t0\", [0] + [nlp[\"tf\"] for i, nlp in enumerate(self.nlp) if i != len(self.nlp) - 1], False)\n self.__add_to_nlp(\"dt\", [self.nlp[i][\"tf\"] / max(self.nlp[i][\"ns\"], 1) for i in range(self.nb_phases)], False)\n self.nb_threads = nb_threads\n 
self.__add_to_nlp(\"nb_threads\", nb_threads, True)\n self.solver_type = Solver.NONE\n self.solver = None\n\n # External forces\n if external_forces != ():\n external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)\n self.__add_to_nlp(\"external_forces\", external_forces, False)\n\n # Compute problem size\n if all_generalized_mapping is not None:\n if q_mapping is not None or q_dot_mapping is not None or tau_mapping is not None:\n raise RuntimeError(\"all_generalized_mapping and a specified mapping cannot be used alongside\")\n q_mapping = q_dot_mapping = tau_mapping = all_generalized_mapping\n self.__add_to_nlp(\"q_mapping\", q_mapping, q_mapping is None, BidirectionalMapping)\n self.__add_to_nlp(\"q_dot_mapping\", q_dot_mapping, q_dot_mapping is None, BidirectionalMapping)\n self.__add_to_nlp(\"tau_mapping\", tau_mapping, tau_mapping is None, BidirectionalMapping)\n plot_mappings = plot_mappings if plot_mappings is not None else {}\n reshaped_plot_mappings = []\n for i in range(self.nb_phases):\n reshaped_plot_mappings.append({})\n for key in plot_mappings:\n reshaped_plot_mappings[i][key] = plot_mappings[key][i]\n self.__add_to_nlp(\"plot_mappings\", reshaped_plot_mappings, False)\n\n # Prepare the parameters to optimize\n self.state_transitions = []\n if len(parameters) > 0:\n self.update_parameters(parameters)\n\n # Declare the time to optimize\n self.__define_variable_time(initial_time_guess, time_min, time_max)\n\n # Prepare the dynamics of the program\n self.__add_to_nlp(\"dynamics_type\", dynamics_type, False)\n self.__add_to_nlp(\"ode_solver\", ode_solver, True)\n self.__add_to_nlp(\"control_type\", control_type, True)\n for i in range(self.nb_phases):\n self.__initialize_nlp(self.nlp[i])\n Problem.initialize(self, self.nlp[i])\n\n # Prepare path constraints\n self.__add_to_nlp(\"X_bounds\", X_bounds, False)\n self.__add_to_nlp(\"U_bounds\", U_bounds, False)\n for i in range(self.nb_phases):\n self.nlp[i][\"X_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nx\"], self.nlp[i][\"ns\"])\n if self.nlp[i][\"control_type\"] == ControlType.CONSTANT:\n self.nlp[i][\"U_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"] - 1)\n elif self.nlp[i][\"control_type\"] == ControlType.LINEAR_CONTINUOUS:\n self.nlp[i][\"U_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"])\n else:\n raise NotImplementedError(f\"Plotting {self.nlp[i]['control_type']} is not implemented yet\")\n\n # Prepare initial guesses\n self.__add_to_nlp(\"X_init\", X_init, False)\n self.__add_to_nlp(\"U_init\", U_init, False)\n for i in range(self.nb_phases):\n self.nlp[i][\"X_init\"].check_and_adjust_dimensions(self.nlp[i][\"nx\"], self.nlp[i][\"ns\"])\n if self.nlp[i][\"control_type\"] == ControlType.CONSTANT:\n self.nlp[i][\"U_init\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"] - 1)\n elif self.nlp[i][\"control_type\"] == ControlType.LINEAR_CONTINUOUS:\n self.nlp[i][\"U_init\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"])\n else:\n raise NotImplementedError(f\"Plotting {self.nlp[i]['control_type']} is not implemented yet\")\n\n # Variables and constraint for the optimization program\n for i in range(self.nb_phases):\n self.__define_multiple_shooting_nodes_per_phase(self.nlp[i], i)\n\n # Define dynamic problem\n self.__add_to_nlp(\n \"nb_integration_steps\", nb_integration_steps, True\n ) # Number of steps of integration (for now only RK4 steps are implemented)\n for i in 
range(self.nb_phases):\n if self.nlp[0][\"nx\"] != self.nlp[i][\"nx\"] or self.nlp[0][\"nu\"] != self.nlp[i][\"nu\"]:\n raise RuntimeError(\"Dynamics with different nx or nu is not supported yet\")\n self.__prepare_dynamics(self.nlp[i])\n\n # Prepare phase transitions (Reminder, it is important that parameters are declared\n # before, otherwise they will erase the state_transitions)\n self.state_transitions = StateTransitionFunctions.prepare_state_transitions(self, state_transitions)\n\n # Inner- and inter-phase continuity\n ContinuityFunctions.continuity(self)\n\n # Prepare constraints\n self.update_constraints(constraints)\n\n # Prepare objectives\n self.update_objectives(objective_functions)", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Initialize constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(GHMCIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"vold\", 0) # old velocities\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Constrain positions.\n #\n self.addConstrainPositions()\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputePerDof(\"vold\", \"v\")\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n self.addComputePerDof(\"v\", \"v*accept - vold*(1-accept)\")\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def Optimize_tau(x,dt):\n\n xdot = central_diff(x,dt)\n var_xdot = np.var(xdot)\n tau_target = CorrelationTime(x,dt=dt)\n\n \n k = var_xdot\n beta0 = 0.1\n alpha = 1.0/k # beta/D ratio\n\n @jit\n def f1(x): 
# spring force\n return k*x\n \n MySys = Stochastic_Oscillator(f1,beta0,beta0 / alpha)\n R = 5000 # how many taus required for integration\n MySys.Match_Correlation(tau_target,np.array([1e-2,20]),alpha,T=R*tau_target,N=int(R*100))\n MySys.k=k\n\n return MySys", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Compute constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(VVVRIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()", "def build_system(u, dt, dx, D=4, C=1, time_diff='FD', space_diff='FD', width_x=None,\n width_t=None, deg_x=5, deg_t=None):\n\n n, m = u.shape\n\n if width_x == None: width_x = n / 10\n if width_t == None: width_t = m / 10\n if deg_t == None: deg_t = deg_x\n\n # If we're using polynomials to take derviatives, then we toss the data around the edges.\n if time_diff == 'poly':\n m2 = m - 2 * width_t\n offset_t = width_t\n else:\n m2 = m\n offset_t = 0\n if space_diff == 'poly':\n n2 = n - 2 * width_x\n offset_x = width_x\n else:\n n2 = n\n offset_x = 0\n\n ########################\n # First take the time derivaitve for the left hand side of the equation\n ########################\n ut = np.zeros((n2, m2), dtype=np.complex64)\n\n if time_diff == 'poly':\n T = np.linspace(0, (m - 1) * dt, m)\n for i in range(n2):\n ut[i, :] = PolyDiff(u[i + offset_x, :], T, diff=1, width=width_t, deg=deg_t)[:, 0]\n\n else:\n for i in range(n2):\n ut[i, :] = FiniteDiff(u[i + offset_x, :], dt, 1)\n\n ut = np.reshape(ut, (n2 * m2, 1), order='F')\n\n ########################\n # Now form the rhs one column at a time, and record what each one is\n ########################\n\n u2 = u[offset_x:n - offset_x, offset_t:m - offset_t]\n Theta = np.zeros((n2 * m2, (D + 1) * C), dtype=np.complex64)\n ux = np.zeros((n2, m2), dtype=np.complex64)\n rhs_description = ['' for i in range((D + 1) * C)]\n\n if space_diff == 'poly':\n Du = {}\n for i in range(m2):\n Du[i] = PolyDiff(u[:, i + offset_t], np.linspace(0, (n - 1) * dx, n), diff=D, width=width_x, deg=deg_x)\n if space_diff == 'Fourier': ik = 1j * np.fft.fftfreq(n) * n\n\n for d in range(D + 1):\n # compute derivatives of d degree\n if d > 0:\n for i in range(m2):\n if space_diff == 'FD':\n ux[:, i] = FiniteDiff(u[:, i + offset_t], dx, d)\n elif space_diff == 
'poly':\n ux[:, i] = Du[i][:, d - 1]\n else:\n ux = np.array(u2, dtype=np.complex64)\n # if d == 1: print(ux)\n\n # compute polynomials of all terms, c used as c+1\n for c in range(C):\n Theta[:, d * C + c] = np.reshape(np.power(ux, c+1), (n2 * m2), order='F')\n # print('d:{}, c:{}, mean:{}'.format(d, c, np.mean(Theta[:, d * C + c])))\n\n if d > 0:\n rhs_description[d * C + c] = rhs_description[d * C + c] + \\\n 'u_{' + ''.join(['x' for _ in range(d)]) + '}'\n else:\n rhs_description[d * C + c] = rhs_description[d * C + c] + 'u'\n\n if c > 0:\n rhs_description[d * C + c] = rhs_description[d * C + c] + '^' + str(c+1)\n\n # print(rhs_description)\n features, rhs = create_cross_features(Theta, rhs_description)\n features = np.concatenate((Theta, features), 1)\n rhs = np.concatenate((rhs_description, rhs), 0)\n\n return ut, features, rhs", "def ExplicitFixedStepIntegrator(f,times=None,a=None,b=None,c=None):\n \n if not(isinstance(times,DMatrix)):\n times = DMatrix(times)\n \n \n def toSX(a):\n return casadi.reshape(SXMatrix(a),a.shape[0],a.shape[1])\n \n \n times = toSX(times)\n a = toSX(a)\n b = toSX(b)\n c = toSX(c)\n \n x_init = f.inputSX(ODE_Y)\n N = x_init.numel()\n p = f.inputSX(ODE_P)\n \n s=b.numel()\n assert(a.size1()==s-1)\n assert(a.size2()==s-1)\n assert(c.numel()==s)\n \n if s>1:\n for lhs,rhs in zip(c[1:,0],casadi.sum(a,1)):\n pass\n #assert(lhs==rhs)\n \n ks = SXMatrix(N,s)\n y = x_init\n \n for k in range(len(times)-1):\n t = times[k]\n h = times[k+1]-times[k]\n for i in range(s):\n if i>0:\n x = y + casadi.dot(ks[:,:i],a[i-1,:i].T)*h\n else:\n x = y\n ks[:,i] = f.eval({ODE_T: t+c[i,0]*h, ODE_Y: x, ODE_P: p})[0]\n y+= casadi.dot(ks,b)*h\n \n return SXFunction([x_init,p],[y])", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def test_scenario(timestep_per_pi, int_method):\n\n #determine BC and IC\n x0 = 0.0 #init pos\n v0 = 1.0 #init vel\n t0 = 0.0 #start-time\n tn = 4.0*np.pi #end-time\n tau = timestep_per_pi*np.pi #timesteps\n n = (tn-t0)/tau + 1 #number of timesteps\n \n time = np.linspace(t0, tn, n) #time-array\n\n #acceleration of point particle with k=m=1\n acc1 = lambda x,v,t: -1.0*x #function must take three arguments!\n\n pos, vel, time = integrate_time(func=acc1,\n init=(x0,v0),\n timearray=time,\n method=int_method)\n\n #analytical solutions\n pos_an = np.sin(time)\n vel_an = np.cos(time)\n\n return time, pos, pos_an, vel, vel_an", "def __init__(self,dt,dynamicsf,h_or_g,xlims,alims,iEC,fname,d1_over=0.1,\\\n d2_over=0.1,da=0.1,Nx=1000,Nls=100,plims=np.empty((2,0))):\n self.dt = dt\n if (len(sig(dynamicsf).parameters) == 1):\n self.dynamicsf = lambda x,p: dynamicsf(x)\n 
else:\n self.dynamicsf = dynamicsf\n if (len(sig(h_or_g).parameters) == 1):\n self.h_or_g = lambda x,p: h_or_g(x)\n else:\n self.h_or_g = h_or_g\n self.xlims = xlims\n self.alims = alims\n self.iEC = iEC\n self.fname = fname\n self.d1_over = d1_over\n self.d2_over = d2_over\n self.da = da\n self.Nx = Nx\n self.Nls = Nls\n self.n = np.size(xlims,1)\n self.m = np.size(self.h_or_g(xlims[0,:],plims[0,:]).T,0)\n self.n_p = np.size(plims,1)\n self.Afun= lambda x,p: self.jacobian(x,p,self.dynamicsf)\n if self.iEC == \"est\":\n self.Cfun= lambda x,p: self.jacobian(x,p,self.h_or_g)\n self.Bw = lambda x,p: np.identity(self.n)\n self.Gw = lambda x,p: np.identity(self.m)\n elif self.iEC == \"con\":\n self.Bw = self.h_or_g\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n self.epsilon = 0\n self.dt_rk = 0.01\n self.plims = plims", "def integrator_model(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_model()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return solver", "def __init__(self, universe, molecule, step_size, sampling_universe=None, \\\n **options):\n Dynamics.Integrator.__init__(self, universe, options)\n # Supported features: none for the moment, to keep it simple\n self.features = []\n\n self.molecule = molecule\n self.step_size = step_size\n self.sampling_universe = sampling_universe", "def __init__(self, function_space, element):\n self.solution = fenics.Function(function_space)\n \n self.time = 0.\n \n self.function_space = function_space\n \n self.element = element", "def initialize(M):\n\n t = T.scalar()\n dgamma = T.matrix() # velocity of Euclidean curve\n dsm = T.matrix() # derivative of Euclidean semimartingale\n u = M.FM_element()\n d = M.dim\n\n # Deterministic development\n def ode_development(dgamma,t,u):\n x = u[0:d]\n nu = u[d:].reshape((d,-1))\n m = nu.shape[1]\n\n det = T.tensordot(M.Horizontal(u)[:,0:m], dgamma, axes = [1,0])\n \n return det\n\n M.development = lambda u,dgamma: integrate(ode_development,u,dgamma)\n M.developmentf = theano.function([u,dgamma], M.development(u,dgamma))\n\n # Stochastic development\n def sde_development(dsm,t,u):\n x = u[0:d]\n nu = u[d:].reshape((d,-1))\n m = nu.shape[1]\n\n sto = T.tensordot(M.Horizontal(u)[:,0:m], dsm, axes = [1,0])\n \n return (T.zeros_like(sto), sto, M.Horizontal(u)[:,0:m])\n\n M.sde_development = sde_development\n M.sde_developmentf = theano.function([dsm,t,u], M.sde_development(dsm,t,u), on_unused_input = 'ignore') \n M.stochastic_development = lambda u,dsm: integrate_sde(sde_development,integrator_stratonovich,u,dsm)\n M.stochastic_developmentf = theano.function([u,dsm], M.stochastic_development(u,dsm))", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, sigma=0.1 * simtk.unit.angstroms, timestep=1 * simtk.unit.femtoseconds):\n\n # Create a new Custom integrator.\n super(MetropolisMonteCarloIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB * temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma_x\", sigma) # perturbation size\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (set later)\n self.addPerDofVariable(\"xold\", 0) # old positions\n 
self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n\n #\n # Context state update.\n #\n self.addUpdateContextState()\n\n #\n # Update velocities from Maxwell-Boltzmann distribution.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"v\", \"sigma_v*gaussian\")\n self.addConstrainVelocities()\n\n #\n # propagation steps\n #\n # Store old positions and energy.\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputeGlobal(\"Eold\", \"energy\")\n # Gaussian particle displacements.\n self.addComputePerDof(\"x\", \"x + sigma_x*gaussian\")\n # Accept or reject with Metropolis criteria.\n self.addComputeGlobal(\"accept\", \"step(exp(-(energy-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"(1-accept)*xold + x*accept\")\n # Accumulate acceptance statistics.\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]" ]
[ "0.69303405", "0.59939796", "0.57994497", "0.57433486", "0.57079077", "0.5678165", "0.56725657", "0.5661507", "0.56537825", "0.56398034", "0.5597277", "0.55902517", "0.55843586", "0.55814993", "0.55416745", "0.5530678", "0.5516298", "0.5508994", "0.55017346", "0.55017346", "0.55017346", "0.55017346", "0.5500382", "0.5486106", "0.5478085", "0.5470905", "0.54703486", "0.5470192", "0.5463006", "0.5451436" ]
0.68241906
1
This function constructs the integrator to be compatible with the CasADi environment, for the equations of the model and the objective function with a variable time step.
def integrator_model(self): xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \ = self.DAE_system() ODEeq_ = vertcat(*ODEeq) self.ODEeq = Function('f', [xd, u], [vertcat(*ODEeq)], ['x0', 'p'], ['xdot']) dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u), 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)} opts = {'tf': self.tf / self.nk} # interval length F = integrator('F', 'idas', dae, opts) # model = functools.partial(solver, np.zeros(np.shape(xa))) return F
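A minimal standalone sketch of how an integrator built this way is typically evaluated, assuming a toy DAE, an interval length of 0.1, and a control value of 0.5 chosen purely for illustration (none of these come from the row above); the keyword names x0/z0/p and the outputs xf/zf follow CasADi's standard integrator interface, and note that newer CasADi releases deprecate the 'tf' option in favor of passing the time grid as arguments:

from casadi import MX, integrator

# Toy DAE in the same layout as integrator_model: one differential state x,
# one algebraic state z, one control u passed in as a parameter.
x = MX.sym('x')
z = MX.sym('z')
u = MX.sym('u')

dae = {'x': x, 'z': z, 'p': u,
       'ode': -x + u,       # dx/dt = -x + u
       'alg': z - x**2}     # 0 = z - x^2
opts = {'tf': 0.1}          # interval length, analogous to self.tf / self.nk

F = integrator('F', 'idas', dae, opts)

# Propagate one interval from x(0) = 1 with a consistent algebraic guess.
res = F(x0=1.0, z0=1.0, p=0.5)
print(float(res['xf']), float(res['zf']))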
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integrator_model(self):\n\n xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \\\n = self.DAE_system()\n\n dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),\n 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}\n opts = {'tf': self.tf / self.nk} # interval length\n F = integrator('F', 'idas', dae, opts)\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return F", "def integrator_model(self):\n\n xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \\\n = self.DAE_system()\n\n dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),\n 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}\n opts = {'tf': self.tf / self.nk} # interval length\n F = integrator('F', 'idas', dae, opts)\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return F", "def integrate_casadi(self, problem, y0, t_eval, mass_matrix=None):\n options = {\n \"grid\": t_eval,\n \"reltol\": self.rtol,\n \"abstol\": self.atol,\n \"output_t0\": True,\n }\n options.update(self.extra_options)\n if self.method == \"idas\":\n options[\"calc_ic\"] = True\n\n # set up and solve\n integrator = casadi.integrator(\"F\", self.method, problem, options)\n try:\n # Try solving\n len_rhs = problem[\"x\"].size()[0]\n y0_diff, y0_alg = np.split(y0, [len_rhs])\n sol = integrator(x0=y0_diff, z0=y0_alg)\n y_values = np.concatenate([sol[\"xf\"].full(), sol[\"zf\"].full()])\n return pybamm.Solution(t_eval, y_values, None, None, \"final time\")\n except RuntimeError as e:\n # If it doesn't work raise error\n raise pybamm.SolverError(e.args[0])", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, nsteps=10, timestep=1 * simtk.unit.femtoseconds):\n\n super(HMCIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB * temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow Context updating here, outside of inner loop only.\n #\n self.addUpdateContextState()\n\n #\n # Draw new velocity.\n #\n self.addComputePerDof(\"v\", \"sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Store old position and energy.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n\n #\n # Inner symplectic steps using velocity Verlet.\n #\n for step in range(nsteps):\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Accept/reject step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n 
self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)#, {'error_on_fail':False})\n\n return solver", "def integrate(self, t):", "def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n 
return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)", "def __init__(self,head_offset=0,aquifer_type='unconfined',domain_center=0+0j,\r\n domain_radius=1,H = None,variables=[],priors=[],observations=[]):\r\n \r\n import numpy as np\r\n \r\n # Set potential scaling variables\r\n self.head_offset = head_offset\r\n self.aquifer_type = aquifer_type\r\n self.H = H\r\n \r\n # Set domain scaling variables\r\n self.domain_center = domain_center\r\n self.domain_radius = domain_radius\r\n \r\n if not np.isscalar(self.domain_center):\r\n self.domain_center = self.domain_center[0] + 1j*self.domain_center[1]\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n # Define a list for Analytic Elements\r\n self.elementlist = []\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n self.observations = observations\r\n \r\n # This function scrapes the model and its elements for unknown variables,\r\n # then gives this instance three new variables:\r\n # self.num_params Number of unknown variables\r\n # self.params List of unknown variables\r\n # self.param_names List of names of unknown variables\r\n # self.priors List of prior dictionaries for unknow variables\r\n self.take_parameter_inventory()\r\n \r\n self.linear_solver = False\r\n \r\n # Pre-allocate the function matrix and parameter vector for the linear solver\r\n self.matrix_solver = []\r\n self.params_vector = []", "def __init__(self,\n uDict,\n 
phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd = True,\n movingDomain=False):\n #\n #set the objects describing the method and boundary conditions\n #\n self.movingDomain=movingDomain\n self.tLast_mesh=None\n #\n self.name=name\n self.sd=sd\n self.Hess=False\n self.lowmem=True\n self.timeTerm=True#allow turning off the time derivative\n #self.lowmem=False\n self.testIsTrial=True\n self.phiTrialIsTrial=True\n self.u = uDict\n self.ua = {}#analytical solutions\n self.phi = phiDict\n self.dphi={}\n for ck,phi in phiDict.iteritems():\n if coefficients.potential.has_key(ck):\n for cj in coefficients.potential[ck].keys():\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n else:\n self.dphi[(ck,ck)] = FiniteElementFunction(phi.femSpace)\n #check for nonlinearities in the diffusion coefficient that don't match the potential\n for ci,ckDict in coefficients.diffusion.iteritems():\n #for ck,cjDict in coefficients.diffusion.iteritems(): #cek: bug?\n for ck,cjDict in ckDict.iteritems():\n for cj in cjDict.keys():\n if not self.dphi.has_key((ck,cj)):\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n self.matType = matType\n #try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1,coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n ## Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now\n self.fluxBoundaryConditions=fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n #determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n #cek come back\n if self.stabilization != None:\n for ci in range(self.nc):\n if coefficients.mass.has_key(ci):\n for flag in coefficients.mass[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.advection.has_key(ci):\n for flag in coefficients.advection[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.diffusion.has_key(ci):\n for diffusionDict in coefficients.diffusion[ci].values():\n for flag in diffusionDict.values():\n if flag != 
'constant':\n self.stabilizationIsNonlinear=True\n if coefficients.potential.has_key(ci):\n for flag in coefficients.potential[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.reaction.has_key(ci):\n for flag in coefficients.reaction[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.hamiltonian.has_key(ci):\n for flag in coefficients.hamiltonian[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n #determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or\n (numericalFluxType != None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n #calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self,self.nFreeVDOF_global)\n #\n #build the quadrature point dictionaries from the input (this\n #is just for convenience so that the input doesn't have to be\n #complete)\n #\n elementQuadratureDict={}\n elemQuadIsDict = isinstance(elementQuadrature,dict)\n if elemQuadIsDict: #set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization != None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature\n if self.shockCapturing != None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if elementQuadrature.has_key(('numDiff',ci,ci)):\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature\n if massLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n if reactionLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n 
elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n elementBoundaryQuadratureDict={}\n if isinstance(elementBoundaryQuadrature,dict): #set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if elementBoundaryQuadrature.has_key(I):\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n #mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints,self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global\n #\n #Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*\n self.mesh.nElementBoundaries_element*\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n\n #\n #storage dictionaries\n self.scalars_element = set()\n #\n #simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q={}\n self.ebq={}\n self.ebq_global={}\n self.ebqe={}\n self.phi_ip={}\n #mesh\n self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')\n self.q['det(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['abs(det(J))'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['J'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.q['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['g'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n self.ebqe['inverse(J)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['hat(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['bar(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['sqrt(det(g))'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.ebqe[('n')] = 
numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n #shape\n self.q[('v',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w',0)] = self.q[('v',0)]\n self.q[('grad(v)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)',0)] = self.q[('grad(v)',0)]\n self.q[('grad(w)*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)*dV_f',0)] = self.q[('grad(w)*dV',0)]\n #todo get rid of dV_{f,a}, etc\n self.q[('w*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w*dV_m',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','grad(w)*dV','grad(w)*dV_f','w*dV','w*dV_m']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.q[key_ci] = self.q[key_0]\n #ELLAM weights stiffness, body integrals by dt\n for ci in range(self.nc):\n self.q[('dt*grad(w)*dV',ci)]= numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[ci],self.nSpace_global),'d')\n #\n self.ebqe[('v',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n self.ebqe[('w',0)] = self.ebqe[('v',0)]\n self.ebqe[('grad(v)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.ebqe[('w*dS_f',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','w*dS_f']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.ebqe[key_ci] = self.ebqe[key_0]\n\n for ci in range(self.nc):\n self.q[('u',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('grad(u)',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n #f\n for ci in self.coefficients.advection.keys():\n self.q[('f',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.q[('df',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n self.ebqe[('f',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.ebqe[('df',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n\n #a, linear dispersion single component\n\n for ci,ckDict in self.coefficients.diffusion.iteritems():\n for ck,cjDict in ckDict.iteritems():\n for flag in cjDict.values():\n assert flag == 'constant', \"Error potential %s LADRellam does not handle diffusion = 
%s yet\" % (ck,flag)\n\n if self.coefficients.sdInfo != None and (ci,ck) in self.coefficients.sdInfo.keys():\n self.q[('a',ci,ck)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n\n else:\n self.q[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n #dense storage\n self.q[('grad(w)*dV_a',ci,ck)] = self.q[('grad(w)*dV_f',ci)]\n self.q[('dt*grad(w)*dV_a',ci,ck)]= self.q[('dt*grad(w)*dV',ci)]\n #ci,ckDict\n #linear potential only for now, need to change for e.g., Buckley Leverett\n for ck in self.phi.keys():\n self.phi[ck].dof[:]=self.u[ck].dof\n self.q[('grad(phi)',ck)] = self.q[('grad(u)',ck)]\n for key in self.dphi.keys():\n self.dphi[key].dof.fill(1.0)\n self.q[('dphi',key[0],key[1])] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n\n\n# if self.coefficients.diffusion.has_key(0):\n# for ck,flag in self.coefficients.diffusion[0][0].iteritems():\n# assert self.coefficients.diffusion[0][0][ck] == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n# if self.coefficients.sdInfo != None and (0,0) in self.coefficients.sdInfo.keys():\n# self.q[('a',0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.q[('da',0,0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n\n# else:\n# self.q[('a',0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.q[('da',0,0,0)]=numpy.zeros(\n# 
(self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# #\n# self.phi[0].dof[:]=self.u[0].dof\n# self.dphi[(0,0)].dof.fill(1.0)\n# self.q[('grad(phi)',0)] = self.q[('grad(u)',0)]\n# self.q[('dphi',0,0)] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n# self.q[('grad(w)*dV_a',0,0)] = self.q[('grad(w)*dV_f',0)]\n# self.q[('dt*grad(w)*dV_a',0,0)]= self.q[('dt*grad(w)*dV',0)]\n\n #r 'constant' ie not a function of solution but go ahead and include dr for now\n for ci,cjDict in self.coefficients.reaction.iteritems():\n self.q[('r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dr',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('w*dV_r',ci)] = self.q[('w*dV',ci)]\n self.q[('dt*w*dV_r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.ebqe[('r',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #m\n for ci,cjDict in self.coefficients.mass.iteritems():\n self.q[('m',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dm',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('mt',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_last',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp',ci)] = self.q[('m',ci)]\n self.q[('cfl',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('numDiff',ci,ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.ebqe[('m',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n for cj in cjDict.keys():\n self.ebqe[('dm',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n\n ###ellam specific options with defauls here\n self.ellamDiscretization = ELLAMtools.ELLAMdiscretization(self,options)\n\n #\n self.needEBQ = options.needEBQ #could need for analytical velocity evaluation with RT0,BDM\n\n #beg normal stuff allocating things\n self.points_elementBoundaryQuadrature= set()\n self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature= set()\n self.tensors_elementBoundaryQuadrature= set()\n\n if self.needEBQ:\n for k in ['x','hat(x)']:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq['n'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n self.ebq['inverse(J)'] = 
numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),'d')\n #allocate the metric tensor\n self.ebq['g'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n log(memory(\"element boundary quadrature\",\"LADRellam\"),level=4)\n ebq_keys = ['sqrt(det(g))']\n ebq_keys.extend([('u',ci) for ci in range(self.nc)])\n for k in ebq_keys:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #test and trial info\n self.ebq[('w',0)] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[0]),'d')\n for ci in range(1,self.nc):\n self.ebq[('w',ci)] = self.ebq[('w',0)]\n for ci in range(self.nc):\n self.ebq[('v',ci)] = self.ebq[('w',0)]\n\n #ebq_global info\n self.ebq_global['x'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq_global['n'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n #\n # allocate residual and Jacobian storage\n #\n self.elementResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementSpatialResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementJacobian = {}\n for ci in range(self.nc):\n self.elementJacobian[ci]={}\n for cj in range(self.nc):\n if cj in self.coefficients.stencil[ci]:\n self.elementJacobian[ci][cj] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci],\n self.nDOF_trial_element[cj]),\n 'd')\n #\n self.fluxJacobian_exterior = {}\n for ci in range(self.nc):\n self.fluxJacobian_exterior[ci]={}\n for cj in self.coefficients.stencil[ci]:\n self.fluxJacobian_exterior[ci][cj] = numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[cj]),\n 'd')\n\n #\n #\n #\n #\n log(memory(\"element and element boundary Jacobians\",\"OneLevelTransport\"),level=4)\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')\n self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')\n self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n #identify the internal nodes this is ought to be in mesh\n ##\\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global,i]\n self.internalNodes -= 
set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')\n for nI,n in enumerate(self.internalNodes):\n self.internalNodesArray[nI]=n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\",2)\n self.updateLocal2Global()\n log(\"Building time integration object\",2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\"OneLevelTransport\"),level=4)\n #mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options != None:\n self.timeIntegration.setFromOptions(options)\n log(memory(\"TimeIntegration\",\"OneLevelTransport\"),level=4)\n log(\"Calculating numerical quadrature formulas\",2)\n self.calculateQuadrature()\n #lay out components/equations contiguously for now\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]\n self.stride = [1 for ci in range(self.nc)]\n #use contiguous layout of components for parallel, requires weak DBC's\n comm = Comm.get()\n self.comm=comm\n if comm.size() > 1:\n assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,\"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [ci]\n self.stride = [self.nc for ci in range(self.nc)]\n #\n log(memory(\"stride+offset\",\"OneLevelTransport\"),level=4)\n if numericalFluxType != None:\n if options == None or options.periodicDirichletConditions == None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n #set penalty terms\n #cek todo move into numerical flux initialization\n if self.ebq_global.has_key('penalty'):\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n #penalty term\n #cek move to Numerical flux initialization\n if self.ebqe.has_key('penalty'):\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power\n log(memory(\"numericalFlux\",\"OneLevelTransport\"),level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n #use post processing tools to get conservative fluxes, None by default\n import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n log(memory(\"velocity postprocessor\",\"OneLevelTransport\"),level=4)\n #helper for writing out data storage\n import Archiver\n self.elementQuadratureDictionaryWriter = 
Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n #TODO get rid of this\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n if self.coefficients.advection.has_key(ci):\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux,'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux,'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {}\n for ci in range(self.nc):\n self.numericalFlux.isDOFBoundary[ci]= numpy.zeros(self.ebqe[('u',ci)].shape,'i')\n if not hasattr(self.numericalFlux,'ebqe'):\n self.numericalFlux.ebqe = {}\n for ci in range(self.nc):\n self.numericalFlux.ebqe[('u',ci)]= numpy.zeros(self.ebqe[('u',ci)].shape,'d')", "def integrate(self, dt):\n f = lambda t, y: self.Cm * (np.dot(self.phi, self.activation(y)) + np.dot(self.phi_input, self.activation(self.input)) - self.Gm * y)\n self.Vm = rk4(self.t, self.Vm, self.t + dt, f)", "def __init__(self,delta, advect, diffuse, maxvel, dim=1):\n\n print 'Initializing RK4 integrator';\n self.dx = delta;\n self.cflConstant = 0.5;\n if(dim == 3):\n print 'In 3D mode';\n self.dfdt = advect[0];\n self.diffusion = diffuse[0];\n self.integrate = self.integrate3D;\n self.maxvelocity = maxvel;\n\n elif(dim == 2):\n print 'In 2D mode';\n self.dfdt = advect[0];\n self.diffusion = diffuse[0];\n self.integrate = self.integrate2D;\n self.maxvelocity = maxvel;\n\n elif (dim == 1):\n print 'In 1D mode';\n self.dfdt = advect[0];\n self.diffusion = diffuse[0];\n self.integrate = self.integrate1D;\n self.maxvelocity = maxvel;\n print 'Done';", "def C(self,t,K,c0):\n #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('lsoda')\n #ode(self.dc_dt,c0,t,args=(k,)).set_integrator('vode', method='bdf', order=15)\n \n # if we have any negative times we assume they occur before the \n # reaction starts hence all negative times are assigned concentration \n # c0\n \n ## could switch to something like ode15s that the oiginal matlab code \n ## uses - can odeint cope with equations as stiff as we need?\n ## to use integrate.ode need order of arguments in dc_dt to switch\n \n #r = scipy.integrate.ode(self.dc_dt)\n #r = r.set_integrator('vode', method='bdf', order=15,nsteps=3000)\n #r = r.set_initial_value(c0)\n #r = r.set_f_params((K,))\n #r.integrate(t)\n \n static_times = t[t<0]\n dynamic_times = t[t>=0]\n\n static_C = np.array([c0 for _ in static_times])\n\n # odeint always takes the first time point as t0\n # our t0 is always 0 (removing t0 occures before we integrate)\n # so if the first time point is not 0 we add it \n \n if not dynamic_times.any() or dynamic_times[0]:\n #fancy indexing returns a copy so we can do this\n dynamic_times = np.hstack([[0],dynamic_times]) \n dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))[1:]\n else:\n dynamic_C = odeint(self.dc_dt,c0,dynamic_times,args=(K,))\n \n if static_C.any():\n return np.vstack([static_C,dynamic_C])\n else:\n return dynamic_C", "def j(U, Q, mesh, T, num_steps, params):\n \n # Define parameters for cost functional\n alpha = params[\"alpha\"]\n u_d = params[\"u_d\"]\n \n # Compute integrals with time\n 
I1 = 0\n I3 = 0\n \n t = 0\n dt = T/num_steps\n for i in range(num_steps + 1):\n I1_int = assemble((U[i] - u_d[i])*(U[i] - u_d[i])*dx(mesh))\n I3_int = assemble(Q[i]*Q[i]*dx(mesh))\n \n if i == 0 or i == num_steps:\n I1_int *= 0.5\n I3_int *= 0.5\n \n I1 += I1_int\n I3 += I3_int\n \n t += dt\n \n \n I1 *= dt\n I3 *= dt*alpha/2\n \n # Compute end time integral\n \n print(\"Cost Functional Data\")\n print(\"I1: {}\".format(I1))\n print(\"I3: {}\".format(I3))\n print()\n \n return I1 + I3", "def __init__(\n self,\n biorbd_model,\n dynamics_type,\n number_shooting_points,\n phase_time,\n X_init,\n U_init,\n X_bounds,\n U_bounds,\n objective_functions=ObjectiveList(),\n constraints=ConstraintList(),\n parameters=ParameterList(),\n external_forces=(),\n ode_solver=OdeSolver.RK,\n nb_integration_steps=5,\n control_type=ControlType.CONSTANT,\n all_generalized_mapping=None,\n q_mapping=None,\n q_dot_mapping=None,\n tau_mapping=None,\n plot_mappings=None,\n state_transitions=StateTransitionList(),\n nb_threads=1,\n use_SX=False,\n ):\n\n if isinstance(biorbd_model, str):\n biorbd_model = [biorbd.Model(biorbd_model)]\n elif isinstance(biorbd_model, biorbd.biorbd.Model):\n biorbd_model = [biorbd_model]\n elif isinstance(biorbd_model, (list, tuple)):\n biorbd_model = [biorbd.Model(m) if isinstance(m, str) else m for m in biorbd_model]\n else:\n raise RuntimeError(\"biorbd_model must either be a string or an instance of biorbd.Model()\")\n self.version = {\"casadi\": casadi.__version__, \"biorbd\": biorbd.__version__, \"biorbd_optim\": __version__}\n self.nb_phases = len(biorbd_model)\n\n biorbd_model_path = [m.path().relativePath().to_string() for m in biorbd_model]\n self.original_values = {\n \"biorbd_model\": biorbd_model_path,\n \"dynamics_type\": dynamics_type,\n \"number_shooting_points\": number_shooting_points,\n \"phase_time\": phase_time,\n \"X_init\": X_init,\n \"U_init\": U_init,\n \"X_bounds\": X_bounds,\n \"U_bounds\": U_bounds,\n \"objective_functions\": ObjectiveList(),\n \"constraints\": ConstraintList(),\n \"parameters\": ParameterList(),\n \"external_forces\": external_forces,\n \"ode_solver\": ode_solver,\n \"nb_integration_steps\": nb_integration_steps,\n \"control_type\": control_type,\n \"all_generalized_mapping\": all_generalized_mapping,\n \"q_mapping\": q_mapping,\n \"q_dot_mapping\": q_dot_mapping,\n \"tau_mapping\": tau_mapping,\n \"plot_mappings\": plot_mappings,\n \"state_transitions\": state_transitions,\n \"nb_threads\": nb_threads,\n \"use_SX\": use_SX,\n }\n\n # Check integrity of arguments\n if not isinstance(nb_threads, int) or isinstance(nb_threads, bool) or nb_threads < 1:\n raise RuntimeError(\"nb_threads should be a positive integer greater or equal than 1\")\n\n if isinstance(dynamics_type, DynamicsTypeOption):\n dynamics_type_tp = DynamicsTypeList()\n dynamics_type_tp.add(dynamics_type)\n dynamics_type = dynamics_type_tp\n elif not isinstance(dynamics_type, DynamicsTypeList):\n raise RuntimeError(\"dynamics_type should be a DynamicsTypeOption or a DynamicsTypeList\")\n\n ns = number_shooting_points\n if not isinstance(ns, int) or ns < 2:\n if isinstance(ns, (tuple, list)):\n if sum([True for i in ns if not isinstance(i, int) and not isinstance(i, bool)]) != 0:\n raise RuntimeError(\n \"number_shooting_points should be a positive integer (or a list of) greater or equal than 2\"\n )\n else:\n raise RuntimeError(\n \"number_shooting_points should be a positive integer (or a list of) greater or equal than 2\"\n )\n nstep = nb_integration_steps\n if not 
isinstance(nstep, int) or isinstance(nstep, bool) or nstep < 1:\n raise RuntimeError(\"nb_integration_steps should be a positive integer greater or equal than 1\")\n\n if not isinstance(phase_time, (int, float)):\n if isinstance(phase_time, (tuple, list)):\n if sum([True for i in phase_time if not isinstance(i, (int, float))]) != 0:\n raise RuntimeError(\"phase_time should be a number or a list of number\")\n else:\n raise RuntimeError(\"phase_time should be a number or a list of number\")\n\n if isinstance(X_init, InitialConditionsOption):\n X_init_tp = InitialConditionsList()\n X_init_tp.add(X_init)\n X_init = X_init_tp\n elif not isinstance(X_init, InitialConditionsList):\n raise RuntimeError(\"X_init should be built from a InitialConditionsOption or InitialConditionsList\")\n\n if isinstance(U_init, InitialConditionsOption):\n U_init_tp = InitialConditionsList()\n U_init_tp.add(U_init)\n U_init = U_init_tp\n elif not isinstance(U_init, InitialConditionsList):\n raise RuntimeError(\"U_init should be built from a InitialConditionsOption or InitialConditionsList\")\n\n if isinstance(X_bounds, BoundsOption):\n X_bounds_tp = BoundsList()\n X_bounds_tp.add(X_bounds)\n X_bounds = X_bounds_tp\n elif not isinstance(X_bounds, BoundsList):\n raise RuntimeError(\"X_bounds should be built from a BoundOption or a BoundsList\")\n\n if isinstance(U_bounds, BoundsOption):\n U_bounds_tp = BoundsList()\n U_bounds_tp.add(U_bounds)\n U_bounds = U_bounds_tp\n elif not isinstance(U_bounds, BoundsList):\n raise RuntimeError(\"U_bounds should be built from a BoundOption or a BoundsList\")\n\n if isinstance(objective_functions, ObjectiveOption):\n objective_functions_tp = ObjectiveList()\n objective_functions_tp.add(objective_functions)\n objective_functions = objective_functions_tp\n elif not isinstance(objective_functions, ObjectiveList):\n raise RuntimeError(\"objective_functions should be built from an ObjectiveOption or ObjectiveList\")\n\n if isinstance(constraints, ConstraintOption):\n constraints_tp = ConstraintList()\n constraints_tp.add(constraints)\n constraints = constraints_tp\n elif not isinstance(constraints, ConstraintList):\n raise RuntimeError(\"constraints should be built from an ConstraintOption or ConstraintList\")\n\n if not isinstance(parameters, ParameterList):\n raise RuntimeError(\"parameters should be built from an ParameterList\")\n\n if not isinstance(state_transitions, StateTransitionList):\n raise RuntimeError(\"state_transitions should be built from an StateTransitionList\")\n\n if not isinstance(ode_solver, OdeSolver):\n raise RuntimeError(\"ode_solver should be built an instance of OdeSolver\")\n\n if not isinstance(use_SX, bool):\n raise RuntimeError(\"use_SX should be a bool\")\n\n # Declare optimization variables\n self.J = []\n self.g = []\n self.g_bounds = []\n self.V = []\n self.V_bounds = Bounds(interpolation=InterpolationType.CONSTANT)\n self.V_init = InitialConditions(interpolation=InterpolationType.CONSTANT)\n self.param_to_optimize = {}\n\n # nlp is the core of a phase\n self.nlp = [{} for _ in range(self.nb_phases)]\n self.__add_to_nlp(\"model\", biorbd_model, False)\n self.__add_to_nlp(\"phase_idx\", [i for i in range(self.nb_phases)], False)\n\n # Type of CasADi graph\n if use_SX:\n self.CX = SX\n else:\n self.CX = MX\n\n # Define some aliases\n self.__add_to_nlp(\"ns\", number_shooting_points, False)\n for nlp in self.nlp:\n if nlp[\"ns\"] < 1:\n raise RuntimeError(\"Number of shooting points must be at least 1\")\n self.initial_phase_time = phase_time\n 
phase_time, initial_time_guess, time_min, time_max = self.__init_phase_time(\n phase_time, objective_functions, constraints\n )\n self.__add_to_nlp(\"tf\", phase_time, False)\n self.__add_to_nlp(\"t0\", [0] + [nlp[\"tf\"] for i, nlp in enumerate(self.nlp) if i != len(self.nlp) - 1], False)\n self.__add_to_nlp(\"dt\", [self.nlp[i][\"tf\"] / max(self.nlp[i][\"ns\"], 1) for i in range(self.nb_phases)], False)\n self.nb_threads = nb_threads\n self.__add_to_nlp(\"nb_threads\", nb_threads, True)\n self.solver_type = Solver.NONE\n self.solver = None\n\n # External forces\n if external_forces != ():\n external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)\n self.__add_to_nlp(\"external_forces\", external_forces, False)\n\n # Compute problem size\n if all_generalized_mapping is not None:\n if q_mapping is not None or q_dot_mapping is not None or tau_mapping is not None:\n raise RuntimeError(\"all_generalized_mapping and a specified mapping cannot be used alongside\")\n q_mapping = q_dot_mapping = tau_mapping = all_generalized_mapping\n self.__add_to_nlp(\"q_mapping\", q_mapping, q_mapping is None, BidirectionalMapping)\n self.__add_to_nlp(\"q_dot_mapping\", q_dot_mapping, q_dot_mapping is None, BidirectionalMapping)\n self.__add_to_nlp(\"tau_mapping\", tau_mapping, tau_mapping is None, BidirectionalMapping)\n plot_mappings = plot_mappings if plot_mappings is not None else {}\n reshaped_plot_mappings = []\n for i in range(self.nb_phases):\n reshaped_plot_mappings.append({})\n for key in plot_mappings:\n reshaped_plot_mappings[i][key] = plot_mappings[key][i]\n self.__add_to_nlp(\"plot_mappings\", reshaped_plot_mappings, False)\n\n # Prepare the parameters to optimize\n self.state_transitions = []\n if len(parameters) > 0:\n self.update_parameters(parameters)\n\n # Declare the time to optimize\n self.__define_variable_time(initial_time_guess, time_min, time_max)\n\n # Prepare the dynamics of the program\n self.__add_to_nlp(\"dynamics_type\", dynamics_type, False)\n self.__add_to_nlp(\"ode_solver\", ode_solver, True)\n self.__add_to_nlp(\"control_type\", control_type, True)\n for i in range(self.nb_phases):\n self.__initialize_nlp(self.nlp[i])\n Problem.initialize(self, self.nlp[i])\n\n # Prepare path constraints\n self.__add_to_nlp(\"X_bounds\", X_bounds, False)\n self.__add_to_nlp(\"U_bounds\", U_bounds, False)\n for i in range(self.nb_phases):\n self.nlp[i][\"X_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nx\"], self.nlp[i][\"ns\"])\n if self.nlp[i][\"control_type\"] == ControlType.CONSTANT:\n self.nlp[i][\"U_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"] - 1)\n elif self.nlp[i][\"control_type\"] == ControlType.LINEAR_CONTINUOUS:\n self.nlp[i][\"U_bounds\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"])\n else:\n raise NotImplementedError(f\"Plotting {self.nlp[i]['control_type']} is not implemented yet\")\n\n # Prepare initial guesses\n self.__add_to_nlp(\"X_init\", X_init, False)\n self.__add_to_nlp(\"U_init\", U_init, False)\n for i in range(self.nb_phases):\n self.nlp[i][\"X_init\"].check_and_adjust_dimensions(self.nlp[i][\"nx\"], self.nlp[i][\"ns\"])\n if self.nlp[i][\"control_type\"] == ControlType.CONSTANT:\n self.nlp[i][\"U_init\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"] - 1)\n elif self.nlp[i][\"control_type\"] == ControlType.LINEAR_CONTINUOUS:\n self.nlp[i][\"U_init\"].check_and_adjust_dimensions(self.nlp[i][\"nu\"], self.nlp[i][\"ns\"])\n else:\n raise 
NotImplementedError(f\"Plotting {self.nlp[i]['control_type']} is not implemented yet\")\n\n # Variables and constraint for the optimization program\n for i in range(self.nb_phases):\n self.__define_multiple_shooting_nodes_per_phase(self.nlp[i], i)\n\n # Define dynamic problem\n self.__add_to_nlp(\n \"nb_integration_steps\", nb_integration_steps, True\n ) # Number of steps of integration (for now only RK4 steps are implemented)\n for i in range(self.nb_phases):\n if self.nlp[0][\"nx\"] != self.nlp[i][\"nx\"] or self.nlp[0][\"nu\"] != self.nlp[i][\"nu\"]:\n raise RuntimeError(\"Dynamics with different nx or nu is not supported yet\")\n self.__prepare_dynamics(self.nlp[i])\n\n # Prepare phase transitions (Reminder, it is important that parameters are declared\n # before, otherwise they will erase the state_transitions)\n self.state_transitions = StateTransitionFunctions.prepare_state_transitions(self, state_transitions)\n\n # Inner- and inter-phase continuity\n ContinuityFunctions.continuity(self)\n\n # Prepare constraints\n self.update_constraints(constraints)\n\n # Prepare objectives\n self.update_objectives(objective_functions)", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Initialize constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(GHMCIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"vold\", 0) # old velocities\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Constrain positions.\n #\n self.addConstrainPositions()\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputePerDof(\"vold\", \"v\")\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n self.addComputePerDof(\"v\", \"v*accept - vold*(1-accept)\")\n\n #\n # Velocity randomization\n #\n 
self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def Optimize_tau(x,dt):\n\n xdot = central_diff(x,dt)\n var_xdot = np.var(xdot)\n tau_target = CorrelationTime(x,dt=dt)\n\n \n k = var_xdot\n beta0 = 0.1\n alpha = 1.0/k # beta/D ratio\n\n @jit\n def f1(x): # spring force\n return k*x\n \n MySys = Stochastic_Oscillator(f1,beta0,beta0 / alpha)\n R = 5000 # how many taus required for integration\n MySys.Match_Correlation(tau_target,np.array([1e-2,20]),alpha,T=R*tau_target,N=int(R*100))\n MySys.k=k\n\n return MySys", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Compute constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(VVVRIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()", "def build_system(u, dt, dx, D=4, C=1, time_diff='FD', space_diff='FD', width_x=None,\n width_t=None, deg_x=5, deg_t=None):\n\n n, m = u.shape\n\n if width_x == None: width_x = n / 10\n if width_t == None: width_t = m / 10\n if deg_t == None: deg_t = deg_x\n\n # If we're using polynomials to take derviatives, then we toss the data around the edges.\n if time_diff == 'poly':\n m2 = m - 2 * width_t\n offset_t = width_t\n else:\n m2 = m\n offset_t = 0\n if space_diff == 'poly':\n n2 = n - 2 * width_x\n offset_x = width_x\n else:\n n2 = n\n offset_x = 0\n\n ########################\n # First take the time derivaitve for the left hand side of the equation\n ########################\n ut = np.zeros((n2, m2), dtype=np.complex64)\n\n if time_diff == 'poly':\n T = np.linspace(0, (m - 1) * dt, m)\n for i in range(n2):\n ut[i, :] = PolyDiff(u[i + offset_x, :], T, diff=1, width=width_t, deg=deg_t)[:, 0]\n\n else:\n for i in range(n2):\n ut[i, :] = FiniteDiff(u[i + offset_x, :], dt, 1)\n\n ut = np.reshape(ut, (n2 * m2, 1), order='F')\n\n ########################\n # Now form the rhs one column at a time, and record what each one is\n ########################\n\n u2 = u[offset_x:n - offset_x, offset_t:m - offset_t]\n Theta = np.zeros((n2 * m2, (D + 1) * C), dtype=np.complex64)\n ux = np.zeros((n2, m2), dtype=np.complex64)\n 
rhs_description = ['' for i in range((D + 1) * C)]\n\n if space_diff == 'poly':\n Du = {}\n for i in range(m2):\n Du[i] = PolyDiff(u[:, i + offset_t], np.linspace(0, (n - 1) * dx, n), diff=D, width=width_x, deg=deg_x)\n if space_diff == 'Fourier': ik = 1j * np.fft.fftfreq(n) * n\n\n for d in range(D + 1):\n # compute derivatives of d degree\n if d > 0:\n for i in range(m2):\n if space_diff == 'FD':\n ux[:, i] = FiniteDiff(u[:, i + offset_t], dx, d)\n elif space_diff == 'poly':\n ux[:, i] = Du[i][:, d - 1]\n else:\n ux = np.array(u2, dtype=np.complex64)\n # if d == 1: print(ux)\n\n # compute polynomials of all terms, c used as c+1\n for c in range(C):\n Theta[:, d * C + c] = np.reshape(np.power(ux, c+1), (n2 * m2), order='F')\n # print('d:{}, c:{}, mean:{}'.format(d, c, np.mean(Theta[:, d * C + c])))\n\n if d > 0:\n rhs_description[d * C + c] = rhs_description[d * C + c] + \\\n 'u_{' + ''.join(['x' for _ in range(d)]) + '}'\n else:\n rhs_description[d * C + c] = rhs_description[d * C + c] + 'u'\n\n if c > 0:\n rhs_description[d * C + c] = rhs_description[d * C + c] + '^' + str(c+1)\n\n # print(rhs_description)\n features, rhs = create_cross_features(Theta, rhs_description)\n features = np.concatenate((Theta, features), 1)\n rhs = np.concatenate((rhs_description, rhs), 0)\n\n return ut, features, rhs", "def ExplicitFixedStepIntegrator(f,times=None,a=None,b=None,c=None):\n \n if not(isinstance(times,DMatrix)):\n times = DMatrix(times)\n \n \n def toSX(a):\n return casadi.reshape(SXMatrix(a),a.shape[0],a.shape[1])\n \n \n times = toSX(times)\n a = toSX(a)\n b = toSX(b)\n c = toSX(c)\n \n x_init = f.inputSX(ODE_Y)\n N = x_init.numel()\n p = f.inputSX(ODE_P)\n \n s=b.numel()\n assert(a.size1()==s-1)\n assert(a.size2()==s-1)\n assert(c.numel()==s)\n \n if s>1:\n for lhs,rhs in zip(c[1:,0],casadi.sum(a,1)):\n pass\n #assert(lhs==rhs)\n \n ks = SXMatrix(N,s)\n y = x_init\n \n for k in range(len(times)-1):\n t = times[k]\n h = times[k+1]-times[k]\n for i in range(s):\n if i>0:\n x = y + casadi.dot(ks[:,:i],a[i-1,:i].T)*h\n else:\n x = y\n ks[:,i] = f.eval({ODE_T: t+c[i,0]*h, ODE_Y: x, ODE_P: p})[0]\n y+= casadi.dot(ks,b)*h\n \n return SXFunction([x_init,p],[y])", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def test_scenario(timestep_per_pi, int_method):\n\n #determine BC and IC\n x0 = 0.0 #init pos\n v0 = 1.0 #init vel\n t0 = 0.0 #start-time\n tn = 4.0*np.pi #end-time\n tau = timestep_per_pi*np.pi #timesteps\n n = (tn-t0)/tau + 1 #number of timesteps\n \n time = np.linspace(t0, tn, n) #time-array\n\n #acceleration of point particle with k=m=1\n acc1 = lambda x,v,t: -1.0*x #function must take three 
arguments!\n\n pos, vel, time = integrate_time(func=acc1,\n init=(x0,v0),\n timearray=time,\n method=int_method)\n\n #analytical solutions\n pos_an = np.sin(time)\n vel_an = np.cos(time)\n\n return time, pos, pos_an, vel, vel_an", "def __init__(self,dt,dynamicsf,h_or_g,xlims,alims,iEC,fname,d1_over=0.1,\\\n d2_over=0.1,da=0.1,Nx=1000,Nls=100,plims=np.empty((2,0))):\n self.dt = dt\n if (len(sig(dynamicsf).parameters) == 1):\n self.dynamicsf = lambda x,p: dynamicsf(x)\n else:\n self.dynamicsf = dynamicsf\n if (len(sig(h_or_g).parameters) == 1):\n self.h_or_g = lambda x,p: h_or_g(x)\n else:\n self.h_or_g = h_or_g\n self.xlims = xlims\n self.alims = alims\n self.iEC = iEC\n self.fname = fname\n self.d1_over = d1_over\n self.d2_over = d2_over\n self.da = da\n self.Nx = Nx\n self.Nls = Nls\n self.n = np.size(xlims,1)\n self.m = np.size(self.h_or_g(xlims[0,:],plims[0,:]).T,0)\n self.n_p = np.size(plims,1)\n self.Afun= lambda x,p: self.jacobian(x,p,self.dynamicsf)\n if self.iEC == \"est\":\n self.Cfun= lambda x,p: self.jacobian(x,p,self.h_or_g)\n self.Bw = lambda x,p: np.identity(self.n)\n self.Gw = lambda x,p: np.identity(self.m)\n elif self.iEC == \"con\":\n self.Bw = self.h_or_g\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n self.epsilon = 0\n self.dt_rk = 0.01\n self.plims = plims", "def integrator_model(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_model()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n # model = functools.partial(solver, np.zeros(np.shape(xa)))\n return solver", "def initialize(M):\n\n t = T.scalar()\n dgamma = T.matrix() # velocity of Euclidean curve\n dsm = T.matrix() # derivative of Euclidean semimartingale\n u = M.FM_element()\n d = M.dim\n\n # Deterministic development\n def ode_development(dgamma,t,u):\n x = u[0:d]\n nu = u[d:].reshape((d,-1))\n m = nu.shape[1]\n\n det = T.tensordot(M.Horizontal(u)[:,0:m], dgamma, axes = [1,0])\n \n return det\n\n M.development = lambda u,dgamma: integrate(ode_development,u,dgamma)\n M.developmentf = theano.function([u,dgamma], M.development(u,dgamma))\n\n # Stochastic development\n def sde_development(dsm,t,u):\n x = u[0:d]\n nu = u[d:].reshape((d,-1))\n m = nu.shape[1]\n\n sto = T.tensordot(M.Horizontal(u)[:,0:m], dsm, axes = [1,0])\n \n return (T.zeros_like(sto), sto, M.Horizontal(u)[:,0:m])\n\n M.sde_development = sde_development\n M.sde_developmentf = theano.function([dsm,t,u], M.sde_development(dsm,t,u), on_unused_input = 'ignore') \n M.stochastic_development = lambda u,dsm: integrate_sde(sde_development,integrator_stratonovich,u,dsm)\n M.stochastic_developmentf = theano.function([u,dsm], M.stochastic_development(u,dsm))", "def __init__(self, universe, molecule, step_size, sampling_universe=None, \\\n **options):\n Dynamics.Integrator.__init__(self, universe, options)\n # Supported features: none for the moment, to keep it simple\n self.features = []\n\n self.molecule = molecule\n self.step_size = step_size\n self.sampling_universe = sampling_universe", "def __init__(self, function_space, element):\n self.solution = fenics.Function(function_space)\n \n self.time = 0.\n \n self.function_space = function_space\n \n self.element = element", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, sigma=0.1 * simtk.unit.angstroms, timestep=1 * simtk.unit.femtoseconds):\n\n # Create a new Custom integrator.\n super(MetropolisMonteCarloIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB 
* temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma_x\", sigma) # perturbation size\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (set later)\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n\n #\n # Context state update.\n #\n self.addUpdateContextState()\n\n #\n # Update velocities from Maxwell-Boltzmann distribution.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"v\", \"sigma_v*gaussian\")\n self.addConstrainVelocities()\n\n #\n # propagation steps\n #\n # Store old positions and energy.\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputeGlobal(\"Eold\", \"energy\")\n # Gaussian particle displacements.\n self.addComputePerDof(\"x\", \"x + sigma_x*gaussian\")\n # Accept or reject with Metropolis criteria.\n self.addComputeGlobal(\"accept\", \"step(exp(-(energy-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"(1-accept)*xold + x*accept\")\n # Accumulate acceptance statistics.\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")" ]
[ "0.6824473", "0.6824473", "0.5993993", "0.5799238", "0.5743481", "0.5707509", "0.56789947", "0.56719893", "0.5662057", "0.56536293", "0.5639711", "0.55976707", "0.55896133", "0.5583953", "0.5581527", "0.5541686", "0.5530698", "0.55166656", "0.5509524", "0.55018413", "0.55018413", "0.55018413", "0.55018413", "0.5501527", "0.5484977", "0.5478345", "0.54705423", "0.5470515", "0.5469135", "0.5462439" ]
0.69306874
0
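Several of the snippets in the block above integrate second-order dynamics with fixed-step schemes (the GHMC/VVVR velocity–position updates, the explicit Runge–Kutta stepper, and the test_scenario harmonic oscillator with k = m = 1). For that same k = m = 1 oscillator, here is a minimal velocity-Verlet sketch; the function name, step count, and error tolerance are assumptions made for this illustration, not details taken from any of the listed implementations.

import numpy as np

def integrate_oscillator(n_steps=400, t_end=4.0 * np.pi, x0=0.0, v0=1.0):
    """Velocity-Verlet integration of x'' = -x (unit mass and stiffness)."""
    dt = t_end / n_steps
    t = np.linspace(0.0, t_end, n_steps + 1)
    x = np.empty(n_steps + 1)
    v = np.empty(n_steps + 1)
    x[0], v[0] = x0, v0
    for i in range(n_steps):
        a = -x[i]                                    # acceleration at the current position
        x[i + 1] = x[i] + v[i] * dt + 0.5 * a * dt ** 2
        v[i + 1] = v[i] + 0.5 * (a - x[i + 1]) * dt  # average of old and new acceleration
    return t, x, v

t, x, v = integrate_oscillator()
# With x(0) = 0 and v(0) = 1 the analytical solution is x = sin(t), v = cos(t).
assert np.max(np.abs(x - np.sin(t))) < 1e-2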
Sort the 4 corners of a rectangle clockwise so that the top-left corner is the first one.
def sort_corners(self, corners: np.ndarray): center = np.sum(corners, axis=0) / 4 sorted_corners = sorted( corners, key=lambda p: math.atan2(p[0][0] - center[0][0], p[0][1] - center[0][1]), reverse=True, ) return np.roll(sorted_corners, 1, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_corners(corners):\n col_sorted = corners[np.argsort(corners[:, 1])] # sort on the value in column\n\n # sort on the value in rows. a, b are the indexes\n a = np.argsort(col_sorted[:2, 0])\n b = np.argsort(col_sorted[2:, 0]) + 2\n\n return col_sorted[np.hstack((a, b))]", "def order_rect(pts):\n new = np.zeros((4, 2), dtype=\"int64\")\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n\n return new", "def order_points(corners):\n top_left = corners.sum(1).argmin()\n bottom_right = corners.sum(1).argmax()\n top_right = np.diff(corners).argmin()\n bottom_left = np.diff(corners).argmax()\n\n ordered = np.array([corners[top_left], corners[top_right], corners[bottom_left], corners[bottom_right]], dtype = \"float32\")\n\n return ordered", "def reorder(self, corners) -> np.array:\n ordered_corners = np.zeros((4, 2), dtype=np.float32)\n sums = corners.sum(1)\n ordered_corners[0] = corners[np.argmin(sums)]\n ordered_corners[2] = corners[np.argmax(sums)]\n diffs = np.diff(corners, axis=1)\n ordered_corners[1] = corners[np.argmin(diffs)]\n ordered_corners[3] = corners[np.argmax(diffs)]\n return ordered_corners", "def rectangleCoordinates(c1, c2):\n if c1[3] > c2[3]:\n if c1[2] > c2[2]:\n if c1[1] < c2[1]:\n if c1[0] < c2[0]:\n return (c1[0], c1[1], c1[2], c1[3])\n else:\n return (c2[0], c1[1], c1[2], c1[3])\n else:\n if c1[0] < c2[0]:\n return (c1[0], c2[1], c1[2], c1[3])\n else:\n return (c2[0], c2[1], c1[2], c1[3])\n else:\n if c1[1] < c2[1]:\n if c1[0] < c2[0]:\n return (c1[0], c1[1], c2[2], c1[3])\n else:\n return (c2[0], c1[1], c2[2], c1[3])\n else:\n if c1[0] < c2[0]:\n return (c1[0], c2[1], c2[2], c1[3])\n else:\n return (c2[0], c2[1], c2[2], c1[3])\n else:\n if c1[2] > c2[2]:\n if c1[1] < c2[1]:\n if c1[0] < c2[0]:\n return (c1[0], c1[1], c1[2], c2[3])\n else:\n return (c2[0], c1[1], c1[2], c2[3])\n else:\n if c1[0] < c2[0]:\n return (c1[0], c2[1], c1[2], c2[3])\n else:\n return (c2[0], c2[1], c1[2], c2[3])\n else:\n if c1[1] < c2[1]:\n if c1[0] < c2[0]:\n return (c1[0], c1[1], c2[2], c2[3])\n else:\n return (c2[0], c1[1], c2[2], c2[3])\n else:\n if c1[0] < c2[0]:\n return (c1[0], c2[1], c2[2], c2[3])\n else:\n return (c2[0], c2[1], c2[2], c2[3])", "def sortFourPoints(points, flip=False):\r\n\r\n # Find the sums and differences of each point's x,y values\r\n sums = []\r\n diffs = []\r\n for point in points:\r\n sums.append(point[0] + point[1])\r\n if flip:\r\n diffs.append(point[0] - point[1])\r\n else:\r\n diffs.append(point[1] - point[0])\r\n\r\n # Find the coordinates of each corner\r\n topLeft = points[np.argmin(sums)]\r\n topRight = points[np.argmin(diffs)]\r\n bottomLeft = points[np.argmax(diffs)]\r\n bottomRight = points[np.argmax(sums)]\r\n\r\n return [topLeft, topRight, bottomLeft, bottomRight]", "def get_top_corners(corners):\n top_corners = np.concatenate(\n [sorted(rect, key=getY)[:2] for rect in corners])\n return sorted(top_corners, key=getX)", "def order_rect_points(points):\n\n rect = np.zeros((4, 2), np.float32)\n\n s = points.sum(axis=1)\n rect[0] = points[np.argmin(s)]\n rect[2] = points[np.argmax(s)]\n\n d = np.diff(points, axis=1)\n rect[1] = points[np.argmin(d)]\n rect[3] = points[np.argmax(d)]\n return rect", "def order_points(pts):\n pts = np.array(pts)\n sums = pts.sum(axis=1)\n topleft_id = np.argmin(sums)\n bottomright_id = np.argmax(sums)\n\n # Quite clumsy, rewrite here\n leftover_ids = [i for range(4) if i not in (topleft_id, 
bottomright_id)]\n topright_id = min(leftover_ids, key=lambda i: pts[i][0])\n bottomleft_id = leftover_ids[0] if leftover_ids[0] != topright_id else leftover_ids[1]\n\n return pts[[topleft_id, topright_id, bottomright_id, bottomleft_id]]", "def order_points(pts):\n\n\trect = np.zeros((4, 2), dtype=\"float32\")\n\ts = pts.sum(axis=1)\n\trect[0] = pts[np.argmin(s)]\n\trect[2] = pts[np.argmax(s)]\n\tdiff = np.diff(pts, axis=1)\n\trect[1] = pts[np.argmin(diff)]\n\trect[3] = pts[np.argmax(diff)]\n\n\treturn rect", "def sort_clockwise(a):\n\n # get centroids, shape=(1,2=(cx,cy))\n center = a.mean(axis=0).reshape((1, 2))\n\n sorted_inds = np.argsort(np.arctan2(a[:, 1]-center[:, 1], a[:, 0]-center[:, 0]))\n\n return np.take(a, sorted_inds, axis=0)", "def order_points(pts):\n rect = np.zeros((4, 2), dtype=\"float32\")\n sum_pts = pts.sum(axis=1)\n rect[0] = pts[np.argmin(sum_pts)]\n rect[2] = pts[np.argmax(sum_pts)]\n diff = np.diff(pts, axis=1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n return rect", "def sort_clockwise(coordinates):\n center = tuple(map(op.truediv, reduce(lambda x_, y_: map(op.add, x_, y_), coordinates), [len(coordinates)] * 2))\n coordinates = sorted(coordinates, key=lambda coord: (-135 - np.degrees(\n np.arctan2(*tuple(map(op.sub, center, coord))[::-1]))) % 360)\n return coordinates", "def get_bottom_corners(corners):\n bottom_corners = np.concatenate(\n [sorted(rect, key=getY)[2:] for rect in corners])\n return sorted(bottom_corners, key=getX)", "def rect_horizontal_split(rect):\n\n upper = rect.copy()\n upper.height /= 2\n\n lower = rect.copy()\n lower.height = rect.height - upper.height\n\n lower.top = upper.bottom\n return (upper, lower)", "def _rectangle_corners(rectangle):\n corner_points = []\n for i1 in (.5, -.5):\n for i2 in (i1, -1 * i1):\n corner_points.append((rectangle['rectangle_center'][0] + i1 * rectangle['length_parallel'],\n rectangle['rectangle_center'][1] + i2 * rectangle['length_orthogonal']))\n\n return _rotate_points(rectangle['rectangle_center'], rectangle['unit_vector_angle'], corner_points)", "def contour_sort(l):\n length = len(l)\n if length <= 1:\n return l\n else:\n pivot = l.pop(int(length / 2))\n less, more = [], []\n for x in l:\n if cv2.contourArea(x) >= cv2.contourArea(pivot):\n less.append(x)\n else:\n more.append(x)\n return contour_sort(less) + [pivot] + contour_sort(more)", "def sortColors(self, nums: List[int]) -> None:\n left = -1\n right = len(nums)\n index = 0\n # since it is only 0,1,2, make 0 to the very left side, 2 to the very right side\n # will solve the problem\n while index < right:\n if nums[index] == 0:\n left += 1\n temp = nums[left]\n nums[left] = 0\n nums[index] = temp\n if left == index: index += 1\n elif nums[index] == 2:\n right -= 1\n temp = nums[right]\n nums[right] = 2\n nums[index] = temp\n else:\n index += 1", "def corners_asic(self):\n a = int(round(self.angle / 90.0)) % 4\n c = self.corners(True)\n\n if (a == 0):\n # The section is \"standing up\", and the top left corner is given\n # by the first corner.\n ul0 = [int(round(c[a][0])), int(round(c[a][1]))]\n ul1 = [ul0[0] + 194 + 3, ul0[1]]\n dlr = [194, 185]\n elif (a == 2):\n # The section is \"standing up\", and the top left corner is given\n # by the third corner.\n ul1 = [int(round(c[a][0])), int(round(c[a][1]))]\n ul0 = [ul1[0] + 194 + 3, ul1[1]]\n dlr = [194, 185]\n elif (a == 1):\n # The section is \"laying down\", and the top left corner is given\n # by the second corner.\n ul0 = [int(round(c[a][0])), int(round(c[a][1]))]\n ul1 = 
[ul0[0], ul0[1] + 194 + 3]\n dlr = [185, 194]\n elif (a == 3):\n # The section is \"laying down\", and the top left corner is given\n # by the forth corner.\n ul1 = [int(round(c[a][0])), int(round(c[a][1]))]\n ul0 = [ul1[0], ul1[1] + 194 + 3]\n dlr = [185, 194]\n\n coords = [\n [ul0[0], ul0[1], ul0[0] + dlr[0], ul0[1] + dlr[1]],\n [ul1[0], ul1[1], ul1[0] + dlr[0], ul1[1] + dlr[1]]]\n return (coords)", "def _confidence_interval_to_polygon(\n x_coords_bottom, y_coords_bottom, x_coords_top, y_coords_top,\n for_performance_diagram=False):\n\n nan_flags_top = numpy.logical_or(\n numpy.isnan(x_coords_top), numpy.isnan(y_coords_top))\n real_indices_top = numpy.where(numpy.invert(nan_flags_top))[0]\n\n nan_flags_bottom = numpy.logical_or(\n numpy.isnan(x_coords_bottom), numpy.isnan(y_coords_bottom))\n real_indices_bottom = numpy.where(numpy.invert(nan_flags_bottom))[0]\n\n if for_performance_diagram:\n y_coords_top = y_coords_top[real_indices_top]\n sort_indices_top = numpy.argsort(y_coords_top)\n y_coords_top = y_coords_top[sort_indices_top]\n x_coords_top = x_coords_top[real_indices_top][sort_indices_top]\n\n y_coords_bottom = y_coords_bottom[real_indices_bottom]\n sort_indices_bottom = numpy.argsort(-y_coords_bottom)\n y_coords_bottom = y_coords_bottom[sort_indices_bottom]\n x_coords_bottom = x_coords_bottom[real_indices_bottom][\n sort_indices_bottom]\n else:\n x_coords_top = x_coords_top[real_indices_top]\n sort_indices_top = numpy.argsort(-x_coords_top)\n x_coords_top = x_coords_top[sort_indices_top]\n y_coords_top = y_coords_top[real_indices_top][sort_indices_top]\n\n x_coords_bottom = x_coords_bottom[real_indices_bottom]\n sort_indices_bottom = numpy.argsort(x_coords_bottom)\n x_coords_bottom = x_coords_bottom[sort_indices_bottom]\n y_coords_bottom = y_coords_bottom[real_indices_bottom][\n sort_indices_bottom]\n\n polygon_x_coords = numpy.concatenate((\n x_coords_top, x_coords_bottom, numpy.array([x_coords_top[0]])))\n polygon_y_coords = numpy.concatenate((\n y_coords_top, y_coords_bottom, numpy.array([y_coords_top[0]])))\n\n return polygons.vertex_arrays_to_polygon_object(\n polygon_x_coords, polygon_y_coords)", "def normalizeRects(rects):\n\tsmallestX = min(rect[0] for rect in rects)\n\tsmallestY = min(rect[1] for rect in rects)\n\treturn list(\n\t\t(-smallestX + left,\n\t\t -smallestY + top,\n\t\t -smallestX + right,\n\t\t -smallestY + bottom) for left, top, right, bottom in rects\n\t)", "def get_pair_rects(contours):\n\n rect_pairs = []\n for index, cnt in enumerate(contours):\n # Rotated rect - ( center (x,y), (width, height), angle of rotation )\n rect = cv2.minAreaRect(cnt)\n center_x, center_y = rect[0]\n rect_angle = -round(rect[2], 2)\n\n if rect_angle > 45.0:\n # Iterate through all of the potential matches\n min_x_dist = min_rect = min_index = None\n for pot_index, pot_match in enumerate(contours):\n if np.array_equal(pot_match, cnt):\n continue\n\n match_rect = cv2.minAreaRect(pot_match)\n\n # Check if match is to the right of the contour\n if match_rect[0][0] > rect[0][0] and abs(\n match_rect[2] - rect_angle) > ANGLE_TOLERANCE_DEG:\n x_distance = match_rect[0][0] - rect[0][0]\n\n if min_x_dist is None or x_distance < min_x_dist:\n min_x_dist = x_distance\n min_rect = match_rect\n min_index = pot_index\n\n if min_rect is not None:\n rect_pairs.append((rect, min_rect))\n np.delete(contours, index)\n np.delete(contours, min_index)\n\n return rect_pairs", "def test_sort_angles(self):\n\n nb_points = 5\n points = np.array([[1, 2], [1, 1], [2, 1], [3, 7], [7, 2]]) # example of 
points\n\n sorted_points = convex_hull.sort_angle(points) # sorted points \n right_sorted_points = np.array([[2, 1], [7, 2], [3, 7], [1, 2], [1, 1]])\n\n self.assertTrue((sorted_points == right_sorted_points).all())", "def sort_filtered_contours(self):\r\n\r\n # Get the contours again\r\n invert = 255 - self.thresh_invert\r\n real_contours = cv2.findContours(invert, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n real_contours = real_contours[0] if len(real_contours) == 2 else real_contours[1]\r\n\r\n # Make sure that they're within the correct range for size\r\n # If too small, it is probably noise; if too large, then should be things around the grid\r\n for i, c in enumerate(real_contours, 1):\r\n contour_area = cv2.contourArea(c)\r\n if self.min_cell_size < contour_area < self.max_cell_size:\r\n self.good_contours.append(c)\r\n\r\n # We assume a square board, so the number of rows/cols should be the square root of total contours/cells\r\n self.board_dimension = int(math.sqrt(len(self.good_contours)))\r\n\r\n # Sort the contours from top to bottom\r\n (half_sorted_contours, _) = contours.sort_contours(self.good_contours, method=\"top-to-bottom\")\r\n\r\n # We then sort each row from left to right\r\n row = []\r\n for i, c in enumerate(half_sorted_contours, 1):\r\n row.append(c)\r\n if i % self.board_dimension == 0:\r\n (full_sorted_contours, _) = contours.sort_contours(row, method=\"left-to-right\")\r\n self.game_board_contours.append(full_sorted_contours)\r\n row = []", "def asteroidCreator(numCorner,win):\n\n xCoor = []\n yCoor = []\n\n # Creating coordinates of the points\n coorRange = [i for i in range(-10,10) if i not in [0]] # to avoid 0\n\n for i in range(numCorner):\n xCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n yCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n\n # Sorting the coordinates\n bubbleSort(xCoor,len(xCoor))\n bubbleSort(yCoor,len(yCoor))\n\n\n # Isolating the extreme points\n xSmallest = xCoor.pop(0)\n xLargest = xCoor.pop()\n\n ySmallest = yCoor.pop(0)\n yLargest = yCoor.pop()\n\n # Shuffle the coordinates\n random.shuffle(xCoor)\n random.shuffle(yCoor)\n\n # Divide them into two sets\n xCoorLower = xCoor[:len(xCoor)//2]\n xCoorUpper = xCoor[len(xCoor)//2:]\n\n yCoorLower = yCoor[:len(yCoor)//2]\n yCoorUpper = yCoor[len(yCoor)//2:]\n\n # Append back the extreme points, and sort them again\n xCoorLower.append(xSmallest)\n xCoorLower.append(xLargest)\n xCoorUpper.append(xSmallest)\n xCoorUpper.append(xLargest)\n\n yCoorLower.append(ySmallest)\n yCoorLower.append(yLargest)\n yCoorUpper.append(ySmallest)\n yCoorUpper.append(yLargest)\n\n bubbleSort(xCoorLower,len(xCoorLower))\n bubbleSort(xCoorUpper,len(xCoorUpper))\n bubbleSort(yCoorLower,len(yCoorLower))\n bubbleSort(yCoorUpper,len(yCoorUpper))\n\n # Getting the vector lengths out of the points\n # We will get vectors in 4 directions from 4 lists\n xVectorLengths = []\n yVectorLengths = []\n\n for i in range(len(xCoorLower)-1):\n xVectorLengths.append(xCoorLower[i]-xCoorLower[i+1])\n for i in range(len(xCoorUpper)-1):\n xVectorLengths.append(xCoorUpper[i+1]-xCoorUpper[i])\n for i in range(len(yCoorLower)-1):\n yVectorLengths.append(yCoorLower[i]-yCoorLower[i+1])\n for i in range(len(yCoorUpper)-1):\n yVectorLengths.append(yCoorUpper[i+1]-yCoorUpper[i])\n\n random.shuffle(xVectorLengths)\n random.shuffle(yVectorLengths)\n\n # Creating the vectors\n vectors = []\n defaultVector = [0,0]\n\n for i in range(len(xVectorLengths)):\n defaultVector[0] = round(xVectorLengths[i],2)\n 
defaultVector[1] = round(yVectorLengths[i],2)\n vectors.append(defaultVector.copy())\n\n # Sorting vectors by their angle\n sortedVectors = []\n quadrant1 = []\n quadrant2 = []\n quadrant3 = []\n quadrant4 = []\n\n ### Dividing them by quadrants\n for vector in vectors:\n if vector[0] >= 0 and vector[1] >= 0:\n quadrant1.append(vector)\n elif vector[0] <= 0 and vector[1] >= 0:\n quadrant2.append(vector)\n elif vector[0] <= 0 and vector[1] <= 0:\n quadrant3.append(vector)\n elif vector[0] >= 0 and vector[1] <= 0:\n quadrant4.append(vector)\n\n ### Sorting them inside the quadrants\n quadrant1 = angleSort(quadrant1,1,len(quadrant1))\n quadrant2 = angleSort(quadrant2,2,len(quadrant2))\n quadrant3 = angleSort(quadrant3,3,len(quadrant3))\n quadrant4 = angleSort(quadrant4,4,len(quadrant4))\n\n ### Adding them up in order\n for vector in quadrant1:\n sortedVectors.append(vector)\n for vector in quadrant2:\n sortedVectors.append(vector)\n for vector in quadrant3:\n sortedVectors.append(vector)\n for vector in quadrant4:\n sortedVectors.append(vector)\n\n # Creating the points for the polygon\n points = []\n points = vectorsToPoints(sortedVectors,points)\n\n rightEdge = 0\n leftEdge = 0\n upperEdge = 0\n lowerEdge = 0\n\n # getting the boundaries for the asteroid\n for point in points:\n if point[0] > rightEdge:\n rightEdge = point[0]\n elif point[0] < leftEdge:\n leftEdge = point[0]\n if point[1] > upperEdge:\n upperEdge = point[1]\n elif point[1] < lowerEdge:\n lowerEdge = point[1]\n\n # Width and height are only required since it is a child of rotating_block class\n width = rightEdge - leftEdge\n height = upperEdge - lowerEdge\n\n centerPoint = [(rightEdge + leftEdge) / 2 , (upperEdge + lowerEdge) / 2]\n\n asteroid = pho.Asteroid(win,width,height,points,centerPoint[0],centerPoint[1])\n\n return asteroid", "def rectangleRotation(a, b):\r\n\r\n line2 = (-1, sqrt(a**2 / 2))\r\n line4 = (-1, -sqrt(a**2 / 2))\r\n\r\n line1 = (1, sqrt(b**2 / 2))\r\n line3 = (1, -sqrt(b**2 / 2))\r\n\r\n tot = 0\r\n\r\n print(line2, line1)\r\n print(line3, line4)\r\n\r\n for xpts in range(-b * a, b * a):\r\n for ypts in range(-a * b, a * b):\r\n if (isunder(xpts, ypts, line1[0], line1[1]) and\r\n isunder(xpts, ypts, line2[0], line2[1]) and\r\n not isunder(xpts, ypts, line3[0], line3[1]) and\r\n not isunder(xpts, ypts, line4[0], line4[1])):\r\n tot += 1\r\n return tot", "def surround(self, p):\n res = set([])\n if p.x + 1 < self.height:\n res.add((p.x + 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x + 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x + 1, p.y - 1))\n res.add((p.x, p.y - 1))\n if p.x - 1 >= 0:\n res.add((p.x - 1, p.y))\n if p.y + 1 < self.width:\n res.add((p.x - 1, p.y + 1))\n res.add((p.x, p.y + 1))\n if p.y - 1 >= 0:\n res.add((p.x - 1, p.y - 1))\n res.add((p.x, p.y - 1))\n return res", "def _find_corners(self) -> list:\n width, height = self.width, self.height\n return [(0, 0), (width, 0), (0, height), (width, height)]", "def sortColors(self, nums: List[int]) -> None:\n # initialize several pointers\n l = 0\n r = len(nums) - 1\n cur = 0\n \n # use two pointers on the two ends\n while (cur <= r):\n # if number is 0, swap with l (to stay on the left)\n if nums[cur] == 0:\n nums[l], nums[cur] = nums[cur], nums[l]\n l += 1\n cur += 1\n # if number is 2, swap with r (to stay on the right)\n elif nums[cur] == 2:\n nums[r], nums[cur] = nums[cur], nums[r]\n r -= 1\n else: \n cur += 1", "def sort_for_graham_scan(points: np.ndarray, primary: np.ndarray) -> np.ndarray:\n point_slopes = 
np.array([v[1] / v[0] for v in points])\n sorted_indexes = np.argsort(point_slopes)\n sorted_points = np.array(points)[sorted_indexes]\n hull = np.concatenate(\n (sorted_points[-1:], [primary], sorted_points)\n )\n return hull" ]
[ "0.6854596", "0.66109776", "0.6415772", "0.63892233", "0.62674105", "0.62512946", "0.6234069", "0.6007216", "0.6000448", "0.59593153", "0.5874499", "0.58601785", "0.58035123", "0.57796407", "0.5720378", "0.5702618", "0.5701921", "0.5618068", "0.56109095", "0.5608352", "0.55676436", "0.55615675", "0.5559752", "0.55573237", "0.55537015", "0.555081", "0.5545898", "0.552534", "0.5500313", "0.5448278" ]
0.7108666
0
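The sort_corners document above, like several of the retrieved negatives, orders the four points by their angle around the centroid and then rotates the sequence so the top-left corner leads. Below is a minimal self-contained sketch of that idea on plain (x, y) pairs; the flat (4, 2) layout, the function name, and the argmin-of-(x + y) start rule are assumptions made for this illustration, not details taken from the snippets above.

import math
import numpy as np

def sort_corners_xy(corners):
    """Order four (x, y) points clockwise, starting from the top-left corner."""
    corners = np.asarray(corners, dtype=float)
    center = corners.mean(axis=0)
    # Descending atan2(dx, dy) walks the points clockwise in image coordinates
    # (where y grows downward).
    clockwise = np.array(sorted(
        corners,
        key=lambda p: math.atan2(p[0] - center[0], p[1] - center[1]),
        reverse=True,
    ))
    # Start from the corner with the smallest x + y, i.e. the top-left one.
    start = int(np.argmin(clockwise.sum(axis=1)))
    return np.roll(clockwise, -start, axis=0)

print(sort_corners_xy([(10, 0), (0, 0), (0, 10), (10, 10)]))
# -> top-left, top-right, bottom-right, bottom-left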
Given 4 sorted corners, compute the homography between the corners and the rectangle's ground truth, and return the information on the mapped plane. In other words, this function returns information on a plane (in particular, the desk's or the wall's). The plane's origin is at the top-left corner of the rectangle, and the normal is perpendicular to that plane.
def get_H_R_t(self, corners: np.ndarray) -> Plane: H = cv.findHomography(self.inner_rectangle, corners)[0] result = self.K_inv @ H result /= cv.norm(result[:, 1]) r0, r1, t = np.hsplit(result, 3) r2 = np.cross(r0.T, r1.T).T _, u, vt = cv.SVDecomp(np.hstack([r0, r1, r2])) R = u @ vt return Plane(origin=t[:, 0], normal=R[:, 2], R=R)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return 
None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def fit_to_plane(pts):\n # Compute x_mean, y_mean, z_mean\n \n n = len(pts)\n \n x_total = 0\n y_total = 0\n z_total = 0\n\n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_mean = x_total * 1.0 / n\n y_mean = y_total * 1.0 / n\n z_mean = z_total * 1.0 / n\n\n # Compute the p[i] = [x[i]-x_mean,y[i]-y.mean,z[i]-z.mean]\n p = []\n for i in range(n):\n p1 = pts[i][0] - x_mean\n p2 = pts[i][1] - y_mean\n p3 = pts[i][2] - z_mean\n p.append([p1, p2, p3])\n \n # Compute the matrix A\n a1 = 0\n a2 = 0\n a3 = 0\n a4 = 0\n a5 = 0\n a6 = 0\n for i in range(n):\n a1 += p[i][0] * p[i][0]\n a2 += p[i][0] * p[i][1]\n a3 += p[i][0] * p[i][2]\n a4 += p[i][1] * p[i][1]\n a5 += p[i][1] * p[i][2]\n a6 += p[i][2] * p[i][2]\n\n A = np.array([[a1, a2, a3], [a2, a4, a5], [a3, a5, a6]])\n\n # Compute the smallest eigen value and accordingly eigen vector of A\n w, v = np.linalg.eigh(A)\n\n # The minimal eigenvalue is w[0]\n eig = w[0]\n\n # The norm is eigenvector v[:,0]\n norm = v[:,0].tolist()\n d = -norm[0] * x_mean - norm[1] * y_mean - norm[2] * z_mean\n\n return norm, d", "def fit_to_plane(pts):\n # Compute x_mean, y_mean, z_mean\n \n n = len(pts)\n \n x_total = 0\n y_total = 0\n z_total = 0\n\n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_mean = x_total * 1.0 / n\n y_mean = y_total * 1.0 / n\n z_mean = z_total * 1.0 / n\n\n # Compute the p[i] = [x[i]-x_mean,y[i]-y.mean,z[i]-z.mean]\n p = []\n for i in range(n):\n p1 = pts[i][0] - x_mean\n p2 = pts[i][1] - y_mean\n p3 = pts[i][2] - z_mean\n p.append([p1, p2, p3])\n \n # Compute the matrix A\n a1 = 0\n a2 = 0\n a3 = 0\n a4 = 0\n a5 = 0\n a6 = 0\n for i in range(n):\n a1 += p[i][0] * p[i][0]\n a2 += p[i][0] * p[i][1]\n a3 += p[i][0] * p[i][2]\n a4 += p[i][1] * p[i][1]\n a5 += p[i][1] * p[i][2]\n a6 += p[i][2] * p[i][2]\n\n A = np.array([[a1, a2, a3], [a2, a4, a5], [a3, a5, a6]])\n\n # Compute the smallest eigen value and accordingly eigen vector of A\n w, v = np.linalg.eigh(A)\n\n # The minimal eigenvalue is w[0]\n eig = w[0]\n\n # The norm is eigenvector v[:,0]\n norm = v[:,0].tolist()\n d = -norm[0] * x_mean - norm[1] * y_mean - norm[2] * z_mean\n\n return norm, d", "def triangle_plane_intersection(self,p0,p1,p2,point,normal):\n\t\ttol=0.00001\n\t\n\t\t# handle all of the stupid cases before we do costly 
math\n\t\n\t\t#basic stuff\n\t\tp0dp=numpy.dot(p0-point,normal)\n\t\tp1dp=numpy.dot(p1-point,normal)\n\t\tp2dp=numpy.dot(p2-point,normal)\n\t\tp0ip=numpy.abs(p0dp)<tol # p0 in-plane\n\t\tp1ip=numpy.abs(p1dp)<tol # p1 in-plane\n\t\tp2ip=numpy.abs(p2dp)<tol # p02in-plane\n\n\t\t# are all vertices of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(p2ip): # yes, triangle is in the plane\n\t\t\treturn [p0,p1,p2]\n\t\n\t\t# are all vertices of the triangle on the same side?\n\t\tif (not(p0ip))&(not(p1ip))&(not(p2ip))&(numpy.sign(p0dp)==numpy.sign(p1dp))&(numpy.sign(p0dp)==numpy.sign(p2dp)): # yup, they are all on the same side\n\t\t\treturn []\n\t\n\t\t# is one vertex in the plane?\n\t\tif (p0ip)&(not(p1ip))&(not(p2ip)): #just p0 in plane\n\t\t\treturn [p0]\n\t\telif (not(p0ip))&(p1ip)&(not(p2ip)): #just p1 in plane\n\t\t\treturn [p1]\n\t\telif (not(p0ip))&(not(p1ip))&(p2ip): #just p2 in plane\n\t\t\treturn [p2]\n\t\n\t\t# is one line of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(not(p2ip)): #L1 in plane\n\t\t\treturn [p0,p1]\n\t\telif (not(p0ip))&(p1ip)&(p2ip): #L2 in plane\n\t\t\treturn [p1,p2]\n\t\telif (p0ip)&(not(p1ip))&(p2ip): #L3 in plane\n\t\t\treturn [p0,p2]\n\t\n\t\t# if we have gotten this far, we have to actually calculate intersections\n\t\tif numpy.sign(p0dp)==numpy.sign(p1dp):\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l2b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l2i,l3i]\n\t\telif numpy.sign(p2dp)==numpy.sign(p1dp):\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l1b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l3i]\n\t\telse:\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tif (l1b)&(l2b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l2i]\n\t\n\t\t# If the function makes it this far, I have no idea what is going on.\n\t\treturn \"bananna pants\"", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def getPlane(entry):\n\n \n \n a,b,c = getNewLattice(entry,2)\n a_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,a)\n b_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,b)\n fracs = np.cross(a_vector,b_vector)\n fracs /= min([x for x in fracs if abs(x)>1E-4])\n \n return(fracs)", "def hyperplane(self):\n origin = (self.a+self.b+self.c)/3.\n normal = np.cross(self.a-self.b, self.a-self.c)\n return Hyperplane(origin, normal)", "def find_plane_eq(p1, p2, p3):\n\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # the cross product is a vector normal to the plane\n cp = np.cross(v1, v2)\n a, b, c = cp\n\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = np.dot(cp, p3)\n\n plane_eq = np.array([a, b, c, d])\n\n return plane_eq", "def computeHomography(src_pnt: np.ndarray, dst_pnt: np.ndarray) -> (np.ndarray, float):\r\n\r\n A = []\r\n for i in range(0, len(src_pnt)):\r\n x, y = src_pnt[i][0], src_pnt[i][1]\r\n u, v = dst_pnt[i][0], dst_pnt[i][1]\r\n A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])# like we saw in class append for evey point two rows\r\n A.append([0, 0, 0, x, y, 1, -v * x, -v * y, 
-v])\r\n\r\n A = np.asarray(A)\r\n U, S, Vh = np.linalg.svd(A) # use SVD to find the values of the variables in the matrix\r\n L = Vh[-1, :] / Vh[-1, -1] # divided by the last row like we see in the exercise\r\n H = L.reshape(3, 3) # reshaping to 3 by 3\r\n print(H) # print our Homography\r\n #print openCv homography\r\n M, mask = cv2.findHomography(src_pnt, dst_pnt)\r\n print(\"=======================\")\r\n print(M)\r\n return H", "def general_plane_intersection(n_a, da, n_b, db):\n \n # https://en.wikipedia.org/wiki/Intersection_curve\n \n n_a = np.array(n_a)\n n_b = np.array(n_b)\n da = np.array(da)\n db = np.array(db)\n \n l_v = np.cross(n_a, n_b)\n norm_l = sqrt(np.dot(l_v, l_v))\n if norm_l == 0:\n return None\n else:\n l_v /= norm_l\n aa = np.dot(n_a, n_a)\n bb = np.dot(n_b, n_b)\n ab = np.dot(n_a, n_b)\n d_ = 1./(aa*bb - ab*ab)\n l_0 = (da*bb - db*ab)*d_*n_a + (db*aa - da*ab)*d_*n_b\n \n return l_v, l_0", "def borehole_plane_intersection(self):\n\n # 1. Step: Compute direction vectors to each borehole ==========================================================\n borehole_data = self.borehole_geometry.copy()\n borehole_data[\"depth\"] = 0\n borehole_to_global_coords(\n data=borehole_data,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n depth=\"depth\",\n upward_gradient=\"upward_gradient\",\n azimuth=\"azimuth\",\n )\n\n # Extract relevant columns from borehole data\n _mask = [\"borehole\", \"x_gts\", \"y_gts\", \"z_gts\", \"_trig_x\", \"_trig_y\", \"_trig_z\"]\n bh_data = borehole_data[_mask]\n\n mapper = {\n \"x_gts\": \"x_bh\",\n \"y_gts\": \"y_bh\",\n \"z_gts\": \"z_bh\",\n \"_trig_x\": \"r_x\",\n \"_trig_y\": \"r_y\",\n \"_trig_z\": \"r_z\",\n }\n bh_data = bh_data.rename(columns=mapper)\n\n # 2. Step: Calculate shear-zone unit normals and centroids =====================================================\n sz = self.planes()\n\n # 3. Step: Extract shear-zone borehole geometry ================================================================\n # i.e. only the shear-zones used for computing shear-zone planes.\n sz_bh = self.shearzone_borehole_geometry.copy()\n sz_bh = sz_bh[sz_bh.depth.notna()]\n sz_bh = sz_bh.rename(columns={\"depth\": \"old_depth\"})\n\n # 4. Step: Merge the collected data ============================================================================\n df = sz.merge(sz_bh, on=\"shearzone\").merge(bh_data, on=\"borehole\")\n\n # 5. Step: Calculate new shear-zone borehole intersections. 
====================================================\n # Quantities\n n_vec = [\"n_x\", \"n_y\", \"n_z\"]\n r_vec = [\"r_x\", \"r_y\", \"r_z\"]\n bh_coords = [\"x_bh\", \"y_bh\", \"z_bh\"]\n sz_coords = [\"x_c\", \"y_c\", \"z_c\"]\n\n # Depth calculation\n df[\"depth\"] = (\n (df[sz_coords].values - df[bh_coords].values) * df[n_vec].values\n ).sum(axis=1) / (df[n_vec].values * df[r_vec].values).sum(axis=1)\n\n # Calculate global coordinates\n df.loc[:, \"x_sz\"] = df.x_bh + (df.depth * df.r_x)\n df.loc[:, \"y_sz\"] = df.y_bh + (df.depth * df.r_y)\n df.loc[:, \"z_sz\"] = df.z_bh + (df.depth * df.r_z)\n\n return df", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def computeFacePlane(nodes, bFaces, bFacesN):\n # Number of boundary faces\n nBndFaces = np.size(bFaces)\n\n # Get computational domain limits\n min_x = np.min(nodes[:,0])\n max_x = np.max(nodes[:,0])\n min_y = np.min(nodes[:,1])\n max_y = np.max(nodes[:,1])\n min_z = np.min(nodes[:,2])\n max_z = np.max(nodes[:,2])\n\n # Set plane equation for each side\n # We consider following configuration:\n\n # g ------------- h\n # /| /| where:\n # / | / | a = [min_x, min_y, min_z]\n # / | / | b = [max_x, min_y, min_z]\n # / | / | c = [min_x, max_y, min_z]\n # e --------------f | d = [max_x, max_y, min_z]\n # | | | | e = [min_x, min_y, max_z]\n # | |c --------|----|d f = [max_x, min_y, max_z]\n # | / | / g = [min_x, max_y, max_z]\n # | / | / h = [max_x, max_y, max_z]\n # | / | /\n # a ------------- b\n\n # With xyz-axis:\n # Z+ Y+\n # | /\n # | /\n # |/\n # ------X+\n\n # Therefore, each plane of the cube is defined by two vectors:\n # Bottom: ab, ac ---> flag: 0\n # Left: ac, ae ---> flag: 1\n # Front: ab, ae ---> flag: 2\n # Rigth: bd, bf ---> flag: 3\n # Back: dc, dh ---> flag: 4\n # Top: ef, eg ---> flag: 5\n\n # Initialize main points\n a = np.array([min_x, min_y, min_z], np.float)\n b = np.array([max_x, min_y, min_z], np.float)\n c = np.array([min_x, max_y, min_z], np.float)\n d = np.array([max_x, max_y, min_z], np.float)\n e = np.array([min_x, min_y, max_z], np.float)\n f = np.array([max_x, min_y, max_z], np.float)\n g = np.array([min_x, max_y, max_z], np.float)\n h = np.array([max_x, max_y, max_z], np.float)\n\n # Compute normal vectors to planes\n normal_bottom = np.cross(a-b, a-c)\n normal_left = np.cross(a-c, a-e)\n normal_front = np.cross(a-b, a-e)\n normal_right = np.cross(b-d, b-f)\n normal_back = np.cross(d-c, d-h)\n normal_top = np.cross(e-f, e-g)\n\n # Allocate space for tag plane\n planeFace = np.zeros(nBndFaces, dtype=np.int)\n plane_list = np.zeros(6, dtype=np.float)\n\n # Solve plane equation for each boundary face\n for i in np.arange(nBndFaces):\n # Get nodes of face\n faceNodes = nodes[bFacesN[:,i],:]\n\n # Compute face centroid\n centroid = np.sum(faceNodes, 0)/3.\n\n # Solve equation for bottom plane\n plane_list[0] = np.dot(normal_bottom, centroid-a)\n # Solve equation for left plane\n plane_list[1] = np.dot(normal_left, centroid-a)\n # Solve equation for front plane\n plane_list[2] = np.dot(normal_front, centroid-a)\n # Solve equation for right plane\n plane_list[3] = np.dot(normal_right, centroid-b)\n # Solve equation for back plane\n plane_list[4] = np.dot(normal_back, centroid-d)\n # Solve equation for top 
plane\n plane_list[5] = np.dot(normal_top, centroid-e)\n # Get to what plane the face belongs\n # Flags for faces:\n # Bottom ---> flag: 0\n # Left ---> flag: 1\n # Front ---> flag: 2\n # Rigth ---> flag: 3\n # Back ---> flag: 4\n # Top ---> flag: 5\n planeFace[i] = np.where(np.abs(plane_list)<1.e-13)[0][0]\n\n return planeFace", "def homograph_warp(img,pose,plane,intrinsics_a,intrinsics_b,rotation_mode='so3',padding_mode='zeros'):\n\n check_sizes(img, 'img', 'B3HW')\n check_sizes(plane, 'depth', 'B4')\n check_sizes(pose, 'pose', 'B6')\n check_sizes(intrinsics_a, 'intrinsics_a', 'B33')\n check_sizes(intrinsics_b, 'intrinsics_b', 'B33')\n\n batch_size, _, img_height, img_width = img.size()\n\n b, h, w = batch_size,img_height,img_width\n if (pixel_coords is None) or pixel_coords.size(2) < h:\n set_id_grid(img)\n\n homo_mat = homo_vec2mat(pose,plane,intrinsics_a,intrinsics_b,rotation_mode)\n print(homo_mat.squeeze())\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w) # [B, 3, H,W]\n src_pixel_coords = homo_project(current_pixel_coords,homo_mat) # [B,H,W,2]\n projected_img = None\n if torch.__version__ !='1.1.0.post2':\n projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode,align_corners=False)\n\n else:\n projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)\n valid_points = src_pixel_coords.abs().max(dim=-1)[0] <= 1\n\n return projected_img, valid_points", "def BoundingBoxPlane(objs,plane,ret_pts=False,accurate=True):\n wxy_plane=Rhino.Geometry.Plane.WorldXY\n def __objectbbox(geom,xform):\n if isinstance(geom,Rhino.Geometry.Point):\n pt=geom.Location\n if xform: pt = xform * pt\n return Rhino.Geometry.BoundingBox(pt,pt)\n if xform: return geom.GetBoundingBox(xform)\n return geom.GetBoundingBox(accurate)\n \n xform = Rhino.Geometry.Transform.ChangeBasis(wxy_plane, plane)\n bbox = Rhino.Geometry.BoundingBox.Empty\n if type(objs) is list or type(objs) is tuple:\n for obj in objs:\n objectbbox = __objectbbox(obj, xform)\n bbox = Rhino.Geometry.BoundingBox.Union(bbox,objectbbox)\n else:\n objectbbox = __objectbbox(objs, xform)\n bbox = Rhino.Geometry.BoundingBox.Union(bbox,objectbbox)\n if not bbox.IsValid: return\n plane_to_world = Rhino.Geometry.Transform.ChangeBasis(plane,wxy_plane)\n if ret_pts:\n corners = list(bbox.GetCorners())\n for pt in corners: pt.Transform(plane_to_world)\n return corners\n else:\n box=Rhino.Geometry.Box(bbox)\n box.Transform(plane_to_world)\n return box", "def compute_corners(landmarks, mode):\n right_to_left_eye = landmarks[:, :2] - landmarks[:, 2:4]\n middle_eye = (landmarks[:, :2] + landmarks[:, 2:4]) / 2\n eye_to_mouth = landmarks[:, 4:6] - middle_eye\n centers = middle_eye\n\n if np.linalg.norm(right_to_left_eye) > np.linalg.norm(eye_to_mouth):\n vec_right = right_to_left_eye\n vec_down = np.fliplr(vec_right).copy()\n vec_down[:, 0] *= -1.\n else:\n vec_down = eye_to_mouth\n vec_right = np.fliplr(vec_down).copy()\n vec_right[:, 1] *= -1.\n\n if mode == 'face':\n scale = 1.8\n elif mode == 'eyes':\n vec_down *= 0.33\n scale = 1.\n else:\n raise NotImplementedError()\n\n diag = scale * (vec_right + vec_down)\n top_left = centers - diag\n top_right = top_left + 2 * scale * vec_right\n bottom_left = top_left + 2 * scale * vec_down\n bottom_right = centers + diag\n\n return top_left, top_right, bottom_left, bottom_right", "def project_plane_to_2d(xyz_arr, img, center, dist_thresh):\n\tplane_img = np.zeros(img.size)\n\tplane_img[xyz_arr[:, 2] > dist_thresh + center[2]] = 1\n\n\tplane_img = 
np.uint8(np.reshape(plane_img, (424, 512)) * 255) # reshape to match depth data and convert to uint8\n\tplane_img = np.uint8(\n\t\t(np.ones((424, 512)) * 255) - plane_img) # invert img so pixel value corresponds to NOT ground plane\n\tret, plane_img = cv2.threshold(plane_img, 0, 255,\n\t\t\t\t\t\t\t\t cv2.THRESH_BINARY) # filter points that are probaly not ground plane\n\tplane_img = cv2.subtract(img, plane_img)\n\treturn plane_img", "def bestfit_plane_from_points(points):\n centroid = centroid_points(points)\n\n xx, xy, xz = 0., 0., 0.\n yy, yz, zz = 0., 0., 0.\n\n for point in points:\n rx, ry, rz = subtract_vectors(point, centroid)\n xx += rx * rx\n xy += rx * ry\n xz += rx * rz\n yy += ry * ry\n yz += ry * rz\n zz += rz * rz\n\n det_x = yy * zz - yz * yz\n det_y = xx * zz - xz * xz\n det_z = xx * yy - xy * xy\n\n det_max = max(det_x, det_y, det_z)\n\n if det_max == det_x:\n a = (xz * yz - xy * zz) / det_x\n b = (xy * yz - xz * yy) / det_x\n normal = (1., a, b)\n elif det_max == det_y:\n a = (yz * xz - xy * zz) / det_y\n b = (xy * xz - yz * xx) / det_y\n normal = (a, 1., b)\n else:\n a = (yz * xy - xz * yy) / det_z\n b = (xz * xy - yz * xx) / det_z\n normal = (a, b, 1.)\n\n return centroid, normalize_vector(normal)", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def polygon_descriptors(corners):\n n_points = corners.shape[1]\n p, a, cx, cy = 0, 0, 0, 0\n for i in xrange(n_points):\n j = (i+1) % n_points\n dot = corners[0,i]*corners[1,j] - corners[0,j]*corners[1,i]\n a += dot\n cx += (corners[0,i] + corners[0,j]) * dot\n cy += (corners[1,i] + corners[1,j]) * dot\n p += np.linalg.norm(corners[:,i] - corners[:,j])\n a /= 2\n cx /= 6*a\n cy /= 6*a\n a = abs(a)\n return (p, a, (cx,cy))", "def random_hyperplane(bounds):\n\n (bounds, dimension) = infer_dimension(bounds)\n intercept_points = random_plane_points(dimension, bounds)\n\n # Solve for weights. 
Bias is arbitrary\n bias = 1\n line_weights = np.linalg.solve(intercept_points, \n np.ones((dimension, 1)) * - bias)\n line_weights *= np.sign(np.random.rand(1) - 0.5) # Randomize direction\n\n return np.append(bias, line_weights)", "def get_h_matrices(poly_curve, x, y, orientation=0):\n orientations = {'bottom_right': 0, 'bottom_left': 1, 'top_right': 2, 'top_left': 3}\n x_width = np.zeros(4, dtype=int)\n y_width = np.zeros(4, dtype=int)\n x_center = np.array([0, x, x, 0])\n y_center = np.array([0, 0, y, y])\n\n # Define width to perform homogeneous transforms\n if orientation == orientations['bottom_right']:\n x_width[0], y_width[0] = poly_curve[0][0][0], poly_curve[0][0][1]\n x_width[1], y_width[1] = poly_curve[1][0][0], poly_curve[1][0][1]\n x_width[2], y_width[2] = poly_curve[2][0][0], poly_curve[2][0][1]\n x_width[3], y_width[3] = poly_curve[3][0][0], poly_curve[3][0][1]\n elif orientation == orientations['bottom_left']:\n x_width[0], y_width[0] = poly_curve[1][0][0], poly_curve[1][0][1]\n x_width[1], y_width[1] = poly_curve[2][0][0], poly_curve[2][0][1]\n x_width[2], y_width[2] = poly_curve[3][0][0], poly_curve[3][0][1]\n x_width[3], y_width[3] = poly_curve[0][0][0], poly_curve[0][0][1]\n elif orientation == orientations['top_right']:\n x_width[0], y_width[0] = poly_curve[2][0][0], poly_curve[2][0][1]\n x_width[1], y_width[1] = poly_curve[3][0][0], poly_curve[3][0][1]\n x_width[2], y_width[2] = poly_curve[0][0][0], poly_curve[0][0][1]\n x_width[3], y_width[3] = poly_curve[1][0][0], poly_curve[1][0][1]\n elif orientation == orientations['top_left']:\n x_width[0], y_width[0] = poly_curve[3][0][0], poly_curve[3][0][1]\n x_width[1], y_width[1] = poly_curve[0][0][0], poly_curve[0][0][1]\n x_width[2], y_width[2] = poly_curve[1][0][0], poly_curve[1][0][1]\n x_width[3], y_width[3] = poly_curve[2][0][0], poly_curve[2][0][1]\n else:\n print('Incorrect Orientation!!')\n quit()\n\n # Evaluate the A matrix\n a_mat = [[x_width[0], y_width[0], 1, 0, 0, 0, -x_center[0] * x_width[0], -x_center[0] * y_width[0], -x_center[0]],\n [0, 0, 0, x_width[0], y_width[0], 1, -y_center[0] * x_width[0], -y_center[0] * y_width[0], -y_center[0]],\n [x_width[1], y_width[1], 1, 0, 0, 0, -x_center[1] * x_width[1], -x_center[1] * y_width[1], -x_center[1]],\n [0, 0, 0, x_width[1], y_width[1], 1, -y_center[1] * x_width[1], -y_center[1] * y_width[1], -y_center[1]],\n [x_width[2], y_width[2], 1, 0, 0, 0, -x_center[2] * x_width[2], -x_center[2] * y_width[2], -x_center[2]],\n [0, 0, 0, x_width[2], y_width[2], 1, -y_center[2] * x_width[2], -y_center[2] * y_width[2], -y_center[2]],\n [x_width[3], y_width[3], 1, 0, 0, 0, -x_center[3] * x_width[3], -x_center[3] * y_width[3], -x_center[3]],\n [0, 0, 0, x_width[3], y_width[3], 1, -y_center[3] * x_width[3], -y_center[3] * y_width[3], -y_center[3]]]\n # Get inverse homogeneous transform using svd\n _, _, v_h = np.linalg.svd(a_mat, full_matrices=True)\n h_mat = np.array(v_h[8, :] / v_h[8, 8]).reshape((-1, 3))\n inv_h = np.linalg.inv(h_mat)\n # Return inverse homogeneous transform\n return h_mat, inv_h", "def distance_from_xy_plane(p,r):\n return np.abs(p[2]-r[2])", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def corners(self, right = True):\n\n # The coordinates of 
the corners of the untransformed section, in\n # counter-clockwise order starting at the upper, left corner.\n coords = [[-0.5 * self.size[0], -0.5 * self.size[1]],\n [-0.5 * self.size[0], +0.5 * self.size[1]],\n [+0.5 * self.size[0], +0.5 * self.size[1]],\n [+0.5 * self.size[0], -0.5 * self.size[1]]]\n\n # Determine the cosine and sine of the rotation angle, rounded to\n # a multiple of 90 degrees if appropriate.\n if (right):\n a = math.radians(90.0 * round(self.angle / 90.0))\n else:\n a = math.radians(self.angle)\n c = math.cos(a)\n s = math.sin(a)\n\n # Apply plane rotation, and translation.\n for i in range(len(coords)):\n p = coords[i]\n coords[i] = [c * p[0] - s * p[1] + self.center[0],\n s * p[0] + c * p[1] + self.center[1]]\n return (coords)" ]
[ "0.58668405", "0.58515084", "0.5808322", "0.5808322", "0.5799701", "0.5799701", "0.5593243", "0.55419457", "0.5481281", "0.5459112", "0.54404145", "0.5418466", "0.541714", "0.53940755", "0.53733194", "0.5373061", "0.5357221", "0.5352762", "0.5341513", "0.53396374", "0.5331517", "0.5328911", "0.53193164", "0.5306342", "0.52943134", "0.5289913", "0.5216413", "0.5212621", "0.5211346", "0.5204207" ]
0.6981937
0
Given an image and a rectangle defining a region, return the laser points detected in that region. When the region under consideration is the wall or the desk, require at least 30 points for better accuracy.
def get_laser_points_in_region( self, image: np.ndarray, region: Rectangle, is_obj: bool = False, ) -> Optional[np.ndarray]: top_left = region.top_left bottom_right = region.bottom_right region_image = image[top_left.y : bottom_right.y, top_left.x : bottom_right.x] image_inv = cv.cvtColor(~region_image, cv.COLOR_BGR2HSV) lower_red = self.lower_red_obj if is_obj else self.lower_red_planes red_mask = cv.inRange(image_inv, lower_red, self.upper_red) laser_points = cv.findNonZero(red_mask) if laser_points is None or (not is_obj and len(laser_points) < 30): return None return laser_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_laser_points(\r\n self,\r\n original_image: np.ndarray,\r\n image: np.ndarray,\r\n extreme_points: ExtremePoints,\r\n ) -> Tuple[\r\n Optional[np.ndarray],\r\n Optional[np.ndarray],\r\n Optional[np.ndarray],\r\n Optional[np.ndarray],\r\n ]:\r\n height, width = image.shape[:2]\r\n ymin_wall = extreme_points.wall.top_left.y\r\n ymax_wall = extreme_points.wall.bottom_right.y\r\n ymin_desk = extreme_points.desk.top_left.y\r\n xmin = extreme_points.desk.top_left.x\r\n laser_desk = self.get_laser_points_in_region(\r\n image=image,\r\n region=Rectangle(\r\n top_left=Point(0, ymin_desk - ymin_wall),\r\n bottom_right=Point(width, height),\r\n ),\r\n )\r\n if laser_desk is not None:\r\n laser_wall = self.get_laser_points_in_region(\r\n image=image,\r\n region=Rectangle(\r\n top_left=Point(0, 0),\r\n bottom_right=Point(width, ymax_wall - ymin_wall),\r\n ),\r\n )\r\n if laser_wall is not None:\r\n laser_obj = self.get_laser_points_in_region(\r\n image=image,\r\n region=Rectangle(\r\n top_left=Point(0, ymax_wall - ymin_wall),\r\n bottom_right=Point(width, ymin_desk - ymin_wall),\r\n ),\r\n is_obj=True,\r\n )\r\n if laser_obj is not None:\r\n laser_desk = self.offset_points(\r\n points=laser_desk, offset=Point(xmin, ymin_desk)\r\n )\r\n laser_wall = self.offset_points(\r\n points=laser_wall, offset=Point(xmin, ymin_wall)\r\n )\r\n laser_obj = self.remove_obj_outliers(laser_obj)\r\n if laser_obj is not None:\r\n laser_obj = self.offset_points(\r\n points=laser_obj, offset=Point(xmin, ymax_wall)\r\n )\r\n obj_colors = self.get_colors(original_image, laser_obj)\r\n return laser_wall, laser_desk, laser_obj, obj_colors\r\n return None, None, None, None", "def find_lane(self, warped_img):\n if not self.detected:\n # Use sliding window if lanes are not detected on the previous frame\n leftx, lefty, rightx, righty, out_img = leftx, lefty, rightx, righty, out_img = find_lane_sliding_window(warped_img)\n left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(warped_img.shape, leftx, lefty, rightx, righty)\n curvature, distance = curvature_and_position(ploty, left_fit, right_fit, warped_img.shape[1])\n self.last_left_fit = left_fit\n self.last_right_fit = right_fit\n self.last_ploty = ploty\n self.detected = True\n return left_fitx, right_fitx, ploty, curvature, distance\n \n else:\n try:\n leftx, lefty, rightx, righty, out_img = find_lane_from_prior(warped_img, self.last_left_fit, self.last_right_fit, self.ploty)\n left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(warped_img.shape, leftx, lefty, rightx, righty)\n curvature, distance = curvature_and_position(ploty, left_fit, right_fit, warped_img.shape[1])\n \n # If the distance doesn't make sense, use sliding window to search again\n if abs(distance) > self.tol_dist:\n self.detected = False\n return self.find_lane(warped_img)\n else:\n self.last_left_fit = left_fit\n self.last_right_fit = right_fit\n self.last_ploty = ploty\n self.detected = True\n return left_fitx, right_fitx, ploty, curvature, distance\n except:\n # Exception raised by fitpoly when left/right is empty\n self.detected = False\n return self.find_lane(warped_img)", "def findslopes(img):\n img = img.astype(np.float32)\n DY = np.array([[-1,-1,-1],[0, 0, 0],[1, 1, 1]]) * 1/6\n DX = DY.transpose()\n gradx = cv2.filter2D(src=img, ddepth=-1, kernel=DX)\n grady = cv2.filter2D(src=img, ddepth=-1, kernel=DY)\n\n D2Y = np.array([[0.5, 1, 0.5], [-1, -2, -1], [0.5, 1, 0.5]]) * 0.5\n D2X = D2Y.transpose()\n DXY = np.array([[-1, 0, 1], [0, 0, 0], [1, 0, -1]]) * 1/4\n grad2x = 
cv2.filter2D(src=img, ddepth=-1, kernel=D2X)\n grad2y = cv2.filter2D(src=img, ddepth=-1, kernel=D2Y)\n gradxy = cv2.filter2D(src=img, ddepth=-1, kernel=DXY)\n\n slopes = gradx**2 + grady**2\n slopes2 = grad2x**2 + grad2y**2 + 2 * gradxy**2\n\n return (slopes, gradx, grady, slopes2, grad2x, grad2y, gradxy)", "def draw_laser_ranges():\n NUM_RANGES = len(D.ranges) # should be 360\n if False: #for easy commenting out...\n for angle in range(NUM_RANGES):\n print angle, \":\", D.ranges[angle] \n \n # helpful starting points, perhaps:\n # add line to the ranges image, \"D.image\"\n #cv.Line(D.image, (42,100), (100,42), cv.RGB(255, 0, 0), 1) # 1 == thickness\n # add dots to image being used to compute the Hough tr. \"D.hough\"\n # cv.Line(D.hough, (42,42), (42,42), 255, 2) # 1 == thickness\n for angle in range(NUM_RANGES):\n point = (CENTER + int(0.2*D.ranges[angle]*sin(radians(angle))), CENTER + int(0.2*D.ranges[angle]*cos(radians(angle))))\n cv.Line(D.image, (CENTER,CENTER), point, cv.RGB(255, 0 , 0), 1)\n cv.Line(D.hough, point, point, 255, 2) \n\n return", "def get_landmarks(self,image):\n landmarks=[]\n #Convert image to gray\n gray = cvtColor(image,COLOR_BGR2GRAY)\n #deNoise image\n gray = medianBlur(gray,self.kernalSize)\n #Convert into a binary image\n th2 = adaptiveThreshold(gray,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,\n self.thresholdingBlockSize,self.thresholdingConstant)\n #Find edges in image\n edges = Canny(th2,self.edgeThreshold1,self.edgeThreshold2,\n apertureSize = self.edgeApertureSize)\n #Find lines in image\n lines = HoughLines (edges,self.distanceRange,\n self.angleRange,self.lineThreshold)\n if lines is not None:\n for line in lines: \n for rho,theta in line:\n if theta < 0.5 or theta > math.pi-0.5: #~20 degrees\n angle = (rho*self.fieldOfView/image.shape[1])-(self.fieldOfView/2)\n #landmarks.append((radian(angles),self.angularError))\n landmarks.append((rho, theta))\n pass\n return landmarks", "def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):\r\n\r\n def sample_func(sample_point):\r\n \"\"\" Method used to sample image. 
\"\"\"\r\n if sample_point[0] < 0 \\\r\n or sample_point[1] < 0 \\\r\n or sample_point[0] >= image.width \\\r\n or sample_point[1] >= image.height:\r\n return 0\r\n\r\n point_tuple = sample_point[0], sample_point[1]\r\n color = image.getpixel(point_tuple)\r\n if color[3] > 0:\r\n return 255\r\n else:\r\n return 0\r\n\r\n # Do a quick check if it is a full tile\r\n p1 = 0, 0\r\n p2 = 0, image.height - 1\r\n p3 = image.width - 1, image.height - 1\r\n p4 = image.width - 1, 0\r\n\r\n if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):\r\n # Do a quick check if it is a full tile\r\n p1 = (-image.width / 2, -image.height / 2)\r\n p2 = (image.width / 2, -image.height / 2)\r\n p3 = (image.width / 2, image.height / 2)\r\n p4 = (-image.width / 2, image.height / 2)\r\n\r\n return p1, p2, p3, p4\r\n\r\n # Get the bounding box\r\n logo_bb = pymunk.BB(-1, -1, image.width, image.height)\r\n\r\n # Set of lines that trace the image\r\n line_set = pymunk.autogeometry.PolylineSet()\r\n\r\n # How often to sample?\r\n downres = 1\r\n horizontal_samples = int(image.width / downres)\r\n vertical_samples = int(image.height / downres)\r\n\r\n # Run the trace\r\n # Get back one or more sets of lines covering stuff.\r\n line_sets = pymunk.autogeometry.march_soft(\r\n logo_bb,\r\n horizontal_samples, vertical_samples,\r\n 99,\r\n sample_func)\r\n\r\n if len(line_sets) == 0:\r\n return []\r\n\r\n selected_line_set = line_sets[0]\r\n selected_range = None\r\n if len(line_set) > 1:\r\n # We have more than one line set. Try and find one that covers most of\r\n # the sprite.\r\n for line in line_set:\r\n min_x = None\r\n min_y = None\r\n max_x = None\r\n max_y = None\r\n for point in line:\r\n if min_x is None or point.x < min_x:\r\n min_x = point.x\r\n if max_x is None or point.x > max_x:\r\n max_x = point.x\r\n if min_y is None or point.y < min_y:\r\n min_y = point.y\r\n if max_y is None or point.y > max_y:\r\n max_y = point.y\r\n\r\n if min_x is None or max_x is None or min_y is None or max_y is None:\r\n raise ValueError(\"No points in bounding box.\")\r\n\r\n my_range = max_x - min_x + max_y + min_y\r\n if selected_range is None or my_range > selected_range:\r\n selected_range = my_range\r\n selected_line_set = line\r\n\r\n # Reduce number of vertices\r\n # original_points = len(selected_line_set)\r\n selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,\r\n hit_box_detail)\r\n # downsampled_points = len(selected_line_set)\r\n\r\n # Convert to normal points, offset fo 0,0 is center, flip the y\r\n hh = image.height / 2\r\n hw = image.width / 2\r\n points = []\r\n for vec2 in selected_line_set:\r\n point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)\r\n points.append(point)\r\n\r\n if len(points) > 1 and points[0] == points[-1]:\r\n points.pop()\r\n\r\n # print(f\"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}\")\r\n return points", "def get_edges(image):\n if len(image.shape) == 3:\n # has more than one channel\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n kernel = np.ones((5, 5), np.float32)/5\n dst = cv2.filter2D(image, -1, kernel)\n lap = cv2.Laplacian(dst, cv2.CV_64F)\n return lap", "def detect_lane_edges(image):\n \n GAUSS_KERNEL = 7 # must be odd <7,5>\n \n CANNY_LOW = 100 # not an edge\n CANNY_HIGH = 200 # definitely an edge\n \n REGION_TRAP_XB = 5 # percent in horizontally from edge for bottom of trapezoid\n REGION_TRAP_XT = 45 # percent in 
horizontally from edge for top of trapezoid\n REGION_TRAP_YT = 60 # percent down vertically from edge for top of trapezoid\n \n image_wk = np.copy(image) # working copy\n \n # Apply Gaussian blur\n image_wk = gaussian_blur(image_wk, GAUSS_KERNEL)\n \n # Apply Canny edge detection\n image_wk = canny(image_wk, CANNY_LOW, CANNY_HIGH)\n \n # Apply trapezoidal region mask\n im_y = image_wk.shape[0]\n im_x = image_wk.shape[1]\n trap_bl = (np.int32(REGION_TRAP_XB/100*im_x), im_y)\n trap_tl = (np.int32(REGION_TRAP_XT/100*im_x), np.int32(REGION_TRAP_YT/100*im_y))\n trap_tr = (im_x - np.int32(REGION_TRAP_XT/100*im_x), np.int32(REGION_TRAP_YT/100*im_y))\n trap_br = (im_x - np.int32(REGION_TRAP_XB/100*im_x), im_y)\n vertices = np.array([[trap_bl, trap_tl, trap_tr, trap_br]], dtype=np.int32)\n image_wk = region_of_interest(image_wk, vertices)\n \n # Output edge-detected image masked by trapezoidal region\n return image_wk", "def set_lanes(left_lines, right_lines, image):\n \n Y_LANE_EXTRAP = 35 # percent up from bottom of image to extrapolate lane lines\n \n image_wk = np.copy(image) # working copy\n image_lines = np.copy(image_wk)*0 # create a blank to draw lines on\n im_y = image_wk.shape[0]\n \n y1_lane = im_y\n y2_lane = np.int32(im_y - (Y_LANE_EXTRAP/100*im_y))\n \n # Process left lane\n if left_lines:\n z_left = my_linear_polyfit(left_lines)\n x1_lane = np.int32( (y1_lane - z_left[1]) / z_left[0] ) # x = (y-b)/m\n x2_lane = np.int32( (y2_lane - z_left[1]) / z_left[0] )\n \n # Draw left lane on blank image\n cv2.line(image_lines, (x1_lane, y1_lane), (x2_lane, y2_lane), (100,100,100), 15)\n \n # Process right lane\n if right_lines:\n z_right = my_linear_polyfit(right_lines)\n x1_lane = np.int32( (y1_lane - z_right[1]) / z_right[0] ) # x = (y-b)/m\n x2_lane = np.int32( (y2_lane - z_right[1]) / z_right[0] )\n \n # Draw right lane on blank image\n cv2.line(image_lines, (x1_lane, y1_lane), (x2_lane, y2_lane), (100,100,100), 15)\n \n # Overlay detected left/right lanes on road image\n image_wk = weighted_img(image_lines, image_wk)\n \n # Output road image with overlaid left/right lanes\n return image_wk", "def find_lane_slow(img):\n histogram = np.sum(img[img.shape[0] // 2:, :], axis=0)\n\n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram.shape[0] / 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n # Choose the number of sliding windows\n nwindows = 9\n # Set height of windows\n window_height = np.int(img.shape[0] / nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n # Set the width of the windows +/- margin\n margin = 150\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = 
rightx_current + margin\n\n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\n nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\n nonzerox < win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n\n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n # Fit a second order polynomial to each\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n\n return left_fit, right_fit", "def my_lane_detection_pipeline(image, debug_images=False):\n\n # Step 1 - Filter and enhance image by lane color\n image_s1 = filter_lane_color(image)\n \n # Step 2 - Canny edge detection with Gaussian blur and region mask\n image_s2 = detect_lane_edges(image_s1)\n \n # Step 3 - Raw line detection by Hough transform and classify left/right by angle\n (image_s3, left_lines, right_lines) = detect_lane_lines(image_s2, image)\n \n # Step 4 - Set left/right lanes by weighted linear polyfit of raw lines\n image_s4 = set_lanes(left_lines, right_lines, image_s3)\n \n # Save images of each step for debugging and documentation\n if debug_images:\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s0.jpg'), image)\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s1.jpg'), image_s1, cmap = 'gray')\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s2.jpg'), image_s2)\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s3.jpg'), image_s3)\n mpimg.imsave('test_images_output/'+image_name.replace('.jpg','_s4.jpg'), image_s4)\n \n # Output image with overlaid raw lane lines and detected left/right lanes\n return image_s4", "def traffic_light_detection(img_in, radii_range, noisy_image=False, max_x_offset=5):\n\n img = process_base_image(img_in, (7, 7))\n\n # find all the circles in an image using Hough Circles\n min_radii = min(radii_range)\n max_radii = max(radii_range)\n # the distance between the circles should be the smallest possible circles that can touch.\n min_dist = min_radii * 2 + 10\n\n # img, dp, min_dist, param1, param2, minRad, maxRad\n if noisy_image:\n circles = hough_circles(img, 1.55, min_dist, 20, 15, min_radii, max_radii)\n else:\n circles = hough_circles(img, 1.125, min_dist, 30, 20, min_radii, max_radii)\n\n if circles is None:\n return (0, 0), None\n else:\n # cleanup circles so its easier to use.\n circles = circles[0, :]\n # round the numbers of the array to uint16 values.\n circles = np.uint16(np.around(circles))\n\n if len(circles) < 3:\n return (1000, 1000), None\n else: # If there are more than 3 circles found, eliminate the outliers that shouldn't be detected.\n # sort the circles first by x, then by Radius value, then by Y value.\n circles = sorted(circles, key=lambda c: 
(c[0], c[2], c[1]))\n\n # since the traffic lights will be a group of 3 circles with a similar radius, then x value, then somewhat close\n # in y value, use a \"window\" type of sliding group to create groups of 3 circles that can then be compared\n # to each other to see if they would make up circles of a traffic light.\n circle_groups = []\n for c_idx in range(len(circles) - 2):\n circle_group = circles[c_idx: c_idx + 3] # build the group\n circle_groups.append(circle_group)\n\n circle_groups = np.array(circle_groups)\n # for each circle group found, need to figure out the group with the lowest overall standard deviation.\n # for each group, calculate the std deviations.\n group_deviations = np.array([circle_group_deviations(g) for g in circle_groups])\n\n most_similar_idx = np.argmin(group_deviations)\n final_circles = circle_groups[most_similar_idx]\n\n # if the circles aren't close to each other in the X direction, return\n # none since its not a traffic light.\n x_diffs = np.diff(final_circles[:, 0])\n if np.any(x_diffs >= max_x_offset):\n return (None, None), None\n\n # sort the circles from top down to allow color compare.\n circles = final_circles[np.argsort(final_circles[:, 1])] # sort by Y direction.\n # creating some names for clarity due to x, y being col, row.\n red_row, red_col, yellow_row, yellow_col, green_row, green_col = [\n circles[0][1],\n circles[0][0],\n circles[1][1],\n circles[1][0],\n circles[2][1],\n circles[2][0],\n ]\n\n # determine colors.\n state = 'yellow' # default state.\n cords = (yellow_col, yellow_row)\n\n red_color = np.array([0, 0, 255])\n green_color = np.array([0, 255, 0])\n\n # stop for false positive labels.\n if img_in[yellow_row, yellow_col][0] > 10:\n return (None, None), None\n\n if (img_in[red_row, red_col] == red_color).all():\n state = 'red'\n elif (img_in[green_row, green_col] == green_color).all():\n state = 'green'\n\n # print 'Color of TL midpoint is {}'.format(img_in[yellow_row, yellow_col])\n\n return cords, state", "def detect_lane_lines(image_edges, image_orig):\n \n HOUGH_RHO = 1 # distance resolution in pixels of the Hough grid\n HOUGH_THETA = np.pi/180 # angular resolution in radians of the Hough grid\n HOUGH_THRESH = 15 # minimum number of votes (intersections in Hough grid cell) <15,20>\n HOUGH_MIN_LEN = 40 # minimum number of pixels making up a line <40,100>\n HOUGH_MAX_GAP = 100 # maximum gap in pixels between connectable line segments <100,250>\n \n LINE_MIN_ANGLE = 20 # degrees\n \n image_wk = np.copy(image_orig) # working copy\n \n # Run Hough transform on edge-detected image\n raw_lines = cv2.HoughLinesP(image_edges, HOUGH_RHO, HOUGH_THETA, HOUGH_THRESH, np.array([]),\n HOUGH_MIN_LEN, HOUGH_MAX_GAP)\n \n # Group lines by left/right angle and side of center line\n left_lines = []\n right_lines = []\n x_center = np.int32((image_wk.shape[1]/2))\n for line in raw_lines:\n for x1, y1, x2, y2 in line:\n theta = np.arctan((y2-y1)/(x2-x1)) /np.pi*180\n \n if (theta < -LINE_MIN_ANGLE) and (x1 < x_center) and (x2 < x_center):\n left_lines.append(line)\n \n elif (theta > LINE_MIN_ANGLE) and (x1 > x_center) and (x2 > x_center):\n right_lines.append(line)\n \n # Draw raw left/right lines on road image\n draw_lines(image_wk, left_lines, (255,0,255), 2)\n draw_lines(image_wk, right_lines, (0,255,0), 2)\n \n # Output road image with drawn raw lines and lists of left/right line coordinates\n return (image_wk, left_lines, right_lines)", "def draw_lane_on_img(undist, warped, left_fitx, right_fitx, ploty, Minv, curvature, distance):\n # 
Create an image to draw the lines on\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n # Draw lane boundary\n left_lane = np.int32(pts_left)\n right_lane = np.int32(pts_right)\n color_warp = cv2.polylines(color_warp, [left_lane], False, (255,0,0), 16)\n color_warp = cv2.polylines(color_warp, [right_lane], False, (0, 0, 255), 16)\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0])) \n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.4, 0)\n\n # Write out the curvature and relative position\n curv_info = \"Radius of Curvature ={0:.2f}(m).\".format(curvature)\n if distance > 0:\n pos_info = \" Car is {0:.2f}m right of center.\".format(distance)\n else:\n pos_info = \" Car is {0:.2f}m left of center.\".format(-distance)\n info = curv_info + pos_info\n cv2.putText(result, info, (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, bottomLeftOrigin=False)\n\n return result", "def process_image(image):\n \n # (step 1) get gray image\n gray = grayscale(image)\n \n # (step 2) do gaussian blur with kernel size is 3\n blur_gray = gaussian_blur(gray, 3)\n \n # (step 3) do canny edge detction with low 50 and hight 150\n canny_edges = canny(blur_gray, 50, 150)\n \n # (step 4) region of interset\n imshape = image.shape\n left_bottom = (50,imshape[0])\n right_bottom = (imshape[1]-50,imshape[0])\n left_top = (420, 330)\n right_top = (imshape[1]-420, 330)\n # used later to discard lines which are out of the ROI\n polygon = Polygon([(50,imshape[0]+1),(imshape[1]-50,imshape[0]+1), (imshape[1]-420, 329), (420, 329)])\n vertices = np.array([[left_bottom,left_top, right_top, right_bottom]], dtype=np.int32)\n masked_edge = region_of_interest(canny_edges, vertices)\n \n # (step 5) get lane lines from hough transform\n rho = 2\n theta = np.pi/18 \n threshold = 15\n min_line_length = 10\n max_line_gap = 20\n lines = hough_lines(masked_edge, rho, theta, threshold, min_line_length, max_line_gap)\n \n # (step 6) seperate left and right lines\n left_lines = []\n right_lines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n if y1 > y2:\n temp_line = [x1,y1,x2,y2]\n if x2 != x1:\n m = (float(y2) - float(y1)) / (float(x2) - float(x1))\n else:\n m = 1000 # it will be dicarded, any high value will work\n temp_line.append(m)\n if x1 < x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n else:\n temp_line = [x2,y2,x1,y1]\n if x2 != x1:\n m = (float(y1) - float(y2)) / (float(x1) - float(x2))\n else:\n m = 1000\n temp_line.append(m)\n if x1 > x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n \n # (step 7) get left and right lines slopes, can be done with step 6 although\n left_slop = []\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; \n if x1 != x2:\n left_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_left_slop = sum(left_slop)/len(left_slop) # not used yet\n \n right_slop = []\n for right_line 
in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; \n if x1 != x2:\n right_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_right_slope = sum(right_slop)/len(right_slop) # not used yet\n \n \n # (step 8) delete left lines which deviate from thersold_s slope\n thersold_s = 0.4\n delet_left_index = []\n i = 0\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; m = left_line[4]; \n if abs(m) < thersold_s:\n delet_left_index.append(i)\n i=i+1\n for i in range((len(delet_left_index)-1), -1, -1):\n del left_lines[delet_left_index[i]]\n \n # (step 9) delete right lines which deviate from average slope\n delet_index_right = []\n i = 0\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; m = right_line[4]; \n if abs(m) < thersold_s:\n delet_index_right.append(i)\n i=i+1\n for i in range((len(delet_index_right)-1), -1, -1):\n del right_lines[delet_index_right[i]]\n \n # (step 10) extrapolate left and right lines\n left_line_draw = True\n x_lefts = []\n y_lefts = []\n for line in left_lines:\n x1, y1, x2, y2, m = line\n x_lefts.append(x1)\n x_lefts.append(x2) \n y_lefts.append(y1)\n y_lefts.append(y2)\n \n if len(x_lefts) > 0:\n slope_left, c_left = np.polyfit(x_lefts, y_lefts, 1)\n else:\n slope_left, c_left = 1, 1\n left_line_draw = False\n \n right_line_draw = True\n x_rights = []\n y_rights = []\n for line in right_lines:\n x1, y1, x2, y2, m = line\n x_rights.append(x1)\n x_rights.append(x2)\n y_rights.append(y1)\n y_rights.append(y2)\n if len(x_rights) > 0:\n slope_right, c_right = np.polyfit(x_rights, y_rights, 1)\n else:\n slope_right, c_right = 1, 1\n right_line_draw = False\n \n y1_left = 530 # again hardcoded values, from ROI\n y2_left = 330 # again hardcoded values, from ROI\n x1_left = int((y1_left - c_left) / slope_left)\n x2_left = int((y2_left - c_left) / slope_left)\n \n y1_right = 530 # again hardcoded values, from ROI\n y2_right = 330 # again hardcoded values, from ROI \n x1_right = int((y1_right - c_right) / slope_right)\n x2_right = int((y2_right - c_right) / slope_right)\n \n # (step 11) check if left/right line is out of ROI\n left_point1 = Point(x1_left, y1_left)\n left_point2 = Point(x2_left, y2_left)\n \n right_point1 = Point(x1_right, y1_right)\n right_point2 = Point(x2_right, y2_right)\n \n if polygon.contains(left_point1) and polygon.contains(left_point2):\n left_line_draw = True\n else:\n #print (\"left line out\", left_point1, left_point2)\n left_line_draw = False\n \n if polygon.contains(right_point1) and polygon.contains(right_point2):\n right_line_draw = True\n else:\n #print (\"right line out\", right_point1, right_point2)\n right_line_draw = False\n \n \n # (step 12) draw lines\n line_image = np.copy(image)\n # Draw the right and left lines on image\n if left_line_draw:\n cv2.line(line_image, (x1_left, y1_left), (x2_left, y2_left), (255,0,0),5)\n if right_line_draw:\n cv2.line(line_image, (x1_right, y1_right), (x2_right, y2_right), (255,0,0),5)\n \n # Create a \"color\" binary image to combine with line image\n color_edges = np.dstack((masked_edge, masked_edge, masked_edge)) \n \n # Draw the lines on the edge image\n lines_edges = cv2.addWeighted(color_edges, 0.4, line_image, 1, 0) \n #plt.imshow(lines_edges)\n #plt.show()\n return lines_edges", "def get_landmarks(self, image): # from https://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/\n # Ask the 
detector to find the bounding boxes of each face. The 1 in the\n # second argument indicates that we should upsample the image 1 time. This\n # will make everything bigger and allow us to detect more faces.\n detections = self.detector(image, 1)\n if len(detections) < 1: # Number of faces detected = 0\n # print(\"Number of faces detected: {}\".format(len(detections)))\n return None\n # Draw Facial Landmarks with the predictor class\n shape = self.predictor(image, detections[0])\n xlist = []\n ylist = []\n for i in range(68): # Store X and Y coordinates in two lists\n xlist.append(float(shape.part(i).x))\n ylist.append(float(shape.part(i).y))\n\n landmarks_vectorised = []\n landmarks_vectorised = self.our_ft_landmark(xlist, ylist)# Extaraction des features\n\n xmean = np.mean(xlist)\n ymean = np.mean(ylist)\n xcentral = [(x-xmean) for x in xlist]\n ycentral = [(y-ymean) for y in ylist]\n \n for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):\n landmarks_vectorised.append(w)\n landmarks_vectorised.append(z)\n # landmarks_vectorised.append(x)\n # landmarks_vectorised.append(y)\n meannp = np.asarray((ymean, xmean))\n coornp = np.asarray((z, w))\n dist = np.linalg.norm(coornp-meannp)# Distance euclidienne\n landmarks_vectorised.append(dist)\n landmarks_vectorised.append((math.atan2(y, x)*360)/(2*math.pi))# Calcule de l'ongle entre le moyenne et un point\n\n return landmarks_vectorised", "def pipeline(self,img,debug=0):\n\t\timg = self.cam.undist(img)\n\t\t#get warped binary image\n\t\tbinary_warped = self.cam.warp(Image(img).binary_th())\n\t\tbw_shape = binary_warped.shape\n\t\t\n\t\tif (self.leftLine.detected == True and self.rightLine.detected == True):\n\t\t\tself.quick_search(binary_warped,debug)\n\t\telse:\n\t\t\tself.blind_search(binary_warped,debug)\n\t\n\t\tif (self.leftLine.fit!=None and self.rightLine.fit!=None):\n\t\t\tpolygon = self.fill_lane(bw_shape)\n\t\t\tunwarped_polygon = self.cam.unwarp(polygon)\n\t\t\t# calculate position of lane's center \n\t\t\ttemp = np.nonzero(unwarped_polygon[-1,:,1])[0]\n\t\t\tleft, right = temp[0], temp[-1]\n\t\t\tself.center = (int(bw_shape[1]/2) - (int((right-left)/2)+int(left)))*7.4/1280\n\t\t\timg_lines = weighted_img(unwarped_polygon,img, α=1, β=0.5, λ=0.)\n\t\t\t# write text on image\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\ttext1 = 'Radius of Curvature: {:.0f}m'.format(np.mean((self.leftLine.radius, self.rightLine.radius)))\n\t\t\ttext2 = 'Distance is {:.2f}m {} of center'.format(abs(self.center), 'left' if self.center<0 else 'right')\n\n\t\t\tcv2.putText(img_lines, text1, (100,100), font, 1,(255,255,255),2)\n\t\t\tcv2.putText(img_lines, text2 ,(100,140), font, 1,(255,255,255),2)\n\t\t\t\n\t\t\tif (debug==1):\n\t\t\t\tshow_2gr(polygon, unwarped_polygon)\n\t\t\t\tshow_2gr(binary_warped, unwarped_polygon)\n\n\t\t\treturn img_lines\n\n\t\telse:\n\t\t\t# no lines detected and not fit available: return original image\n\t\t\t# without lines\n\t\t\treturn img", "def find_lane_fast(img, left_fit, right_fit):\n\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n margin = 150\n left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) & (\n nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))\n right_lane_inds = (\n (nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) & (\n nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + 
margin)))\n\n # Again, extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n # Fit a second order polynomial to each\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n\n return left_fit, right_fit", "def laser_detector(out_l_x, out_l_y):\n global screen_x_long\n global screen_y_long\n while True:\n ret, frame2 = cap.read()\n time.sleep(0.5)\n crop_img2 = frame2[SCREEN_Y_TOP:SCREEN_Y_BOT, SCREEN_X_TOP:SCREEN_X_BOT]\n hsv_image2 = cv2.cvtColor(crop_img2, cv2.COLOR_BGR2HSV)\n laser(hsv_image2)\n laser_str_el = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))\n laser_str_el_2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n laser_close_morphed = cv2.morphologyEx(channels['laser'],\n cv2.MORPH_CLOSE,\n laser_str_el\n )\n laser_morphed = cv2.morphologyEx(laser_close_morphed,\n cv2.MORPH_OPEN,\n laser_str_el_2\n )\n\n blur = cv2.GaussianBlur(laser_morphed, (7, 7), 4, 4)\n\n lasers = cv2.HoughCircles(blur, cv.CV_HOUGH_GRADIENT, 2.5, 720 / 2,\n param1=10, param2=4, minRadius=4,\n maxRadius=10\n )\n if lasers is not None:\n lasers = np.uint16(np.around(lasers))\n for i in lasers[0, :]:\n print \"lasers!\"\n # draw the outer circle\n cv2.circle(crop_img, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # draw the center of the circle\n cv2.circle(crop_img, (i[0], i[1]), 2, (0, 0, 255), 3)\n x_l = ((i[0]) / screen_x_long) * WIDTH\n y_l = HEIGHT - (((i[1]) / screen_y_long) * HEIGHT)\n if laserT:\n out_l_x.put(x_l)\n out_l_y.put(y_l)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n print \"Exiting Background Thread: Laser detector\"", "def LineDetection(image,color,colorformat=\"rgb\",nbPoints=20):\n\n # Shape of the image\n height = image.shape[0]\n width = image.shape[1]\n\n # Initialization of point list\n points = [(0,0)]\n\n # Color choise\n if color == 'BLACK' or color == 'black':\n color = BLACK\n elif color == 'WHITE' or color == 'white':\n color = WHITE\n elif color == 'RED' or color == 'red':\n color = RED\n elif color == 'GREEN' or color == 'green':\n color = GREEN\n elif color == 'BLUE' or color == 'blue':\n color = BLUE \n elif color == 'YELLOW' or color == 'yellow':\n color = YELLOW\n elif color == 'ORANGE' or color == 'orange':\n color = ORANGE \n else :\n color = np.fliplr(np.uint8(color)) # RGB to BGR convertion\n\n if colorformat == 'HSV' or colorformat == 'hsv':\n color = np.fliplr(color) # BGR to RGB convertion for hsv conversion\n color = cv2.cvtColor(np.array([color]), cv2.COLOR_BGR2HSV)[0]\n\n # Moment calculation,for nbPoints strip, of the mask to find the center of the color\n for i in range(height//nbPoints,height,height//nbPoints):\n strip = image[i-height//nbPoints:i]\n mask = cv2.inRange(strip,color[0],color[1])\n M = cv2.moments(mask)\n if M['m00'] > 0:\n cx = int (M[ 'm10' ] /M[ 'm00' ] )\n cy = int (M[ 'm01' ] /M[ 'm00' ] )\n points.append((cx,cy+i-height//nbPoints))\n\n return points[::-1]\t# Return reverse list", "def _get_lines(fname):\n @_adapt_rgb(_skimage.color.adapt_rgb.hsv_value)\n def sobel_hsv(image):\n return _filters.sobel(image)\n\n # read in image file\n data = _skimageio.imread(fname)\n\n # select default ranges for the first 4 lines (this may vary from site to site)\n # possibly make these input parameters\n l1_range = data[0:27, 0:850, :]\n l2_range = data[28:55, 0:500, :]\n l3_range = data[56:83, 0:350, :]\n l4_range = data[84:111, 0:350, :]\n\n # Look for a right edge in the image in 
the default ranges which\n # would indicate an end of the overlay in the x direction\n intensity_limit = 2\n l1_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l1_range)),\n axis=2) < intensity_limit\n l2_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l2_range)),\n axis=2) < intensity_limit\n l3_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l3_range)),\n axis=2) < intensity_limit\n l4_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l4_range)),\n axis=2) < intensity_limit\n\n # try to adjust the default ranges if an edge was found\n sumlim = 25\n try:\n l1_right_edge = 0 + \\\n _np.where(_np.sum(l1_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l1_right_edge = 850\n if l1_right_edge < 2:\n l1_right_edge = 850\n\n try:\n l2_right_edge = 0 + \\\n _np.where(_np.sum(l2_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l2_right_edge = 500\n if l2_right_edge < 2:\n l2_right_edge = 500\n\n try:\n l3_right_edge = 0 + \\\n _np.where(_np.sum(l3_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l3_right_edge = 350\n if l3_right_edge < 2:\n l3_right_edge = 350\n\n try:\n l4_right_edge = 0 + \\\n _np.where(_np.sum(l4_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l4_right_edge = 350\n if l4_right_edge < 2:\n l4_right_edge = 350\n\n # extract an array for each of the first four lines\n line1 = data[0:27, :l1_right_edge, :]\n line2 = data[28:55, :l2_right_edge, :]\n line3 = data[56:83, :l3_right_edge, :]\n line4 = data[84:111, :l4_right_edge, :]\n \n return line1, line2, line3, line4", "def find_landmarks(self, image, detection):\n\n try:\n shape = self.shape_predictor(image, detection)\n coords = np.zeros((68, 2))\n\n for i in range(0, 68):\n coords[i] = (shape.part(i).x, shape.part(i).y)\n\n return coords\n\n except RuntimeError:\n return None", "def draw_lines(img, lines, color=[0, 0, 255], thickness=10):\n \n yFinal = 540 # tweak these values as per the frame size\n yIni = 350\n xPlus = []\n yPlus = []\n xMinus = []\n yMinus= []\n slope_range = 0.2\n\n if lines is not None:\n for line in lines:\n if line is not None:\n for x1,y1,x2,y2 in line:\n # check slope \n slope = (y2-y1)/(x2-x1)\n\t\t \n \t\t # Collect all points with + ve slope (right lane)\n if (slope > slope_range):\n xPlus.append(x1)\n xPlus.append(x2)\n yPlus.append(y1)\n yPlus.append(y2)\n\n # Collect all points with - ve slope (left lane)\n elif ((slope) < (-slope_range)):\n xMinus.append(x1)\n xMinus.append(x2)\n yMinus.append(y1)\n yMinus.append(y2)\n # If out of range, lists defined in beginning of this function will be empty \n else:\n continue\n \n # draw right lane\n x1,y1,x2,y2 = fit_line(xPlus, yPlus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color, thickness) \n\n # draw left lane\n x1,y1,x2,y2 = fit_line(xMinus, yMinus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color,thickness)", "def lane(self, mask, win_color = None):\n\n # the nonzero point\n solid = np.nonzero(mask)\n sx, sy = solid[1], solid[0]\n\n # make a image to draw on\n out_img = np.dstack([np.zeros_like(mask)]*3)*255\n if self.fit is None:\n # get the intial poly line for window sliding\n\n # get the midpoint for both line, expecting it shows up in the lower half\n self.h, self.w = mask.shape\n self.midpoint = self.w//2\n self.win_height = self.h//self.nb_win\n\n curv_head = self.h//self.frac\n histogram = np.sum(mask[:curv_head, :], axis = 0)\n mid_l = np.argmax(histogram[:self.midpoint])\n mid_r = np.argmax(histogram[self.midpoint:]) + 
self.midpoint\n\n # the indice for solid pixel in left and right\n l_lane_idc = []\n r_lane_idc = []\n\n # slide the windows down up\n btm = self.h\n for n in range(self.nb_win):\n # right window\n ul_l = (mid_l - self.half, btm - self.win_height)\n lr_l = (mid_l + self.half, btm)\n\n # left window\n ul_r = (mid_r - self.half, btm - self.win_height)\n lr_r = (mid_r + self.half, btm)\n\n\n # draw the retangle on the image\n if win_color:\n cv2.rectangle(out_img, lr_l, ul_l, win_color, 2)\n cv2.rectangle(out_img, lr_r, ul_r, win_color, 2)\n\n\n # the indice within window\n within_l = ((sx>=ul_l[0]) & \\\n (sx<=lr_l[0]) & \\\n (sy>=ul_l[1]) & \\\n (sy<=lr_l[1])).nonzero()[0]\n\n within_r = ((sx>=ul_r[0]) & \\\n (sx<=lr_r[0]) & \\\n (sy>=ul_r[1]) & \\\n (sy<=lr_r[1])).nonzero()[0]\n\n # append to the lane\n l_lane_idc.append(within_l)\n r_lane_idc.append(within_r)\n\n if len(within_r) > self.minpix:\n mid_r = np.int(np.mean(sx[within_r]))\n if len(within_l) > self.minpix:\n mid_l = np.int(np.mean(sx[within_l]))\n btm -= self.win_height\n\n # concatenate the windows\n l_lane_idc = np.concatenate(l_lane_idc)\n r_lane_idc = np.concatenate(r_lane_idc)\n try:\n self.fit = [np.polyfit(sy[l_lane_idc], sx[l_lane_idc], 2),\n np.polyfit(sy[r_lane_idc], sx[r_lane_idc], 2)]\n except:\n return out_img\n\n\n else:\n # if we've fitted the lane, use that as guide\n l_fit, r_fit = self.fit\n l_lane_idc = ((sx >= np.polyval(l_fit, sy) - self.half) &\n (sx <= np.polyval(l_fit, sy) + self.half)).nonzero()[0]\n r_lane_idc = ((sx >= np.polyval(r_fit, sy) - self.half) &\n (sx <= np.polyval(r_fit, sy) + self.half)).nonzero()[0]\n\n\n curv_head = self.h//self.frac\n l_curv_count = np.sum((sy >= curv_head) & (sx <= self.midpoint))\n r_curv_count = np.sum((sy >= curv_head) & (sx >= self.midpoint))\n\n if l_curv_count >= self.curv_count:\n try: self.fit[0] = np.polyfit(sy[l_lane_idc], sx[l_lane_idc], 2)\n except: pass\n if r_curv_count >= self.curv_count:\n try: self.fit[1] = np.polyfit(sy[r_lane_idc], sx[r_lane_idc], 2)\n except: pass\n\n # draw the lane area\n l_fit, r_fit = self.fit\n y_cord = np.linspace(0, self.h - 1, self.h)\n lane_l = np.polyval(l_fit, y_cord)\n lane_r = np.polyval(r_fit, y_cord)\n\n\n if not win_color:\n pts_l = np.array([np.vstack([lane_l, y_cord]).T])\n pts_r = np.array([np.flipud(np.vstack([lane_r, y_cord]).T)])\n\n pts = np.hstack((pts_l, pts_r))\n cv2.fillPoly(out_img, np.int_(pts), [0, 100, 0])\n\n # draw red on left\n out_img[sy[l_lane_idc], sx[l_lane_idc]] = RED\n # draw blue on right\n out_img[sy[r_lane_idc], sx[r_lane_idc]] = BLUE\n\n\n # put text showing meters away center and radius\n l_btm = np.polyval(l_fit, self.h)\n r_btm = np.polyval(r_fit, self.h)\n mpp = self.lane_width/(r_btm - l_btm) # meters per pixel\n\n mid_lane = int((r_btm + l_btm)/2)\n dev = (self.midpoint - mid_lane)\n radius = np.mean(self.curvature(mpp))\n\n side = ''\n side = 'L' if dev < 0 else 'R'\n dev_text = (\"%.2fm %s\"%(np.abs(mpp*dev), side))\n radius_text = (\"RADIUS %.2fm\"%(radius)) if radius < 2000 else 'STRAIGHT'\n\n (dev_w, dev_h), _ = cv2.getTextSize(dev_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 2)\n\n (radius_w, radius_h), _ = cv2.getTextSize(radius_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 3)\n\n\n dev_org = (int(mid_lane + 2*dev - dev_w//2), self.h - 30)\n radius_org = (int(mid_lane - radius_w//2), self.h - 80)\n\n\n\n cv2.line(out_img, (mid_lane, self.h - 20),\n (mid_lane, self.h - 40 - dev_h),\n color = [255,255,255], thickness = 
3)\n\n cv2.putText(out_img, radius_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 3,\n org = radius_org, color = [0, 0, 0])\n\n cv2.putText(out_img, dev_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 2,\n org = dev_org, color = [0, 0, 0])\n\n return out_img", "def calculate_hit_box_points_simple(image):\r\n left_border = 0\r\n good = True\r\n while good and left_border < image.width:\r\n for row in range(image.height):\r\n pos = (left_border, row)\r\n pixel = image.getpixel(pos)\r\n if type(pixel) is int or len(pixel) != 4:\r\n raise TypeError(\"Error, calculate_points called on image not in RGBA format\")\r\n else:\r\n if pixel[3] != 0:\r\n good = False\r\n break\r\n if good:\r\n left_border += 1\r\n\r\n right_border = image.width - 1\r\n good = True\r\n while good and right_border > 0:\r\n for row in range(image.height):\r\n pos = (right_border, row)\r\n pixel = image.getpixel(pos)\r\n if pixel[3] != 0:\r\n good = False\r\n break\r\n if good:\r\n right_border -= 1\r\n\r\n top_border = 0\r\n good = True\r\n while good and top_border < image.height:\r\n for column in range(image.width):\r\n pos = (column, top_border)\r\n pixel = image.getpixel(pos)\r\n if pixel[3] != 0:\r\n good = False\r\n break\r\n if good:\r\n top_border += 1\r\n\r\n bottom_border = image.height - 1\r\n good = True\r\n while good and bottom_border > 0:\r\n for column in range(image.width):\r\n pos = (column, bottom_border)\r\n pixel = image.getpixel(pos)\r\n if pixel[3] != 0:\r\n good = False\r\n break\r\n if good:\r\n bottom_border -= 1\r\n\r\n # If the image is empty, return an empty set\r\n if bottom_border == 0:\r\n return []\r\n\r\n def _check_corner_offset(start_x, start_y, x_direction, y_direction):\r\n\r\n bad = False\r\n offset = 0\r\n while not bad:\r\n y = start_y + (offset * y_direction)\r\n x = start_x\r\n for count in range(offset + 1):\r\n my_pixel = image.getpixel((x, y))\r\n # print(f\"({x}, {y}) = {pixel} | \", end=\"\")\r\n if my_pixel[3] != 0:\r\n bad = True\r\n break\r\n y -= y_direction\r\n x += x_direction\r\n # print(f\" - {bad}\")\r\n if not bad:\r\n offset += 1\r\n # print(f\"offset: {offset}\")\r\n return offset\r\n\r\n def _r(point, height, width):\r\n return point[0] - width / 2, (height - point[1]) - height / 2\r\n\r\n top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)\r\n top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)\r\n bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)\r\n bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)\r\n\r\n p1 = left_border + top_left_corner_offset, top_border\r\n p2 = (right_border + 1) - top_right_corner_offset, top_border\r\n p3 = (right_border + 1), top_border + top_right_corner_offset\r\n p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset\r\n p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)\r\n p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)\r\n p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset\r\n p8 = left_border, top_border + top_left_corner_offset\r\n\r\n result = []\r\n\r\n h = image.height\r\n w = image.width\r\n\r\n result.append(_r(p7, h, w))\r\n if bottom_left_corner_offset:\r\n result.append(_r(p6, h, w))\r\n\r\n result.append(_r(p5, h, w))\r\n if bottom_right_corner_offset:\r\n result.append(_r(p4, h, w))\r\n\r\n result.append(_r(p3, h, w))\r\n if top_right_corner_offset:\r\n 
result.append(_r(p2, h, w))\r\n\r\n result.append(_r(p1, h, w))\r\n if top_left_corner_offset:\r\n result.append(_r(p8, h, w))\r\n\r\n # Remove duplicates\r\n result = tuple(dict.fromkeys(result))\r\n\r\n return result", "def find_lanes_from_scratch(binary_warped, nwindows=9, margin=100, minpix=50, draw_lanes=True):\n # Find histogram peaks\n histogram = hist(binary_warped)\n out_img = np.dstack((binary_warped, binary_warped, binary_warped)) # create output image to draw on (not necessary)\n midpoint = np.int(histogram.shape[0]//2) # 640\n leftx_base = np.argmax(histogram[:midpoint]) # find index of left peak (indicates ll)\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint # find index of right peak (indicates rl)\n \n # Sliding windows\n window_height = np.int(binary_warped.shape[0]//nwindows) # 80\n nonzero = binary_warped.nonzero() # a tuple for x and y\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_inds = []\n right_lane_inds = []\n for window in range(nwindows): # index 0 to 8\n win_y_low = binary_warped.shape[0] - (window+1)*window_height \n win_y_high = binary_warped.shape[0] - window*window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n if draw_lanes:\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) # bottom left to top right, in green, with thickness 2\n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)\n good_left_inds = ((nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high) & \n (nonzeroy >= win_y_low ) & (nonzeroy < win_y_high)).nonzero()[0]\n good_right_inds = ((nonzerox >= win_xright_low) & (nonzerox < win_xright_high) & \n (nonzeroy >= win_y_low ) & (nonzeroy < win_y_high)).nonzero()[0]\n left_lane_inds.append(good_left_inds) # indices\n right_lane_inds.append(good_right_inds)\n if len(good_left_inds) > minpix:\n leftx_current = int(np.mean(nonzerox[good_left_inds]))\n # print(leftx_current)\n if len(good_right_inds) > minpix:\n rightx_current = int(np.mean(nonzerox[good_right_inds]))\n \n # Find indices of left and right lane lines\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n # Fit polynomial of second degree\n left_fit = np.polyfit(lefty, leftx, deg=2)\n right_fit = np.polyfit(righty, rightx, deg=2)\n ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])\n\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n detected_flag = True\n \n # Visualize\n if draw_lanes:\n out_img[lefty, leftx] = [255, 0, 0]\n out_img[righty, rightx] = [0, 0, 255]\n for index in range(out_img.shape[0]-1):\n cv2.line(out_img, (int(left_fitx[index]), int(ploty[index])), (int(left_fitx[index+1]), int(ploty[index+1])), (255,255,0), 3)\n cv2.line(out_img, (int(right_fitx[index]), int(ploty[index])), (int(right_fitx[index+1]), int(ploty[index+1])), (255,255,0), 3)\n return left_fitx, right_fitx, left_fit, right_fit, ploty, out_img, detected_flag", "def detect(self, img):\n # 1. color filter\n lane_img = self.color_filter(img.copy())\n # 2. 
gaussian blur\n lane_img = self.gaussian_blur(lane_img)\n # 3.canny edge detection\n lane_img = self.canny(lane_img)\n # 4. region of interest crop\n lane_img = self.region_of_interest(lane_img)\n # 5. hough lines\n lane_img = self.hough_lines(lane_img)\n # 6. overlay lane over original image\n result_img = weighted_img(lane_img, img)\n\n return result_img", "def compute_lane_statistics(self, warped):\r\n ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])\r\n left_fit, right_fit = self.fit_dict['left_fit'], self.fit_dict['right_fit']\r\n left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\r\n right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\r\n y_eval = np.max(ploty)\r\n # Define conversions in x and y from pixels space to meters\r\n ym_per_pix = 30 / 720 # meters per pixel in y dimension\r\n xm_per_pix = 3.7 / 700 # meters per pixel in x dimension\r\n # Fit new polynomials to x,y in world space\r\n left_fit_cr = np.polyfit(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)\r\n right_fit_cr = np.polyfit(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)\r\n # Calculate the new radii of curvature\r\n left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\r\n 2 * left_fit_cr[0])\r\n right_curverad = (\r\n (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\r\n 2 * right_fit_cr[0])\r\n\r\n # compute vehicle offset from center\r\n image_center_x = (warped.shape[1]/2) * xm_per_pix\r\n y_eval_m = y_eval * ym_per_pix\r\n left_fitx = left_fit_cr[0] * y_eval_m ** 2 + left_fit_cr[1] * y_eval_m + left_fit_cr[2]\r\n right_fitx = right_fit_cr[0] * y_eval_m ** 2 + right_fit_cr[1] * y_eval_m + right_fit_cr[2]\r\n lane_center_x = (left_fitx + right_fitx)/2.\r\n vehicle_offset = image_center_x - lane_center_x\r\n\r\n return left_curverad, right_curverad, vehicle_offset", "def facial_landmarks(img):\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('/home/aditya/Desktop/stuff/ComputerVision-CMSC733/FaceSwap/scripts/traditional/shape_predictor_68_face_landmarks.dat')\n grayscale_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n rectangles = detector(grayscale_image, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n facial_points = []\n no_of_faces = len(rectangles)\n for (i, rect) in enumerate(rectangles):\n\n shape = predictor(grayscale_image, rect)\n shape = face_utils.shape_to_np(shape)\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n for (x, y) in shape:\n cv2.circle(img, (x, y), 2, (0, 0, 255), -1)\n facial_points.append((x, y))\n\n return no_of_faces, facial_points", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n 
final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary" ]
[ "0.6679829", "0.634663", "0.62997025", "0.6223093", "0.621849", "0.6095733", "0.60441035", "0.59959286", "0.59956324", "0.5957144", "0.59263897", "0.5916754", "0.5900934", "0.587384", "0.58581173", "0.5833918", "0.5820731", "0.58049595", "0.5782126", "0.5764281", "0.575516", "0.5736101", "0.5735095", "0.5733465", "0.5694543", "0.56783587", "0.56765395", "0.56739724", "0.5672463", "0.56661636" ]
0.7383569
0
Use the DBSCAN clustering algorithm in order to remove possible outliers from the points detected as laser on the object. We are basically enforcing continuity of the laser line on the object, i.e. looking for a dense cluster of pixels. The points of interest are the ones whose label is not -1, i.e. the ones that belong to an actual cluster rather than to DBSCAN's noise/outlier set.
def remove_obj_outliers(self, points: np.ndarray) -> Optional[np.ndarray]: dbscan_result = self.dbscan.fit(points[:, 0]) mask = dbscan_result.labels_ != -1 return np.expand_dims(points[:, 0][mask], axis=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DBSCAN(M, eps, min_points):\n colors = ['r', 'g', 'b', 'y', 'c', 'm'] # tablica kolorow - inny kolor dla kazdego clustera\n checked = np.zeros(M.shape[\n 0]) # tablica sprawdzonych punktow wypelniona zerami jesli punkt zostal sprawdzony zmieniana jest wartosc na 1print(checked)\n classification = np.empty(M.shape[0])\n classification.fill(0)\n cluster_count = 0\n for i in range(0, len(colors)): # for odpowiedzialny do tworzenia clusterow (kazdy cluster inny kolor)\n for j in range(0, len(checked)): # szukanie pierwszego niesprawdzonego punktu\n if checked[j] != 1:\n seeds = cluster(M, j, eps)\n startpoint = j\n if min_points > len(seeds):\n checked[\n startpoint] = 1 # jesli punkt ma mniej sasiadow niz minimalna liczba to ustawia punkt jako sprawdzony i nic z nim dalej nie robi bo jest do dupy\n\n if min_points <= len(seeds):\n plt.plot(M[startpoint, 0], M[startpoint, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=np.pi * 3 ** 2) # jesli ma minimalna liczbe sasiadow to robi koleczko na wykresie\n checked[startpoint] = 1\n classification[startpoint] = i + 1\n break # jesli znaleziono niesprawdzony punkt wychodzi z petli\n while len(seeds) > 0:\n\n point = seeds[0] # wybranie za kolejny punkt pierwszego punktu z tablicy seeds\n results = cluster(M, point, eps) # zapisanie punktow ktore spelniaja warunek z neighborhood\n if checked[point] != 1:\n if min_points > len(results) and (classification[point] == 0 or classification[point] == -1):\n checked[\n point] = 1 # jesli punkt ma mniej sasiadow niz minimalna liczba to ustawia punkt jako sprawdzony i ustala go jako border\n plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=8)\n classification[point] = -(i + 1)\n if min_points <= len(results):\n plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=np.pi * 3 ** 2) # jesli ma minimalna liczbe sasiadow to robi koleczko na wykresie\n checked[point] = 1\n classification[point] = i + 1\n for k in range(0, len(results)):\n result_point = results[k]\n seeds.append(\n result_point) # dodanie do tablicy seeds punktow ktore znajdowaly sie w sasiedztwie punktu point\n seeds.remove(seeds[0]) # usuwa juz sprawdzony element z tablicy seeds\n if np.sum(checked) == M.shape[\n 0]: # jesli juz wszystkie punkty zostaly sprawdzone to wychodzi z petli - po tym wszystkie clustery powinny byc zrobione\n break\n return plt.show()", "def fit_dbscan(X_fit, eps=0.55, min_samples=8, plot=False, n_jobs=None):\n # Compute DBSCAN\n db = DBSCAN(eps=eps, min_samples=min_samples).fit(X_fit)\n\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n # labels[~core_samples_mask] = -1\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n\n print(\n 'Estimated number of clusters: %d' % n_clusters_)\n print(\n 'Estimated number of noise points: %d' % n_noise_)\n\n # Black removed and is used for noise instead.\n unique_labels = set(labels)\n if plot:\n colors = [\n plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n\n plt.figure(figsize=(5, 5))\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n digits_proj = TSNE(random_state=1).fit_transform(X_fit)\n\n xy = digits_proj[class_member_mask & 
core_samples_mask]\n plt.plot(\n xy[:, 0], xy[:, 1],\n 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=12)\n\n xy = digits_proj[class_member_mask & ~core_samples_mask]\n plt.plot(\n xy[:, 0], xy[:, 1],\n 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=6)\n\n plt.title('Estimated number of clusters: %d' % n_clusters_)\n plt.show()\n\n labels = labels.astype('str')\n labels = np.asarray(list(map(lambda x: \"{:02d}\".format(int(x)), labels)))\n\n # for i in np.unique(labels):\n # print('Cluster %s count: ' % i, sum(labels == i))\n return labels, db", "def clustering_dbscan(self, df, conc_attr, eps=10, min_samples=2):\n\t\tX = df[[\"latitude\", \"longitude\"]].values\n\t\twt = df[conc_attr].values\n\n\t\tmetric = \"haversine\"\n\t\talgorithm = \"ball_tree\"\n\t\tmin_samples = 2\n\t\teps = eps/6371\n\n\t\t# without weight DBSCAN\n\t\tclustering = DBSCAN(eps=10/6371, min_samples=2, algorithm=algorithm, metric=metric).fit(np.radians(X))\n\t\t# with weight\n\t\t# clustering = DBSCAN(eps=10/6371, min_samples=2, algorithm=algorithm, metric=metric).fit(np.radians(X), sample_weight=wt)\n\n\t\tlabels = clustering.labels_\n\n\t\treturn X, wt, clustering, labels", "def remove_outliers(clusters):\n pixel_sums = {} \n outliers = []\n\n for cluster, nodes in clusters.items():\n if len(nodes) > 1:\n pixel_sums[cluster] = []\n for node in nodes:\n pixel_sums[cluster].append(sum(sum(extract_2D[node])))\n\n for cluster, psums in pixel_sums.items():\n med = np.median(psums)\n m_psums = [abs(x - med) for x in psums]\n mad = np.median(m_psums)\n \n if mad == 0:\n next \n else:\n for i, proj in enumerate(psums): \n z = 0.6745*(proj - med)/mad\n if abs(z) > 3.5:\n outliers.append((cluster, clusters[cluster][i]))\n\n clusters[\"outliers\"] = [o[1] for o in outliers]\n \n for outlier in outliers:\n cluster, node = outlier[0], outlier[1]\n clusters[cluster].remove(node)\n print('class_avg node {0} was removed from cluster {1} as an outlier'.format(node, cluster))", "def cluster_DBSCAN(self, data, eps=None, min_samples=None,\n n_clusters=None, maxiter=200):\n if min_samples is None:\n min_samples = self.Time.size // 20\n\n if n_clusters is None:\n if eps is None:\n eps = 0.3\n db = cl.DBSCAN(eps=eps, min_samples=min_samples).fit(data)\n else:\n clusters = 0\n eps_temp = 1 / .95\n niter = 0\n while clusters < n_clusters:\n clusters_last = clusters\n eps_temp *= 0.95\n db = cl.DBSCAN(eps=eps_temp, min_samples=15).fit(data)\n clusters = (len(set(db.labels_)) -\n (1 if -1 in db.labels_ else 0))\n if clusters < clusters_last:\n eps_temp *= 1 / 0.95\n db = cl.DBSCAN(eps=eps_temp, min_samples=15).fit(data)\n clusters = (len(set(db.labels_)) -\n (1 if -1 in db.labels_ else 0))\n warnings.warn(('\\n\\n***Unable to find {:.0f} clusters in '\n 'data. 
Found {:.0f} with an eps of {:.2e}'\n '').format(n_clusters, clusters, eps_temp))\n break\n niter += 1\n if niter == maxiter:\n warnings.warn(('\\n\\n***Maximum iterations ({:.0f}) reached'\n ', {:.0f} clusters not found.\\nDeacrease '\n 'min_samples or n_clusters (or increase '\n 'maxiter).').format(maxiter, n_clusters))\n break\n\n labels = db.labels_\n\n core_samples_mask = np.zeros_like(labels)\n core_samples_mask[db.core_sample_indices_] = True\n\n return labels, core_samples_mask", "def DBscan_clustering(self,d,s):\r\n print(colored(\"Performing agglomerative clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = DBSCAN(eps=d,min_samples=s,metric = 'euclidean').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\"%len(np.unique(self.labels)),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels", "def dbscan(self, data):\n self.reset_params()\n self.data = data\n self.labels = [NOISE_LABEL] * len(data)\n\n noise_cluster = Cluster('Noise')\n self.clusters.add(noise_cluster)\n\n count = 0\n for pidx, point in enumerate(data):\n if point not in self.visited:\n count += 1\n self.visited.append(point)\n neighbours, neighbour_indexes = self.query_region(point)\n if len(neighbours) < self.min_points:\n noise_cluster.add_point(point)\n self.labels[pidx] = NOISE_LABEL\n else:\n cluster_idx = len(self.clusters)\n name = \"cluster-{0}\".format(cluster_idx)\n new_cluster = Cluster(name)\n indexes = self.expand_cluster(new_cluster, point, neighbours, neighbour_indexes)\n self.labels[pidx] = cluster_idx\n for added_idx in indexes:\n self.labels[added_idx] = cluster_idx\n return", "def creating_clusters(filename):\n Years = ['2007','2008','2009','2010','2011','2012','2013','2014','2015','2016']\n x = []\n y = []\n location = []\n colors = []\n\n \"\"\"this loop will run and print for each year individually\"\"\"\n for yr in Years: \n df = pd.read_excel(filename, sheet_name = eval('yr'))\n table = df.iloc[:,[0,1,2,9,13,17]]\n \"\"\"removing any None values\"\"\"\n data = df.iloc[:,[0,9,13,17]].dropna()\n\n \"\"\"creating the row numbers\"\"\"\n data['row_num'] = np.arange(len(data))\n\n \"\"\"\n the average for all cities will be the sum of their clearance rates divided by three\n in order to do this, I'm using the row number to find the appropriate values since the indexs aren't are consecutive\n \"\"\"\n \n avg = []\n for i in range(len(data)):\n avg.append(float((data.loc[data['row_num'] == i]['Murder/Nonnegligent Manslaughter Clearance Rate'] + data.loc[data['row_num'] == i]['Aggravated Assault Clearance Rate'] + data.loc[data['row_num'] == i]['Robbery Clearance Rate'])/3))\n \n \"\"\"the averages are saved as a new column\"\"\"\n data['average'] = avg\n\n \"\"\"The model for DBSCAN\"\"\"\n model = DBSCAN(eps = 15000, min_samples=3).fit(data)\n print(yr + 's model is: ')\n print(model)\n print()\n\n outliers_df = pd.DataFrame(data)\n \n \"\"\"The groups of clusters\"\"\"\n print(yr + 's grouping is: ')\n print(Counter(model.labels_))\n print()\n\n t = table.dropna()\n print('The outlier cities for '+ yr +' are: ')\n print()\n\n\n \"\"\"Printing each outlier one at a time\"\"\"\n for i in outliers_df[model.labels_==-1]['Total Population']:\n print(t.loc[data['Total Population'] == i]['City'].to_string())\n \n \"\"\"Adding the x and y values and the colors to a list for the later plots\"\"\"\n colors.append(model.labels_)\n 
x.append(data.iloc[:,0])\n y.append(data.iloc[:,5])\n location.append(t.iloc[:,1] + \", \" + t.iloc[:,2])\n print()\n \n values = {}\n values['colors'] = colors\n values['x'] = x\n values['y'] = y\n values['location'] = location\n return values", "def run_DBScan_clustering(dataframe, eps=0.03, min_samples=3):\n dataframe2 = dataframe.copy(deep=True)\n\n # this will exclude a column from the dataframe\n # cleaned_data.loc[:, cleaned_data.columns != 'Unnamed: 0']\n\n clusterer = DBSCAN(eps=eps, min_samples=min_samples).fit(dataframe.loc[:, dataframe.columns != 'Unnamed: 0'])\n labels = clusterer.labels_\n\n user_list = dataframe.index.values.tolist()\n\n end_dict = {}\n for i in range(len(labels)):\n end_dict[user_list[i]] = labels[i]\n return end_dict", "def clustering_dbscan_sk(cloud: object, eps: int, min_samples: int) -> object:\n return DBSCAN(eps=eps, min_samples=min_samples).fit(cloud)", "def db_scanClustering(data, x_scaled, random_s, epsilon, mini_sample):\n np.random.seed(random_s)\n #Train the algo\n db = DBSCAN(eps=epsilon, min_samples=mini_sample, n_jobs=-1).fit(x_scaled)\n #Get each cluster\n labels = pd.DataFrame(db.labels_, index=data.index, columns=[\"Clusters\"])\n #Merge on main df\n data_clust = pd.merge(data, labels, left_index=True, right_index=True, how='left')\n return data_clust", "def test_dbscan_feature():\n # Parameters chosen specifically for this task.\n # Different eps to other test, because distance is not normalised.\n eps = 0.8\n min_samples = 10\n metric = 'euclidean'\n # Compute DBSCAN\n # parameters chosen for task\n core_samples, labels = dbscan(X, metric=metric,\n eps=eps, min_samples=min_samples)\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=metric)\n labels = db.fit(X, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)", "def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. 
Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )", "def getLabels(df, eps=3, min_samples=100):\n #instantiate dbscan\n db = DBSCAN(eps=eps, \n min_samples=min_samples, \n metric='euclidean', \n n_jobs=-1\n )\n \n #fit and predict to data\n db.fit_predict(df[['x', 'y']])\n \n #Returns the sorted unique elements of an array\n labels_unique = np.unique(db.labels_)\n #drop the -1 labels which are unlabeled\n labels_unique = labels_unique[labels_unique != -1]\n \n \n return db.labels_, labels_unique", "def ExtractDBSCAN(SetOfObjects, epsilon_prime):\n\n # Start Cluster_id at zero, incremented to '1' for first cluster\n cluster_id = 0\n for entry in SetOfObjects._ordered_list:\n if SetOfObjects._reachability[entry] > epsilon_prime:\n if SetOfObjects._core_dist[entry] <= epsilon_prime:\n cluster_id += 1\n SetOfObjects._cluster_id[entry] = cluster_id\n else:\n # This is only needed for compatibility for repeated scans.\n # -1 is Noise points\n SetOfObjects._cluster_id[entry] = -1\n else:\n SetOfObjects._cluster_id[entry] = cluster_id\n if SetOfObjects._core_dist[entry] <= epsilon_prime:\n # One (i.e., 'True') for core points #\n SetOfObjects._is_core[entry] = 1\n else:\n # Zero (i.e., 'False') for non-core, non-noise points #\n SetOfObjects._is_core[entry] = 0", "def dbscan(distmat, epsilon, minpoints):\n objs_indices = numpy.arange(distmat.shape[0])\n objs_clusterlabels = numpy.zeros(distmat.shape[0])\n objs_visited = numpy.zeros(distmat.shape[0]).astype(bool)\n\n distmask = (distmat > 0) & (distmat <= epsilon)\n neighbors = [distmask[i].nonzero()[0] for i in objs_indices]\n neighborhood_size = [len(n) for n in neighbors]\n\n # Iterate through objs in random order.\n numpy.random.shuffle(objs_indices)\n for neighbor_index_array in neighbors:\n numpy.random.shuffle(neighbor_index_array)\n\n clustercount = 0\n for objidx in objs_indices:\n if objs_visited[objidx]:\n continue\n objs_visited[objidx] = True\n if neighborhood_size[objidx] < minpoints:\n continue\n # The sample with current `objidx` is core point\n # of a new cluster. 
Label it with cluster 'id'.\n clustercount += 1\n objs_clusterlabels[objidx] = clustercount\n # Go through neighborhood of the identified core point.\n clustermember_indices = list(neighbors[objidx])\n for member_index in clustermember_indices:\n # If not in any cluster so far, add it to current:\n if not objs_clusterlabels[member_index]:\n objs_clusterlabels[member_index] = clustercount\n if objs_visited[member_index]:\n continue\n objs_visited[member_index] = True\n if neighborhood_size[member_index] < minpoints:\n continue\n # Object with current `member_index` is core point.\n # Add full neighborhood to `clustermember_indices`.\n clustermember_indices.extend(\n list(neighbors[member_index]))\n return objs_clusterlabels", "def filterNoise(points, thresh, sampleSize = 4):\n diff = points[:-1] - points[1:]\n mask = np.zeros((diff.shape[0] + 1), 'bool')\n avg_dist = np.zeros((diff.shape[0] + 1))\n \n for i in range(diff.shape[0]):\n cbin = ClusterBin(i, i + sampleSize)\n sample = cbin.getFrom(diff)\n avg_dist[i] = np.average(LA.norm(sample, axis=2))\n if (avg_dist[i] < thresh):\n mask[i:i + sampleSize] = True\n \n # noise = points[mask]\n plt.plot(avg_dist)\n plt.grid(True)\n plt.show()\n\n # noise_avg = np.average(noise, axis=0)\n # points[mask] = 0\n # first_noise = np.argmax(mask)\n # points[first_noise] = noise_avg\n # mask[first_noise] = False\n points = points[mask == False]\n return points", "def shoot(hdf5_file_name, minPts, sample_ID = 0, random_state = None, verbose = True): \n \n fileh = tables.open_file(hdf5_file_name, mode = 'r+')\n\n neighborhoods_indices = fileh.root.DBSCAN_group.neighborhoods_indices\n neighborhoods_indptr = fileh.root.DBSCAN_group.neighborhoods_indptr[:]\n\n neighbors_counts = fileh.root.DBSCAN_group.neighbors_counts[sample_ID]\n subsampled_indices = fileh.root.DBSCAN_group.subsamples_matrix[sample_ID]\n\n N_samples = neighborhoods_indptr.size - 1\n N_runs, N_subsamples = fileh.root.DBSCAN_group.subsamples_matrix.shape\n\n if not isinstance(sample_ID, int):\n raise ValueError(\"\\nERROR: DBSCAN_multiplex @ shoot:\\n\"\n \"'sample_ID' must be an integer identifying the set of subsampled indices \"\n \"on which to perform DBSCAN clustering\\n\") \n\n if (sample_ID < 0) or (sample_ID >= N_runs):\n raise ValueError(\"\\nERROR: DBSCAN_multiplex @ shoot:\\n\"\n \"'sample_ID' must belong to the interval [0; {}].\\n\".format(N_runs - 1))\n \n # points that have not been sampled are labelled with -2\n labels = np.full(N_samples, -2, dtype = int)\n # among the points selected for clustering, \n # all are initally characterized as noise\n labels[subsampled_indices] = - 1\n \n random_state = check_random_state(random_state)\n\n core_samples = np.flatnonzero(neighbors_counts >= minPts)\n \n index_order = np.take(core_samples, random_state.permutation(core_samples.size))\n\n cluster_ID = 0\n\n # Look at all the selected samples, see if they qualify as core samples\n # Create a new cluster from those core samples\n for index in index_order:\n if labels[index] not in {-1, -2}:\n continue\n\n labels[index] = cluster_ID\n\n candidates = [index]\n while len(candidates) > 0:\n candidate_neighbors = np.zeros(0, dtype = np.int32)\n for k in candidates:\n candidate_neighbors = np.append(candidate_neighbors, \n neighborhoods_indices[neighborhoods_indptr[k]: neighborhoods_indptr[k+1]])\n candidate_neighbors = np.unique(candidate_neighbors)\n\n candidate_neighbors = np.intersect1d(candidate_neighbors, subsampled_indices, assume_unique = True)\n \n not_noise_anymore = 
np.compress(np.take(labels, candidate_neighbors) == -1, candidate_neighbors)\n \n labels[not_noise_anymore] = cluster_ID\n\n # Eliminate as potential candidates the points that have already \n # been used to expand the current cluster by a trail \n # of density-reachable points\n candidates = np.intersect1d(not_noise_anymore, core_samples, assume_unique = True) \n \n cluster_ID += 1\n # Done with building this cluster. \n # \"cluster_ID\" is now labelling the next cluster.\n\n fileh.close()\n\n gc.collect()\n\n return core_samples, labels", "def dbscan(dataList, maxClusterNum, eps, minPts):\n unvisitedPt = list(dataList)\n clusterSizeList = []\n clusterSeedList = []\n lastClusterID = 0\n # lookup all unvisited Points\n while (len(unvisitedPt) != 0):\n idx = randint(0, len(unvisitedPt) - 1)\n cur = unvisitedPt[idx]\n cur.isVisited = True\n del unvisitedPt[idx]\n\n # Create new cluster if 'cur' pt. is core object\n neighborhood = findNeighbor(cur, dataList, eps)\n if (len(neighborhood) >= minPts):\n cur.label = lastClusterID\n clusterSeedList.append(cur)\n clusterSize = 1\n idx = -1\n # find all density-connected points(neighborhood)\n while (idx < len(neighborhood) - 1):\n idx += 1\n if (neighborhood[idx].isVisited):\n continue\n neighborhood[idx].isVisited = True\n unvisitedPt.remove(neighborhood[idx])\n nextNeighbor = findNeighbor(neighborhood[idx], dataList, eps)\n if (len(nextNeighbor) >= minPts):\n # extend neighborhood without duplicate\n neighborhood = neighborhood \\\n + [x for x in nextNeighbor if x not in neighborhood and not x.isVisited]\n if (neighborhood[idx].label == -1):\n neighborhood[idx].label = lastClusterID\n clusterSize += 1\n clusterSizeList.append((lastClusterID, clusterSize))\n lastClusterID += 1\n\n # Create labelConverter to remove extra clusters if exist.\n labelConverter = dict()\n labelConverter[-1] = -1\n if (lastClusterID <= maxClusterNum): # no need to remove extra clusters\n for i in range(maxClusterNum):\n labelConverter[i] = i\n else: # create labelConverter that remove extra clusters\n clusterSizeList.sort(key=itemgetter(1), reverse=True)\n for i in range(maxClusterNum):\n labelConverter[clusterSizeList[i][0]] = i\n for i in range(maxClusterNum, lastClusterID):\n labelConverter[clusterSizeList[i][0]] = -1\n # recluster(dataList, labelConverter, clusterSeedList, eps, minPts)\n return labelConverter", "def remove_outliers(self, data, cluster, linked_clusters, var, _cluster_sig_, _bud_sig_):\n\n buds = find_linked_buds(self, linked_clusters, cluster)\n\n linked_buds = []\n if np.size(buds) == 0:\n linked_buds = [False for link in linked_clusters]\n else:\n linked_buds = [[(link == bud) for bud in buds] for link in linked_clusters]\n linked_buds = [np.any(val) for val in linked_buds]\n\n remove = []\n for i in range(len(linked_buds)):\n if linked_buds[i]:\n remove.append(( np.asarray(var[i]) > _bud_sig_ ))\n else:\n remove.append(( np.asarray(var[i]) > _cluster_sig_ ))\n\n keep_clusters = []\n keep_var = []\n for j in range(len(remove)):\n if (remove[j] == False):\n keep_clusters.append(linked_clusters[j])\n keep_var.append(var[j])\n\n linked_clusters = keep_clusters\n var = keep_var\n\n return linked_clusters, var", "def getCluster(self, eps = None, minPts = None):\n #D = getDistanceMatrix()\n #print(\"Distance matrix completed, clustering in process\")\n clusters = DBSCAN(metric=Categorization.cosin_sim_pairs).fit_predict(np.arange(186696).reshape(-1, 1))\n print(\"Clustering completed, writing pickle file\")\n self.writeFile(clusters, 
\"clusters.pickle\")\n return clusters", "def denseBinsToClusters(candidates, plot=False, debug=False):\n graph = np.identity(len(candidates))\n for i in range(len(candidates)):\n for j in range(len(candidates)):\n graph[i, j] = int(neighbour(candidates[i], candidates[j]))\n # Find connected components in order to merge neighbouring bins\n nbConnectedComponents, components = scipy.sparse.csgraph.connected_components(\n graph, directed=False)\n if debug:\n print(graph)\n print(nbConnectedComponents, components)\n candidates = np.array(candidates)\n clusterAssignment = -1 * np.ones(data.shape[0])\n # For every cluster\n for i in range(nbConnectedComponents):\n # Get dense units of the cluster - 获取集群的密集单元\n cluster_dense_units = candidates[np.where(components == i)[0]]\n if debug:\n for v in cluster_dense_units:\n for z in v:\n print(z)\n clusterDimensions = {}\n for j in range(len(cluster_dense_units)):\n for k in range(len(cluster_dense_units[j])):\n if cluster_dense_units[j][k].dimension not in clusterDimensions:\n clusterDimensions[cluster_dense_units[j][k].dimension] = []\n clusterDimensions[cluster_dense_units[j][k].dimension].extend(cluster_dense_units[j][k].points)\n points = reduce(np.intersect1d, list(clusterDimensions.values()))\n clusterAssignment[points] = i\n if plot:\n pred = -1 * np.ones(data.shape[0])\n pred[points] = i\n plt.figure()\n plt.title(f'In yellow, clusters in {list(clusterDimensions.keys())} dimensions ')\n plt.scatter(data[:, 0], data[:, 1], c=pred)\n for g in grid[0]:\n plt.axvline(x=g, c='red', linestyle='--')\n for g in grid[1]:\n plt.axhline(y=g, c='red', linestyle='--')\n plt.show()\n if debug:\n print(clusterDimensions.keys(), points)\n return clusterAssignment", "def clustering_dbscan_o3d():\n pass", "def cluster_hdbscan(\n clusterable_embedding, min_cluster_size, viz_embedding_list\n):\n print(f\"min_cluster size: {min_cluster_size}\")\n clusterer = hdbscan.HDBSCAN(\n min_cluster_size=min_cluster_size, prediction_data=True\n ).fit(clusterable_embedding)\n labels = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size,).fit_predict(\n clusterable_embedding\n )\n print(f\"found {len(np.unique(labels))} clusters\")\n clustered = labels >= 0\n print(f\"fraction clustered: {np.sum(clustered)/labels.shape[0]}\")\n for embedding in viz_embedding_list:\n plt.scatter(\n embedding[~clustered][:, 0],\n embedding[~clustered][:, 1],\n c=(0.5, 0.5, 0.5),\n s=10,\n alpha=0.5,\n )\n plt.scatter(\n embedding[clustered][:, 0],\n embedding[clustered][:, 1],\n c=labels[clustered],\n s=10,\n cmap=\"Spectral\",\n )\n plt.legend(labels)\n plt.show()\n\n return labels, clusterer", "def _remove_outliers(df, contamination):\n\n day_roam_distance = df[['day_roam_distance']]\n\n clf = IsolationForest(n_estimators=100, contamination=contamination, random_state=0, n_jobs=-1)\n outliers = clf.fit_predict(day_roam_distance)\n\n inline_data = df.loc[outliers == 1]\n\n return inline_data", "def omit_nans(self, data, label):\n maskarray=np.full(data.shape[0], True)\n masker=np.unique(np.argwhere(np.isnan(data))[:,0])\n maskarray[masker]=False\n traindata=data[maskarray,:,:,:]\n trainlabel=label[maskarray]\n return traindata, trainlabel", "def cluster_shrinkage_clustering(from_file):\n points = read_points(from_file)\n shuffle(points)\n S = similarity_matrix(points, similarity_measure=euclidean_distance)\n A = cluster(S, k=10, max_iter=1000)\n labels = [np.argmax(p) for p in A]\n xs, ys = zip(*points)\n \n return xs, ys, labels", "def integrated_clustering(t_all,y_all,num_of_days=500,period = 
1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n \n ''' filter the empty segments'''\n all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = {cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster", "def dbscan_(X, eps, min_samples, metric='euclidean'):\n f = DBSCAN(eps=eps,\n min_samples=min_samples,\n metric=metric)\n f.fit(X)\n return f.labels_, f.core_sample_indices_", "def generate_clusters(df):\n\n df_size = df.shape[0]\n print(df_size)\n n_clusters = 0\n percent_min_pts = 0.105\n min_clusters = 3\n while (n_clusters != min_clusters):\n print(\"percent_min_pts\", percent_min_pts)\n min_cluster_pts = math.floor(df_size * percent_min_pts)\n print(\"min_cluster_pts\", 
min_cluster_pts)\n\n clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_pts)\n print(df.head())\n clusterer.fit(df)\n cluster_groups = {}\n labels = clusterer.labels_\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n n_clusters = len(set(labels))\n print(\"n_clusters\", n_clusters)\n multiplier = abs(n_clusters - min_clusters) * 0.001\n print(\"multiplier\", multiplier)\n if n_clusters > min_clusters:\n percent_min_pts += multiplier\n else:\n percent_min_pts -= multiplier\n print(\"percent_min_pts\", percent_min_pts)\n return labels" ]
[ "0.6458574", "0.62219197", "0.6213678", "0.61207443", "0.6074292", "0.6022414", "0.5965475", "0.5942092", "0.5927346", "0.5881949", "0.57391447", "0.5729232", "0.56970525", "0.5679967", "0.5674256", "0.5669259", "0.5664249", "0.5654197", "0.56165564", "0.5572699", "0.555027", "0.5498979", "0.53966284", "0.53961986", "0.5391222", "0.5343035", "0.5341764", "0.53403336", "0.5330921", "0.53299963" ]
0.6430791
1
Given an image and a list of coordinates of shape (n_points, 1, 2), return the RGB colors at those coordinates in the [0, 1] range. Note that OpenCV uses BGR instead of RGB by default, so the channel columns need to be flipped.
def get_colors(self, image: np.ndarray, coordinates: np.ndarray) -> np.ndarray:
    x = coordinates.squeeze(1)
    return np.flip(image[x[:, 1], x[:, 0]].astype(np.float64) / 255.0, axis=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def pts_filter_color(points):\n pts = np.array(points).tolist()\n # Get rid of all points behind camera\n pts_fil = []\n for pt in pts:\n if pt[2] > 0: \n pts_fil.append(pt)\n \n # get pix size for x distance\n pts_col = []\n for point in pts_fil: \n x = point[0]\n y = point[1]\n z = point[2]\n pix_width = (2 * z * np.tan(fov_width/2))/img_width\n pix_height = (2 * z * np.tan(fov_height/2))/img_height\n # Get row and column coordinates\n y_mod = img_width/2 + y/pix_height + height_offset\n x_mod = img_height/2 - x/pix_width + width_offset\n row = int(y_mod)\n col = int(x_mod)\n # Check if point is inside image bounds\n if 0 <= col < img_msg_now.width and 0 <= row < img_msg_now.height: \n rgb = img[row][col] # Get color of that row and column\n pts_col.append(point + rgb)", "def get_bin_color_features(img, p):\n return cv2.resize(img, p.spatial_size).ravel()", "def bin_spatial(image, size=(32, 32)):\n color1 = cv2.resize(image[:, :, 0], size).ravel()\n color2 = cv2.resize(image[:, :, 1], size).ravel()\n color3 = cv2.resize(image[:, :, 2], size).ravel()\n return np.hstack((color1, color2, color3))", "def bin_spatial(img, size=(32, 32)):\n color1 = cv2.resize(img[:, :, 0], size).ravel()\n color2 = cv2.resize(img[:, :, 1], size).ravel()\n color3 = cv2.resize(img[:, :, 2], size).ravel()\n return np.hstack((color1, color2, color3))", "def retrieveColor(image):\n w, h, dim = image.shape\n ret = np.zeros((w, h, dim), dtype=np.uint8)\n for i in range(w):\n for j in range(h):\n ret[i][j] = fakingColors(image[i][j])\n return np.clip(ret, 0, 255)", "def get_pixel_list(img):\n orig_shape = img.shape # Remember the original shape of the img.\n # Store the img as a x by z array (z being the length of the colour space)\n # Essentially just a list of pixels.\n\n if len(img.shape) == 3:\n img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])\n elif len(img.shape) == 2:\n img = img.reshape(img.shape[0] * img.shape[1],)\n return orig_shape, img", "def change_bbox_color(img, boxes, p1, p2):\n points = np.unique(p1 + p2)\n\n for i in points:\n x1, y1, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]\n x2, y2 = x1+w, y1+h\n _ = cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 2) \n\n return img", "def cropByColor(img, cols):\n\n mask = maskByColors(img, cols)\n\n _, _, stats, _ = cv2.connectedComponentsWithStats(mask)\n compBBX = max(stats[1:], key=lambda s: s[4]) # in case of multiple matches\n # points of interest\n left = compBBX[0]\n right = left + compBBX[2]\n top = compBBX[1]\n bottom = top + compBBX[3]\n return mask[top: bottom, left: right]", "def draw_points(in_img, points, colour=(255, 0, 0)):\n img = in_img.copy()\n\n radius = int(max(img.shape) / 100)\n\n img = convert_when_colour(colour, img)\n\n for point in points:\n img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)\n\n return img", "def reduce_color(image):\n\n # http://stackoverflow.com/questions/5906693/how-to-reduce-the-number-of-colors-in-an-image-with-opencv-in-python\n w, h, _ = image.shape\n for 
row in xrange(h-1):\n for col in xrange(w-1):\n #pi = row * w * 3 + col * 3\n pixel = image[col][row]\n pixel[0] = __reduceColorValue(pixel[0])\n pixel[1] = __reduceColorValue(pixel[1])\n pixel[2] = __reduceColorValue(pixel[2])\n return image", "def colorCross(img, (x, y)):\n\tfor x_r in range(-1, 2):\n\t\tfor y_r in range(-1, 2):\n\t\t\tif x_r != 0 or y_r != 0:\n\t\t\t\t\timg.putpixel((x + x_r, y + y_r), (255, 0, 0, 0))\n\n\treturn img", "def load_color_image_features(img_path):\n ac = scipy.misc.imread(img_path, mode='RGB')\n ac = ac / (255.0 / 2) - 1.0\n return np.array(ac)", "def one_color(image,color=[0,0,255]):\r\n output = image.copy()\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n distance = calc_distance(color,image[line][column])\r\n if distance <=150:\r\n output[line][column]=[255,255,255]\r\n else:\r\n output[line][column]=[0,0,0]\r\n return output", "def get_color_in_region(self, start, end):\n # Input format: (start_x, start_y), (end_x, end_y)\n start_x, start_y = start\n end_x, end_y = end\n\n # x and y are flipped\n crop_img = self.img[start_x:(end_x + 1), start_y:(end_y + 1)]\n channels = cv2.mean(crop_img)\n\n # Return BGR\n return channels[0], channels[1], channels[2]", "def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]", "def numpy_color2sepia(image):\n\n if (isinstance(image, str)):\n image = cv2.imread(image)\n\n filter = np.array([[.393, .769, .189],\n [.349, .686, .168],\n [.272, .534, .131]])\n\n N = len(image)\n M = len(image[0])\n utbilde = np.zeros((N,M,3))\n\n utbilde = image.dot(filter.T)\n utbilde[utbilde>255] = 255\n\n utbilde[...,[0,2]] = utbilde[...,[2,0]] #cv2cvtcolor konvertering fungerer ikke så måtte lage denne erstatningen\n\n\n return utbilde", "def numpy_color2sepia(filename):\n img = cv2.imread(filename)\n global height, width\n height, width = img.shape[:2]\n\n weights = np.array([ [ 0.393 , 0.769 , 0.189] ,[ 0.349 , 0.686 , 0.168] ,[ 0.272 , 0.534 , 0.131]])\n sepia = np.zeros((height, width, 3), np.uint8) #i,j\n sepia = img @ np.matrix.transpose(weights)\n sepia=np.flip(sepia, axis = 2)\n np.putmask(sepia, sepia>255, 255)\n sepia=sepia.astype('uint8')\n\n return sepia", "def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, 
j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def convert_3d(points_2d, depth_image, image):\n fx = 525.0 # focal length x\n fy = 525.0 # focal length y\n cx = 319.5 # optical center x\n cy = 239.5 # optical center y\n factor = 5000 # for the 16-bit PNG files\n points_3d = []\n cols = []\n colors = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n for v in range(depth_image.shape[0]):\n for u in range(depth_image.shape[1]):\n Z = depth_image[v,u] / factor\n X = (u - cx) * Z / fx\n Y = (v - cy) * Z / fy\n points_3d.append([X,Y,Z])\n cols.append(colors[v,u])\n points = []\n for i in range(len(points_2d)):\n x = int(points_2d[i,0])\n y = int(points_2d[i,1])\n # print(y)\n Z = depth_image[y,x] / factor\n X = (x - cx) * Z / fx\n Y = (y - cy) * Z / fy\n points.append([X,Y,Z])\n points_3d = np.array(points_3d)\n cols = np.array(cols)\n points = np.array(points)\n \n return points, points_3d, cols", "def get_colour(self):\n \n distorted = []\n if piCameraFound:\n # Use piCamera\n \n #frame = self.capture_generator.next()\n #distorted = frame.array\n self.cam.capture(self.rawCapture, format=\"bgr\", use_video_port=True)\n distorted = self.rawCapture.array\n \n # clear the stream in preparation for the next frame\n self.rawCapture.truncate(0)\n \n else: # Use OpenCV\n retval, distorted = self.cam.read() # Read frame\n\n if not retval: # Error\n print \"Camera.get_colour: Could not read next frame\";\n exit(-1);\n \n \n #colour = cv2.remap(distorted, self.mapx, self.mapy, cv2.CV_INTER_LINEAR)\n #colour = cv2.remap(distorted, self.mapx, self.mapy, cv2.INTER_LINEAR)\n \n # Skip this part because it is slow\n #colour = cv2.undistort(distorted, self.intrinsic_matrix, self.distortion_coeffs)\n colour = distorted\n return colour, distorted", "def colors(im=None):\n if im is None:\n path = \"C:\\\\Users\\\\2053_HSUF\\\\PycharmProjects\\\\dialtones\\\\pics\\\\76aa16c9_playstore.png\"\n im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n for shade in range(13):\n colorized = cv2.applyColorMap(im, shade)\n cv2.imshow(\"yo\", colorized)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def draw_circles(img, points, color):\n for p in points:\n img = cv2.circle(img, (p[0], p[1]), 5, color, thickness=2)\n return img", "def color_map(n=256, normalized=False):\n def bitget(byteval, idx):\n return (byteval & (1 << idx)) != 0\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((n, 3), dtype=dtype)\n for i in range(n):\n r = g = b = 0\n c = i + 1 # skip the first color (black)\n for j in range(8):\n r |= bitget(c, 0) << 7 - j\n g |= bitget(c, 1) << 7 - j\n b |= bitget(c, 2) << 7 - j\n c >>= 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap / 255 if normalized else cmap\n return cmap", "def split_image_into_channels(image):\n red_channel = image[:, :, 0]\n green_channel = image[:, :, 1]\n blue_channel = image[:, :, 2]\n return red_channel, green_channel, blue_channel", "def get_coloured_grid(self, r1, r2, r3, b1=4, b2=2.5, b3=1):\n r, g, b = np.frompyfunc(self.get_colour(r1, r2, r3, b1, b2, b3), 2, 3)(self.end_z, self.end_step)\n img_array = np.dstack((r, g, b))\n return Image.fromarray(np.uint8(img_array * 255))", "def extractComponent(img, cols, fromX, toX, fromY, toY):\n\n mask = maskByColors(img, cols)\n return mask[fromY: toY, fromX: toX]\n # return mask[fromY: toY+1, fromX: 
toX+1]", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b" ]
[ "0.6468485", "0.6468485", "0.6345939", "0.62494427", "0.61467564", "0.61461294", "0.59144837", "0.59094536", "0.58901876", "0.5879799", "0.58757746", "0.58628285", "0.5841417", "0.5825361", "0.58153236", "0.5774531", "0.57576954", "0.5743293", "0.57418716", "0.5741592", "0.5732375", "0.57321125", "0.57238275", "0.5722028", "0.57115716", "0.5697928", "0.5697568", "0.5684839", "0.5668898", "0.5645124" ]
0.702163
0
Given points in the 3D world, save the PLY file representing the point cloud. This function saves both the original file and a version to which an outlier removal process has been applied.
def save_3d_render(
    self, points: List[np.ndarray], colors: List[np.ndarray]
) -> None:
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.vstack(points).astype(np.float64))
    pcd.colors = o3d.utility.Vector3dVector(np.vstack(colors))
    if self.debug:
        o3d.visualization.draw_geometries([pcd])
    if not self.debug:
        o3d.io.write_point_cloud(f"results/{self.filename[:-4]}.ply", pcd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, filename):\n if len(self.interpolated_points) > 0:\n red, green, blue = zip(*self.interpolated_points)\n red = [np.asscalar(x) for x in red]\n green = [np.asscalar(x) for x in green]\n blue = [np.asscalar(x) for x in blue]\n output_type = \"interpolated\"\n print(\"Exporting interpolated points\")\n elif len(self.main_cluster) > 0:\n red, green, blue = self.get_color_lookup_table_points(self.main_cluster)\n output_type = \"clustered\"\n print(\"Exporting cluster points\")\n else:\n red = self.color_lookup_table_points[0]\n green = self.color_lookup_table_points[1]\n blue = self.color_lookup_table_points[2]\n output_type = \"resized\"\n print(\"Exporting resized points\")\n\n data = dict(\n red=red,\n green=green,\n blue=blue\n )\n\n filename = f'{filename}_{output_type}.pickle'\n with open(filename, 'wb') as outfile:\n pickle.dump(data, outfile, protocol=2)\n # stores data of color lookup table in file as pickle for efficient loading (yaml is too slow)\n\n print(f\"Output saved to '{filename}'.\")", "def save_point_cloud(self, point_cloud, iteration, mode):\n path = os.path.join(self.point_cloud_dir, '%06d.%s.point.npy' % (iteration, mode))\n np.save(path, point_cloud)", "def writePointCloudVTP(self, outFile):\n #points\n vtkPts = vtk.vtkPoints()\n cells = vtk.vtkCellArray()\n\n # setup colors\n Colors = vtk.vtkFloatArray()\n #Colors.SetNumberOfComponents(3)\n Colors.SetNumberOfTuples(self.Npts)\n Colors.SetName(self.label) #can change to any string\n\n for i in range(self.Npts):\n x = self.ctrs[i,0]\n y = self.ctrs[i,1]\n z = self.ctrs[i,2]\n id = vtkPts.InsertNextPoint(x,y,z)\n cells.InsertNextCell(1)\n cells.InsertCellPoint(id)\n Colors.InsertTuple( i, [self.scalar[i]] )\n\n\n #build final vtp object for writing\n polydata = vtk.vtkPolyData()\n polydata.SetPoints(vtkPts)\n polydata.SetVerts(cells)\n polydata.GetPointData().SetScalars(Colors)\n polydata.Modified()\n\n writer = vtk.vtkXMLPolyDataWriter()\n writer.DebugOn()\n writer.SetFileName(outFile)\n writer.SetInputData(polydata)\n #writer.SetDataModeToBinary()\n writer.Write()\n\n return", "def save_xyz(self, filename, save_ghosts=True, save_natom=True):\n outfile = open(filename, 'w')\n outfile.write(self.save_string_xyz(save_ghosts, save_natom))\n outfile.close()", "def write_xyz_file(allxyz):\n if SAVEXYZ:\n print('+> Saving riverbed topography file...', end='')\n if MODE == 1:\n np.savetxt('kinoshita_topo.xyz', allxyz, fmt='%.6e')\n elif MODE == 2:\n np.savetxt(FNAME.rsplit('.', 1)[0] + '_topo.xyz', allxyz, fmt='%.6e')\n print(' [done]')", "def dump(points, filename):\n with open(filename, 'w') as f:\n for i, pts in enumerate(points):\n for x, y in pts:\n f.write(f\"{x:.3f},{y:.3f},{i}\\n\")\n print(f\"Dumping data to {filename}...\")", "def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)", "def save(self, _name):\r\n try:\r\n with open(_name, 'w+') as fout:\r\n fout.write(\".cube file generated from prt_esolv.py\\n\")\r\n fout.write(f\"{_name}\\n\")\r\n\r\n fout.write(\r\n f\"{int(self.n_atoms)} {float(self.origin[0])} {float(self.origin[1])} {float(self.origin[2])}\\n\")\r\n\r\n fout.write(f\"{int(self.n_x)} {float(self.x[0])} {float(self.x[1])} {float(self.x[2])}\\n\")\r\n fout.write(f\"{int(self.n_y)} {float(self.y[0])} {float(self.y[1])} {float(self.y[2])}\\n\")\r\n fout.write(f\"{int(self.n_z)} {float(self.z[0])} {float(self.z[1])} 
{float(self.z[2])}\\n\")\r\n\r\n for atom, xyz in zip(self.atoms, self.atoms_xyz):\r\n fout.write(f\"{atom} 0 {xyz[0]} {xyz[1]} {xyz[2]}\\n\")\r\n\r\n for ix in range(self.n_x):\r\n for iy in range(self.n_y):\r\n for iz in range(self.n_z):\r\n fout.write(f\"{self.data[ix][iy][iz]}\")\r\n if iz % 6 == 5:\r\n fout.write('\\n')\r\n fout.write(\"\\n\")\r\n except IOError:\r\n print(f\"Can't create {_name} file!!!\")\r\n raise\r\n\r\n return None", "def save_spi3d(self):\n lut = self.generate_lut()\n file_path = os.path.join(self.output, self.name)\n file_io.save_file(lut, file_path)", "def outputPulses(self,filename):\n np.save(filename,self.getData())\n return", "def save_trained_model(self, filename):\n d = self.pack_npz()\n with open(filename, 'wb') as f:\n np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)", "def save_enu(self, filename):\n x, y, z = self.get_coords_enu()\n coords = np.vstack([x, y, z]).T\n np.savetxt(filename, coords, fmt=b'%.12e')", "def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))", "def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)", "def save_colored_point_cloud_to_ply(rgb_image,\n depth_image,\n rgb_intrinsics,\n rgb_distortion,\n depth_intrinsics,\n extrinsics,\n cloud_path,\n use_registered_depth=False):\n log.debug(\"Generating a colored point cloud from an RGB-D frame.\")\n\n if use_registered_depth:\n log.debug((\"Using depth_registered image, therefore the resulting \"\n \"point cloud is organized in the order of the rgb image.\"\n \"NOTE: Make sure that the input depth_image is \"\n \"registered!\"))\n\n depth_points_3d = cv2.rgbd.depthTo3d(depth_image, rgb_intrinsics)\n\n n_rows, n_cols, n_coord = np.shape(depth_points_3d)\n\n with open(cloud_path, 'wb') as ply_file:\n ply_file.write(\n (ply_header % dict(n_points=n_rows * n_cols)).encode('utf-8'))\n\n for i in range(n_rows):\n for j in range(n_cols):\n point_x = depth_points_3d[i, j, 0]\n point_y = depth_points_3d[i, j, 1]\n point_z = depth_points_3d[i, j, 2]\n\n point_b = rgb_image[i, j, 0]\n point_g = rgb_image[i, j, 1]\n point_r = rgb_image[i, j, 2]\n\n if (point_z > DISTANCE_LOWER_LIMIT and\n point_z < DISTANCE_UPPER_LIMIT):\n ply_file.write((point_ply % dict(\n x=point_x,\n y=point_y,\n z=point_z,\n r=point_r,\n g=point_g,\n b=point_b)).encode('utf-8'))\n else:\n ply_file.write((point_ply % dict(\n x=0.0, y=0.0, z=0.0, r=0, g=0,\n b=0)).encode('utf-8'))\n\n ply_file.write(\n (end_ply % dict(width=n_cols, height=n_rows)).encode('utf-8'))\n\n else:\n log.debug((\"Using unregistered depth image, therefore the resulting \"\n \"point cloud is organized in the order of the \"\n \"depth image.\"))\n\n depth_points_3d = cv2.rgbd.depthTo3d(depth_image, depth_intrinsics)\n depth_points_in_rgb_frame = cv2.perspectiveTransform(\n depth_points_3d, extrinsics)\n\n n_rows, n_cols, n_coord = np.shape(depth_points_in_rgb_frame)\n\n fx = rgb_intrinsics[0, 0]\n fy = rgb_intrinsics[1, 1]\n cx = rgb_intrinsics[0, 2]\n cy = rgb_intrinsics[1, 2]\n\n with open(cloud_path, 'wb') as ply_file:\n ply_file.write(\n (ply_header % dict(n_points=n_rows * 
n_cols)).encode('utf-8'))\n\n for i in range(n_rows):\n for j in range(n_cols):\n point_x = depth_points_in_rgb_frame[i, j, 0]\n point_y = depth_points_in_rgb_frame[i, j, 1]\n point_z = depth_points_in_rgb_frame[i, j, 2]\n\n height, width, channels = rgb_image.shape\n if (point_z > DISTANCE_LOWER_LIMIT and\n point_z < DISTANCE_UPPER_LIMIT):\n u = int(fx * point_x / point_z + cx)\n v = int(fy * point_y / point_z + cy)\n\n if (u >= 0 and u < width and v >= 0 and v < height):\n point_b = rgb_image[v, u, 0]\n point_g = rgb_image[v, u, 1]\n point_r = rgb_image[v, u, 2]\n\n ply_file.write((point_ply % dict(\n x=point_x,\n y=point_y,\n z=point_z,\n r=point_r,\n g=point_g,\n b=point_b)).encode('utf-8'))\n else:\n ply_file.write((point_ply % dict(\n x=0.0, y=0.0, z=0.0, r=0, g=0,\n b=0)).encode('utf-8'))\n else:\n ply_file.write((point_ply % dict(\n x=0.0, y=0.0, z=0.0, r=0, g=0,\n b=0)).encode('utf-8'))\n\n ply_file.write(\n (end_ply % dict(width=n_cols, height=n_rows)).encode('utf-8'))\n\n log.debug('Finished writing the file: ' + cloud_path)", "def save_to_xyz(self, filename): \n with open( filename, 'a' ) as F:\n F = open( filename, 'a' )\n F.write( '%d\\n'%self.num_atoms )\n F.write( \"XYZ\\n\" )\n for num,row in enumerate(self.atoms):\n try:\n F.write('%s '%self.species[num])\n except:\n F.write('X%d '%num)\n F.write( mat2str( row, \"%16.10f\" ) )\n F.write( \"\\n\" )", "def write_pc_embedding(filename, xyz, embeddings):\n color = converter.embedding_to_color(embeddings)\n write_pc(filename, xyz, color)", "def write_input(eval_points, filename='input.txt'):\n util.save(eval_points, filename)", "def save_hypercube(fname, sample_points):\n # Get parameter names and build header\n pnames = sample_points.keys()\n pnames.sort()\n hdr = \" \".join(pnames)\n\n # Build array\n dat = np.column_stack([sample_points[p] for p in pnames])\n np.savetxt(fname, dat, header=hdr, fmt=\"%4.4e\")\n print(\"Saved hypercube to '%s'.\" % fname)", "def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')", "def save_gps_coordinates(points: list, file_name: str):\n\n with open(file_name, \"w\") as file:\n for point in points:\n if isinstance(point[0], list):\n str_point = str(point[0][0]) + \" \" + \\\n str(point[0][1]) + \" \" + str(point[1]) + \"\\n\"\n else:\n str_point = str(point[0]) + \" \" + str(point[1]) + \"\\n\"\n file.write(str_point)", "def writePosFilesStep(self): \n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getUntilted())\n \n writeSetOfCoordinates(self._getExtraPath(), self.inputCoordinatesTiltedPairs.get().getTilted())", "def save_spi3d(self):\n for filename, colormap in colors.colormaps.items():\n if self.test:\n self.print_colormap(filename, colormap)\n lut = self.generate_spi3d_from_colormap(colormap)\n file_path = os.path.join(self.output, filename)\n file_io.save_file(lut, file_path)\n\n for filename, ev_colormap in colors.ev_colormaps.items():\n if self.test:\n self.print_colormap(filename, ev_colormap)\n lut = self.generate_spi3d_from_evs(ev_colormap)\n file_path = 
os.path.join(self.output, filename)\n file_io.save_file(lut, file_path)", "def SavePYP(self, filename=None):\n if filename is None:\n pne, ext = os.path.splitext(self.path)\n self.pyppath = pne+'_out.pyp'\n else:\n self.pyppath = filename\n writefile(self.pyppath, self.latexlist)\n return self.pyppath", "def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', core_ids=self.core_ids, cx_ids=self.cx_ids)", "def write_data_to_file(pos, fps, data_file):\n xs = []\n for x,y in pos:\n xs.append(x)\n with open(data_file,'wb') as f:\n np.save(f,pos)\n np.save(f,xs)\n np.save(f,fps)", "def savepotential(pot, xx):\n potential = []\n coordinates = []\n for ii in range(0, len(pot)):\n coordinates.append(xx[ii])\n potential.append(pot[ii])\n np.savetxt(\"potential.dat\", np.transpose([coordinates, potential]))", "def save_ckpt(objects, epoch, score, ckpt_file):\n state_dicts = {name: obj.state_dict() for name, obj in objects.items() if obj is not None}\n ckpt = dict(state_dicts=state_dicts,\n epoch=epoch,\n score=score)\n may_make_dir(osp.dirname(ckpt_file))\n torch.save(ckpt, ckpt_file)\n msg = '=> Checkpoint Saved to {}'.format(ckpt_file)\n print(msg)", "def PointCloudfromStructOutput(self,file):\n print(\"Creating Structure Point Cloud\")\n xyz = self.readStructOutput(file)\n pc = np.zeros((int(len(xyz)/2.0),3))\n pc[:,0] = xyz[::2,0]*1000\n pc[:,1] = xyz[::2,1]*1000\n pc[:,2] = xyz[::2,2]*1000\n head = \"\"\"X,Y,Z\"\"\"\n np.savetxt(file, pc, delimiter=',',fmt='%.10f', header=head)\n return", "def generate_pointcloud(rgb_file, mask_file,depth_file,ply_file):\n rgb = Image.open(rgb_file)\n # depth = Image.open(depth_file)\n depth = Image.open(depth_file).convert('I')\n mask = Image.open(mask_file).convert('I')\n\n # if rgb.size != depth.size:\n # raise Exception(\"Color and depth image do not have the same resolution.\")\n # if rgb.mode != \"RGB\":\n # raise Exception(\"Color image is not in RGB format\")\n # if depth.mode != \"I\":\n # raise Exception(\"Depth image is not in intensity format\")\n\n\n points = [] \n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u,v))\n # Z = depth.getpixel((u,v)) / scalingFactor\n # if Z==0: continue\n # X = (u - centerX) * Z / focalLength\n # Y = (v - centerY) * Z / focalLength\n if (mask.getpixel((u,v))<55):\n Z = depth.getpixel((u, v))*.22 \n if Z == 0: continue\n Y = .22 * v\n X = .22 * u\n points.append(\"%f %f %f %d %d %d 0\\n\"%(X,Y,Z,color[0],color[1],color[2]))\n file = open(ply_file,\"w\")\n file.write('''ply\nformat ascii 1.0\nelement vertex %d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty uchar alpha\nend_header\n%s\n'''%(len(points),\"\".join(points)))\n file.close()" ]
[ "0.6294344", "0.62656236", "0.60348487", "0.5977392", "0.5924391", "0.58835065", "0.58382964", "0.5795747", "0.5772745", "0.5762645", "0.5722541", "0.5684895", "0.56516", "0.5644123", "0.56439006", "0.5636157", "0.56341565", "0.56338894", "0.5624714", "0.56098646", "0.55869716", "0.55792487", "0.5574017", "0.5558632", "0.55427116", "0.55271816", "0.5503251", "0.5494104", "0.548756", "0.54664505" ]
0.639831
0
Given a plane represented by its origin and normal, and a list of rays, compute the intersections between the plane and the rays.
def compute_intersections(
    self, plane: Plane, directions: List[np.ndarray]
) -> List[np.ndarray]:
    return [
        line_plane_intersection(
            plane_origin=plane.origin,
            plane_normal=plane.normal,
            line_direction=direction,
        )
        for direction in directions
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersects(self, ray):\n theta = 45\n H = 512\n W = 512\n A = self.origin\n B = Point(W, A.y, A.z)\n C = Point(B.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n D = Point(A.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n vec3 = ray.direction * self.normal\n if vec3 != 0:\n vec1 = self.origin - ray.origin\n vec2 = vec1 * self.normal\n dist = vec2 / vec3\n if dist > 0:\n point_on_plane = ray.origin + dist * ray.direction\n if A.x <= point_on_plane.x <= B.x and A.y <= point_on_plane.y <= D.y and B.z <= point_on_plane.z <= C.z:\n #print A, B, C, D, point_on_plane\n return dist", "def ray_intersect_triangle(origin, direction, triangle, use_planes=False):\n origin = np.array(origin)\n direction = np.array(direction)\n if len(direction.shape) == 1:\n direction = direction.reshape(1, *direction.shape)\n return_single = True\n else:\n return_single = False\n triangle = np.array(triangle)\n if len(triangle.shape) == 2:\n triangle = triangle.reshape(1, *triangle.shape)\n\n v0 = triangle[..., 0, :]\n v1 = triangle[..., 1, :]\n v2 = triangle[..., 2, :]\n u = v1 - v0\n v = v2 - v0\n normal = np.cross(u, v)\n b = np.inner(normal, direction)\n a = my_inner(normal[..., None, :], v0[..., None, :] - origin[None, ..., :])\n\n rI = a / b\n # ray is parallel to the plane\n rI[(b == 0.0)*(a != 0.0)] = np.nan\n # ray is parallel and lies in the plane\n rI[(b == 0.0)*(a == 0.0)] = 0\n\n # check whether the intersection is behind the origin of the ray\n rI[rI < 0.0] = np.nan\n\n if not use_planes:\n w = origin + rI[..., None] * direction - v0[..., None, :]\n denom = my_inner(u, v) * my_inner(u, v) - my_inner(u, u) * my_inner(v, v)\n\n si = (my_inner(u, v)[..., None] * my_inner(w, v[..., None, :]) - my_inner(v, v)[..., None] * my_inner(w, u[..., None, :])) / denom[:, None]\n rI[((si < 0)+(si > 1.0)).astype(bool)] = np.nan\n\n ti = (my_inner(u, v)[..., None] * my_inner(w, u[..., None, :]) - my_inner(u, u)[..., None] * my_inner(w, v[..., None, :])) / denom[:, None]\n rI[((ti < 0.0) + (si + ti > 1.0)).astype(bool)] = np.nan\n\n def nanargmin(a, axis):\n from numpy.lib.nanfunctions import _replace_nan\n a, mask = _replace_nan(a, np.inf)\n res = np.argmin(a, axis=axis)\n return res\n\n index = nanargmin(rI, axis=0)\n rI = rI[index, np.arange(len(index))]\n point = origin + rI[..., None] * direction\n\n if return_single:\n return point[0]\n return point", "def general_plane_intersection(n_a, da, n_b, db):\n \n # https://en.wikipedia.org/wiki/Intersection_curve\n \n n_a = np.array(n_a)\n n_b = np.array(n_b)\n da = np.array(da)\n db = np.array(db)\n \n l_v = np.cross(n_a, n_b)\n norm_l = sqrt(np.dot(l_v, l_v))\n if norm_l == 0:\n return None\n else:\n l_v /= norm_l\n aa = np.dot(n_a, n_a)\n bb = np.dot(n_b, n_b)\n ab = np.dot(n_a, n_b)\n d_ = 1./(aa*bb - ab*ab)\n l_0 = (da*bb - db*ab)*d_*n_a + (db*aa - da*ab)*d_*n_b\n \n return l_v, l_0", "def three_d_vector_plane_intersection(point_a, point_b, point_c, point_d, point_e):\n a = np.array(point_a)\n b = np.array(point_b)\n c = np.array(point_c)\n nv = plane_equation(point_c, point_d, point_e)\n t = (nv[0] * c[0] + nv[1] * c[1] + nv[2] * c[2] - nv[0] * a[0] - nv[1] * a[1] - nv[2] * a[2]) / \\\n (nv[0] * (b[0] - a[0]) + nv[1] * (b[1] - a[1]) + nv[2] * (b[2]-a[2]))\n x = a[0] + t * (b[0] - a[0])\n y = a[1] + t * (b[1] - a[1])\n z = a[2] + t * (b[2] - a[2])\n intersection = np.array([x, y, z])\n return intersection", "def points_on_lines(hyperplanes):\n intersections = []\n for row in 
hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def intersect(self, rays):\n raise NotImplementedError", "def linesegment_plane_intersection(self, p0,p1,point,normal): # only returns lines...intersections through the segment end points are ignored\n\t\tp0dot=numpy.dot(p0-point,normal)\n\t\tp1dot=numpy.dot(p1-point,normal)\n\t\tif (p0dot>0 and p1dot<0) or (p0dot<0 and p1dot>0): \n\t\t\t# if the dot products have opposing signs, then the line intersects the plane\n\t\t\treturn True,p0+(p1-p0)*abs(p0dot)/(abs(p0dot)+abs(p1dot))\n\t\telse:\n\t\t\treturn False", "def triangle_plane_intersection(self,p0,p1,p2,point,normal):\n\t\ttol=0.00001\n\t\n\t\t# handle all of the stupid cases before we do costly math\n\t\n\t\t#basic stuff\n\t\tp0dp=numpy.dot(p0-point,normal)\n\t\tp1dp=numpy.dot(p1-point,normal)\n\t\tp2dp=numpy.dot(p2-point,normal)\n\t\tp0ip=numpy.abs(p0dp)<tol # p0 in-plane\n\t\tp1ip=numpy.abs(p1dp)<tol # p1 in-plane\n\t\tp2ip=numpy.abs(p2dp)<tol # p02in-plane\n\n\t\t# are all vertices of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(p2ip): # yes, triangle is in the plane\n\t\t\treturn [p0,p1,p2]\n\t\n\t\t# are all vertices of the triangle on the same side?\n\t\tif (not(p0ip))&(not(p1ip))&(not(p2ip))&(numpy.sign(p0dp)==numpy.sign(p1dp))&(numpy.sign(p0dp)==numpy.sign(p2dp)): # yup, they are all on the same side\n\t\t\treturn []\n\t\n\t\t# is one vertex in the plane?\n\t\tif (p0ip)&(not(p1ip))&(not(p2ip)): #just p0 in plane\n\t\t\treturn [p0]\n\t\telif (not(p0ip))&(p1ip)&(not(p2ip)): #just p1 in plane\n\t\t\treturn [p1]\n\t\telif (not(p0ip))&(not(p1ip))&(p2ip): #just p2 in plane\n\t\t\treturn [p2]\n\t\n\t\t# is one line of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(not(p2ip)): #L1 in plane\n\t\t\treturn [p0,p1]\n\t\telif (not(p0ip))&(p1ip)&(p2ip): #L2 in plane\n\t\t\treturn [p1,p2]\n\t\telif (p0ip)&(not(p1ip))&(p2ip): #L3 in plane\n\t\t\treturn [p0,p2]\n\t\n\t\t# if we have gotten this far, we have to actually calculate intersections\n\t\tif numpy.sign(p0dp)==numpy.sign(p1dp):\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l2b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l2i,l3i]\n\t\telif numpy.sign(p2dp)==numpy.sign(p1dp):\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l1b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l3i]\n\t\telse:\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tif (l1b)&(l2b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l2i]\n\t\n\t\t# If the function makes it this far, I have no idea what is going on.\n\t\treturn \"bananna pants\"", "def intersect_plane(L, plane):\n \n # Line U, V\n # Plane N n\n # (VxN-nU:U.N)\n # Note that this is in homogeneous coordinates.\n # intersection of plane (n,p) with the line (v,p)\n # returns point and line parameter\n \n \n den = np.dot(L.w, plane.n)\n \n if abs(den) > (100*_eps):\n P = -(np.cross(L.v, plane.n) + plane.p * L.w) / den\n p = (np.cross(L.v, plane.n) - plane.p * L.w) / den\n \n P = L.pp\n t = np.dot( P-p, N)\n return namedtuple('intersect_plane', 'p t')(P, t)\n else:\n return None", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), 
vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def faces_intersecting_plane(vertices, faces, plane):\n vg.shape.check(locals(), \"vertices\", (-1, 3))\n vg.shape.check(locals(), \"faces\", (-1, 3))\n check_indices(faces, len(vertices), \"faces\")\n\n signed_distances = plane.signed_distance(vertices)\n return np.abs(np.sign(signed_distances)[faces].sum(axis=1)) != 3", "def intersection(self, ray):\n d_proj = self._normal.dot(ray.d)\n if abs(d_proj) < bounds.too_small:\n return -1.0\n s_proj = (self._origin - ray.o).dot(self._normal)\n if d_proj * s_proj < 0.0:\n # ray going away from plane\n return -1.0\n else:\n return s_proj / d_proj", "def intersect(self, rays):\n has_segments = bool(self._merged_segments)\n has_arcs = bool(self._merged_arcs)\n \n seg = {}\n arc = {}\n \n if has_segments:\n # do segment intersection\n seg[\"x\"], seg[\"y\"], seg[\"valid\"], seg[\"ray_u\"], seg[\"segment_u\"], \\\n seg[\"gather_ray\"], seg[\"gather_segment\"] = self._segment_intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n self._merged_segments[\"x_start\"],\n self._merged_segments[\"y_start\"],\n self._merged_segments[\"x_end\"],\n self._merged_segments[\"y_end\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n seg[\"norm\"] = tf.gather(\n tf.atan2(\n self._merged_segments[\"y_end\"] - self._merged_segments[\"y_start\"],\n self._merged_segments[\"x_end\"] - self._merged_segments[\"x_start\"]\n ) + PI/2.0,\n seg[\"gather_segment\"]\n )\n \n if has_arcs:\n # do arc intersection\n arc[\"x\"], arc[\"y\"], arc[\"valid\"], arc[\"ray_u\"], arc[\"arc_u\"], \\\n arc[\"gather_ray\"], arc[\"gather_arc\"] = self._arc_intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n self._merged_arcs[\"x_center\"],\n self._merged_arcs[\"y_center\"],\n self._merged_arcs[\"angle_start\"],\n self._merged_arcs[\"angle_end\"],\n self._merged_arcs[\"radius\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n arc[\"norm\"] = self._get_arc_norm(\n self._merged_arcs[\"radius\"], arc[\"arc_u\"], arc[\"gather_arc\"]\n )\n \n if has_segments and has_arcs:\n # has arcs and segments, so we need to chooose between segment and arc \n # intersections.\n seg[\"valid\"], arc[\"valid\"] = self._seg_or_arc(\n seg[\"ray_u\"], arc[\"ray_u\"], seg[\"valid\"], arc[\"valid\"]\n )\n \n return seg, arc", "def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result", "def intersect(self, rays): \n result = {}\n \n if bool(self._merged):\n result[\"x\"], result[\"y\"], result[\"z\"], result[\"valid\"], result[\"ray_u\"], \\\n result[\"trig_u\"], result[\"trig_v\"], result[\"gather_ray\"], \\\n result[\"gather_trig\"] = self._intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"z_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n rays[\"z_end\"],\n self._merged[\"xp\"],\n self._merged[\"yp\"],\n self._merged[\"zp\"],\n self._merged[\"x1\"],\n self._merged[\"y1\"],\n self._merged[\"z1\"],\n self._merged[\"x2\"],\n self._merged[\"y2\"],\n self._merged[\"z2\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n \n result[\"norm\"] = tf.gather(\n 
self._merged[\"norm\"],\n result[\"gather_trig\"]\n )\n \n return result", "def intersects(self, ray):\n def raySegmentIntersectAB(self, ray):\n \"\"\"\n recibes a ray. checks if it intersects the segment\n dot: denominator. if dot = 0 they're paralel\n t1: distance from origin to intersection\n t2: intersection IN the segment\n \"\"\"\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n def raySegmentIntersectCD(self, ray):\n v1 = ray.origin - self.pointC\n v2 = self.pointD - self.pointC\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n def raySegmentIntersectAC(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointC - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n\n def raySegmentIntersectBD(self, ray):\n v1 = ray.origin - self.pointB\n v2 = self.pointD - self.pointB\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n \n minD = 9999\n distance_AB = raySegmentIntersectAB(self, ray)\n distance_CD = raySegmentIntersectCD(self, ray)\n distance_AC = raySegmentIntersectAC(self, ray)\n distance_BD = raySegmentIntersectBD(self, ray)\n \n if distance_AB is not None:\n minD = distance_AB\n \n if distance_CD is not None:\n if distance_CD < minD:\n minD = distance_CD\n \n if distance_AC is not None:\n if distance_AC < minD:\n minD = distance_AC\n \n if distance_BD is not None:\n if distance_BD < minD:\n minD = distance_BD\n\n if minD is not None:\n if minD != 9999:\n return minD\n return None\n \"\"\"\n minD = raySegmentIntersectBD(self, ray)\n #print (minD)\n return minD\n \"\"\"", "def batch_mesh_contains_points(\n ray_origins, # point cloud as origin of rays\n obj_triangles,\n direction=torch.Tensor([0.4395064455, 0.617598629942, 0.652231566745]),\n):\n tol_thresh = 0.0000001\n batch_size = obj_triangles.shape[0]\n triangle_nb = obj_triangles.shape[1]\n point_nb = ray_origins.shape[1]\n\n # Batch dim and triangle dim will flattened together\n batch_points_size = batch_size * triangle_nb\n # Direction is random but shared\n v0, v1, v2 = obj_triangles[:, :, 0], obj_triangles[:, :, 1], obj_triangles[:, :, 2]\n # Get edges\n v0v1 = v1 - v0\n v0v2 = v2 - v0\n\n direction = direction.to(ray_origins.device)\n # Expand needed vectors\n batch_direction = direction.view(1, 1, 3).expand(batch_size, triangle_nb, 3)\n\n # Compute ray/triangle intersections\n pvec = torch.cross(batch_direction, v0v2, dim=2)\n dets = torch.bmm(\n v0v1.view(batch_points_size, 1, 3), pvec.view(batch_points_size, 3, 1)\n ).view(batch_size, triangle_nb)\n\n # Check if ray and triangle are parallel\n parallel = abs(dets) < tol_thresh\n invdet = 1 / (dets + 0.1 * tol_thresh)\n\n # Repeat mesh info as many times as there are rays\n triangle_nb = v0.shape[1]\n v0 = v0.repeat(1, point_nb, 1)\n v0v1 = v0v1.repeat(1, 
point_nb, 1)\n v0v2 = v0v2.repeat(1, point_nb, 1)\n hand_verts_repeated = (\n ray_origins.view(batch_size, point_nb, 1, 3)\n .repeat(1, 1, triangle_nb, 1)\n .view(ray_origins.shape[0], triangle_nb * point_nb, 3)\n )\n pvec = pvec.repeat(1, point_nb, 1)\n invdet = invdet.repeat(1, point_nb)\n tvec = hand_verts_repeated - v0\n u_val = (\n torch.bmm(\n tvec.view(batch_size * tvec.shape[1], 1, 3),\n pvec.view(batch_size * tvec.shape[1], 3, 1),\n ).view(batch_size, tvec.shape[1])\n * invdet\n )\n # Check ray intersects inside triangle\n u_correct = (u_val > 0) * (u_val < 1)\n qvec = torch.cross(tvec, v0v1, dim=2)\n\n batch_direction = batch_direction.repeat(1, point_nb, 1)\n v_val = (\n torch.bmm(\n batch_direction.view(batch_size * qvec.shape[1], 1, 3),\n qvec.view(batch_size * qvec.shape[1], 3, 1),\n ).view(batch_size, qvec.shape[1])\n * invdet\n )\n v_correct = (v_val > 0) * (u_val + v_val < 1)\n t = (\n torch.bmm(\n v0v2.view(batch_size * qvec.shape[1], 1, 3),\n qvec.view(batch_size * qvec.shape[1], 3, 1),\n ).view(batch_size, qvec.shape[1])\n * invdet\n )\n # Check triangle is in front of ray_origin along ray direction\n t_pos = t >= tol_thresh\n parallel = parallel.repeat(1, point_nb)\n # # Check that all intersection conditions are met\n try:\n not_parallel = 1 - parallel\n except:\n not_parallel = parallel==False\n final_inter = v_correct * u_correct * not_parallel * t_pos\n # Reshape batch point/vertices intersection matrix\n # final_intersections[batch_idx, point_idx, triangle_idx] == 1 means ray\n # intersects triangle\n final_intersections = final_inter.view(batch_size, point_nb, triangle_nb)\n # Check if intersection number accross mesh is odd to determine if point is\n # outside of mesh\n exterior = final_intersections.sum(2) % 2 == 0\n return exterior", "def LinePlaneCollision(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-12):\n\n ndotu = planeNormal.dot(rayDirection)\n if abs(ndotu) < epsilon:\n raise RuntimeError(\"no intersection or line is within plane\")\n\n w = rayPoint - planePoint\n si = -planeNormal.dot(w) / ndotu\n Psi = w + si * rayDirection + planePoint\n return Psi", "def ray_poly_intersect(p_ray, d_ray, poly):\n ret = []\n\n for s in poly.segments:\n p = ray_segment_intersect(p_ray, d_ray, s)\n\n if p is not None:\n ret.append(p)\n\n return ret", "def intersectsRay(self, ray):\n pass", "def intersect(self, ray):\n # TODO A5 (Step3 and Step4) implement this function\n # For step 4, check if uvs and normals are not None (respectively)\n # If so, then interpolate them\n\n # batch_intersect returns t, beta, gamma, i\n posns = self.posns\n uvs = self.uvs\n inds = self.inds\n normals = self.normals\n t, beta, gamma, i = batch_intersect(posns[inds[:, :]], ray)\n if (t == np.inf):\n return no_hit\n vs = posns[inds[i, :]]\n P = ray.origin + t * ray.direction\n\n if (t == np.inf):\n return no_hit\n else:\n\n alpha = 1 - beta - gamma\n\n if uvs is not None:\n\n uv0 = uvs[inds[i][0]]\n uv1 = uvs[inds[i][1]]\n uv2 = uvs[inds[i][2]]\n\n uv = alpha * uv0 + beta * uv1 + gamma * uv2\n\n else:\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n uv = vec([u, v])\n\n if normals is not None:\n\n n0 = normals[inds[i][0]]\n n1 = normals[inds[i][1]]\n n2 = normals[inds[i][2]]\n\n unit_normal = normalize(alpha * n0 + beta * n1 + gamma * n2)\n\n 
else:\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n return Hit(t, P, unit_normal, uv, self.material)", "def intersect_mesh_with_plane(\n vertices, faces, plane, neighborhood=None, ret_pointcloud=False\n):\n if neighborhood is not None:\n vg.shape.check(locals(), \"neighborhood\", (-1, 3))\n\n # 1: Select those faces that intersect the plane, fs\n fs = faces[faces_intersecting_plane(vertices, faces, plane)]\n\n if len(fs) == 0:\n # Nothing intersects.\n if ret_pointcloud:\n return np.zeros((0, 3))\n elif neighborhood is not None:\n return None\n else:\n return []\n\n # and edges of those faces\n es = np.vstack((fs[:, (0, 1)], fs[:, (1, 2)], fs[:, (2, 0)]))\n\n # 2: Find the edges where each of those faces actually cross the plane\n intersection_map = EdgeMap()\n\n pts, pt_is_valid = plane.line_segment_xsections(\n vertices[es[:, 0]], vertices[es[:, 1]]\n )\n valid_pts = pts[pt_is_valid]\n valid_es = es[pt_is_valid]\n for val, e in zip(valid_pts, valid_es):\n if not intersection_map.contains(e[0], e[1]):\n intersection_map.add(e[0], e[1], val)\n verts = np.array(intersection_map.values)\n\n # 3: Build the edge adjacency graph\n G = Graph(verts.shape[0])\n for f in fs:\n # Since we're dealing with a triangle that intersects the plane,\n # exactly two of the edges will intersect (note that the only other\n # sorts of \"intersections\" are one edge in plane or all three edges in\n # plane, which won't be picked up by mesh_intersecting_faces).\n e0 = intersection_map.index(f[0], f[1])\n e1 = intersection_map.index(f[0], f[2])\n e2 = intersection_map.index(f[1], f[2])\n if e0 is None:\n G.add_edge(e1, e2)\n elif e1 is None:\n G.add_edge(e0, e2)\n else:\n G.add_edge(e0, e1) # pragma: no cover\n\n # 4: Find the paths for each component\n components = []\n components_closed = []\n while len(G) > 0:\n path = G.pop_euler_path()\n if path is None:\n raise ValueError( # pragma: no cover\n \"Mesh slice has too many odd degree edges; can't find a path along the edge\"\n )\n component_verts = verts[path]\n\n if np.all(component_verts[0] == component_verts[-1]):\n # Because the closed polyline will make that last link:\n component_verts = np.delete(component_verts, 0, axis=0)\n components_closed.append(True)\n else:\n components_closed.append(False)\n components.append(component_verts)\n\n # 6 (optional - only if 'neighborhood' is provided): Use a KDTree to select\n # the component with minimal distance to 'neighborhood'.\n if neighborhood is not None and len(components) > 1:\n tree = cKDTree(neighborhood)\n\n # The number of components will not be large in practice, so this loop\n # won't hurt.\n means = [np.mean(tree.query(component)[0]) for component in components]\n index = np.argmin(means)\n if ret_pointcloud:\n return components[index]\n else:\n return Polyline(components[index], is_closed=components_closed[index])\n elif neighborhood is not None and len(components) == 1:\n if ret_pointcloud: # pragma: no cover\n return components[0] # pragma: no cover\n else:\n return Polyline(\n components[0], is_closed=components_closed[0]\n ) # pragma: no cover\n else:\n # No neighborhood provided, so return all the components, either in a\n # pointcloud or as separate polylines.\n if ret_pointcloud:\n return np.vstack(components)\n else:\n return [\n Polyline(v, is_closed=closed)\n for v, closed in zip(components, components_closed)\n ]", "def intersects(self, ray):\n\n sphere_to_ray = ray.origin - self.origin\n b = 2 * ray.direction * sphere_to_ray\n c = sphere_to_ray ** 2 - self.radius 
** 2\n discriminant = b ** 2 - 4 * c\n\n if discriminant >= 0:\n dist = (-b - math.sqrt(discriminant)) / 2\n if dist > 0:\n return dist", "def getIntersection(self, ray):\n pass", "def intersect(self, ray):\n # TODO A5 (Step1) implement this function\n # Copy your implementation from A4\n # Then calculate uv coordinates, to be passed into the Hit initializer\n vs = self.vs\n\n a = vs[0][0] - vs[1][0]\n b = vs[0][1] - vs[1][1]\n c = vs[0][2] - vs[1][2]\n d = vs[0][0] - vs[2][0]\n e = vs[0][1] - vs[2][1]\n f = vs[0][2] - vs[2][2]\n\n ray_dir = ray.direction\n ray_orig = ray.origin\n\n g = ray_dir[0]\n h = ray_dir[1]\n i = ray_dir[2]\n j = vs[0][0] - ray_orig[0]\n k = vs[0][1] - ray_orig[1]\n l = vs[0][2] - ray_orig[2]\n\n M = a * (e * i - h * f) + b * (g * f - d * i) + c * (d * h - e * g)\n\n t = -(f * (a * k - j * b) + e * (j * c - a * l) + d *\n (b * l - k * c)) / M\n\n if (t < ray.start or t > ray.end):\n return no_hit\n\n gamma = (i * (a * k - j * b) + h * (j * c - a * l) + g *\n (b * l - k * c)) / M\n\n if (gamma < 0 or gamma > 1):\n return no_hit\n\n beta = (j * (e * i - h * f) + k * (g * f - d * i) +\n l * (d * h - e * g)) / M\n\n if (beta < 0 or beta > 1 - gamma):\n return no_hit\n\n P = ray_orig + t * ray_dir\n\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n return Hit(t, P, unit_normal, vec([u, v]), self.material)", "def intersects(self, ray):\n sphere_to_ray = ray.origin - self.center\n a = 1\n b = 2 * ray.direction.dot_product(sphere_to_ray)\n c = sphere_to_ray.dot_product(sphere_to_ray) - self.radius * self.radius\n discriminant = b * b - 4 * a * c\n\n if discriminant >= 0:\n dist = (-b - sqrt(discriminant)) / 2\n if dist > 0:\n return dist\n\n return None", "def LinePlaneIntersection(line, plane):\n plane = rhutil.coerceplane(plane, True)\n line_points = rhutil.coerce3dpointlist(line, True)\n line = Rhino.Geometry.Line(line_points[0], line_points[1])\n rc, t = Rhino.Geometry.Intersect.Intersection.LinePlane(line, plane) \n if not rc: return scriptcontext.errorhandler()\n return line.PointAt(t)", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def intersect_plane(self, other: Plane, **kwargs) -> Line:\n if self.normal.is_parallel(other.normal, **kwargs):\n raise ValueError(\"The planes must not be parallel.\")\n\n array_normals_stacked = np.vstack((self.normal, other.normal))\n\n # Construct a matrix for a linear system.\n array_00 = 2 * np.eye(3)\n array_01 = array_normals_stacked.T\n array_10 = array_normals_stacked\n array_11 = np.zeros((2, 2))\n matrix = np.block([[array_00, array_01], [array_10, array_11]])\n\n dot_a = np.dot(self.point, self.normal)\n dot_b = np.dot(other.point, other.normal)\n array_y = np.array([0, 0, 0, dot_a, dot_b])\n\n # Solve the linear system.\n solution = np.linalg.solve(matrix, array_y)\n\n point_line 
= Point(solution[:3])\n direction_line = self.normal.cross(other.normal)\n\n return Line(point_line, direction_line)", "def plane_dem_intersection(\n srcPlaneAttitude: Plane,\n srcPt: Point,\n geo_array: GeoArray,\n level_ndx: int=0) -> List[Point]:\n\n # dem values as a Numpy array\n\n q_d = geo_array.level(\n level_ndx=level_ndx)\n\n # row and column numbers of the dem\n\n row_num, col_num = q_d.shape\n\n # plane closure that, given (x, y), derive z\n\n plane_z_closure = srcPlaneAttitude.closure_plane_from_geo(srcPt)\n\n # plane elevations at grid cell centers\n\n q_p = array_from_function(\n row_num=row_num,\n col_num=col_num,\n geotransform=geo_array.gt,\n z_transfer_func=plane_z_closure)\n\n index_multiplier = 100 # sufficiently large value to ensure a precise slope values\n\n mi_p = xyarr2segmentslope(\n xy2z_func=plane_z_closure,\n arrij2xy_func=geo_array.ijArrToxy,\n i=index_multiplier,\n j=0) * np.ones((row_num, col_num))\n\n mj_p = xyarr2segmentslope(\n xy2z_func=plane_z_closure,\n arrij2xy_func=geo_array.ijArrToxy,\n i=0,\n j=index_multiplier) * np.ones((row_num, col_num))\n\n # 2D array of DEM segment parameters\n\n cell_size_j, cell_size_i = geo_array.geotransf_cell_sizes()\n\n mj_d = grad_j(\n fld=q_d,\n cell_size_j=cell_size_j)\n\n mi_d = grad_iminus(\n fld=q_d,\n cell_size_i=cell_size_i)\n\n # intersection points\n\n intersection_pts_j = segment_intersections_array(\n m_arr1=mj_d,\n m_arr2=mj_p,\n q_arr1=q_d,\n q_arr2=q_p,\n cell_size=cell_size_j)\n\n intersection_pts_j = arrayTo3DPts(\n direction='j',\n arr=intersection_pts_j,\n ij2xy_func=geo_array.ijArrToxy,\n xy2z_func=plane_z_closure)\n\n intersection_pts_i = segment_intersections_array(\n m_arr1=mi_d,\n m_arr2=mi_p,\n q_arr1=q_d,\n q_arr2=q_p,\n cell_size=cell_size_i)\n\n # filter out i-direction points coincident with those of j-direction\n\n #intersection_pts_i = intersection_pts_i[np.where( intersection_pts_i > 1e10-6 )]\n\n intersection_pts_i = arrayTo3DPts(\n direction='i',\n arr=intersection_pts_i,\n ij2xy_func=geo_array.ijArrToxy,\n xy2z_func=plane_z_closure)\n\n unique_pts = intersection_pts_j + intersection_pts_i\n\n return unique_pts" ]
[ "0.7197938", "0.6904024", "0.6653329", "0.6607719", "0.65378755", "0.6519269", "0.6485377", "0.6441739", "0.6334094", "0.6284775", "0.6276811", "0.6276351", "0.6261968", "0.62611717", "0.62223566", "0.6186185", "0.61805207", "0.6166154", "0.60925084", "0.60659647", "0.60162663", "0.60100645", "0.5983563", "0.5929647", "0.59074485", "0.59061986", "0.5903761", "0.58921856", "0.58573914", "0.58553725" ]
0.7335776
0
Build command string from parameters passed to object. Usage of parameter h in options is going to be ignored.
def build_command_string(self): if self._regex_helper.search_compiled(W._re_h, self.options): if self._regex_helper.group("SOLO"): self.options = self.options.replace('-h', '') else: self.options = self.options.replace('h', '') cmd = "{} {}".format("w", self.options) else: cmd = "{}".format("w") return cmd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_command(process):\n cmd = \"{} \".format(process.name)\n for o in process.options.opt_list:\n i = 0\n opt = \"\"\n for el in o: \n if el and el != \"input\" and el != \"output\" and i != 3:\n opt += str(el)\n if opt[-1] != \"=\" and opt[-1] != \"'\": # command without space\n opt += \" \" # space\n i += 1\n cmd += opt\n return cmd", "def command_and_args(self) -> str:\n if self.command and self.args:\n rtn = f'{self.command} {self.args}'\n elif self.command:\n # there were no arguments to the command\n rtn = self.command\n else:\n rtn = ''\n return rtn", "def build_command_line_parameters(params, command_name=\"-param\"):\n if params is None:\n return \"\"\n res = []\n for k, v in sorted(params.items()):\n if '\"' in v:\n v = v.replace('\"', '\\\\\"')\n one = '{2} {0}=\"{1}\"'.format(k, v, command_name)\n res.append(one)\n return \" \".join(res)", "def generate_command_string(self, operation, *args, **kwargs):\n cmd = [self.terraform_binary_path, operation]\n\n for key, value in kwargs.items():\n if key == \"var\":\n for varkey, varval in value.items():\n option = \"-var=\"\n option += \"'%s=%s'\" % (varkey, varval)\n cmd.append(option)\n else:\n option = \"\"\n if \"_\" in key:\n key = key.replace(\"_\", \"-\")\n\n if value == \"IsFlag\":\n option = \"-%s\" % key\n else:\n option = \"-%s=%s\" % (key, value)\n cmd.append(option)\n\n if len(args) > 0:\n for arg in args:\n cmd.append(arg)\n\n return \" \".join(cmd)", "def makecmd(self, options):", "def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE", "def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):\n command = _arg_to_flag(k)\n if v is not None:\n command += f' {v}'\n return command", "def buildCommand(self, kwargs):\r\n self.command = \"\"\r\n try:\r\n if not self.isEnabled():\r\n return\r\n except Exception, e:\r\n print \"<ERROR>\", e\r\n return\r\n self.command = self.app\r\n \r\n \r\n \r\n # filename should be last in the command, so iterate again\r\n for key in kwargs:\r\n if key == 'filename':\r\n if type(kwargs[key]) == str:\r\n f = kwargs[key]\r\n if os.path.exists(f):\r\n self.command += \" \" + str(f)\r\n else:\r\n self.command = \"\"\r\n raise Exception, \"File does not exist!\"\r\n else:\r\n self.command = \"\"\r\n raise Exception, \"File needs to be a string.\"", "def _build_send_optode_command(self, cmd, command):\n return \"%s=%s%s\" % (cmd, command, self._newline)", "def __buildOptionString ( self ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings defining each element of self.switchSpecs\n # as getopt.getopt requires it ]\n for sw in self.switchSpecs:\n #-- 2 body --\n # [ if sw is a SwitchArg ->\n # result +:= a string defining sw as getopt.getopt\n # requires it ]\n if sw.takesValue:\n result.append ( \"%s:\" % sw.letter )\n else:\n result.append ( sw.letter )\n\n #-- 3 --\n # [ return the strings in result, concatenated ]\n return \"\".join ( result )", "def create_command(command: str, *parameters) -> str:\n if parameters and isinstance(parameters, tuple) and isinstance(parameters[0], tuple):\n parameters = parameters[0]\n str_param: str = ' '.join([str(param) for param in parameters]) if parameters else \"\"\n result = command+' ' + str_param + '\\r\\n' if str_param else command + '\\r\\n'\n return result", "def _get_cmd(cls, command, f_config, verbose=False):\n if command not in cls.COMMANDS:\n raise KeyError('Could not recongize command \"{}\". 
'\n 'Available commands are: {}'\n .format(command, cls.COMMANDS))\n cmd = cls.CMD_BASE.format(fp_config=f_config, command=command)\n if verbose:\n cmd += ' -v'\n\n return cmd", "def __str__(self):\n self._validate()\n commandline = \"%s \" % self.program_name\n for parameter in self.parameters:\n if parameter.is_set:\n #This will include a trailing space:\n commandline += str(parameter)\n return commandline.strip() # remove trailing space", "def format_command(cls, arg: int = 9600) -> str:\n if arg not in cls.arguments:\n raise ArgumentError(f\"arg not one of {cls.arguments}\")\n return f\"{cls.name},{arg}\"", "def buildCommandModel ( switchSpecs, posSpecs ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings representing the options in switchSpecs ]\n for switch in switchSpecs:\n result.append ( \"-%s\" % switch.letter )\n\n #-- 3 --\n # [ result +:= strings representing the keys in posSpecs ]\n for pos in posSpecs:\n if pos.optional:\n result.append ( \"[%s]\" % pos.key )\n else:\n result.append ( pos.key )\n if pos.repeated:\n result.append ( \"...\" )\n\n #-- 4 --\n # [ return the concatenation of the strings in result with single\n # spaces between them ]\n return \" \".join ( result )", "def create_ipmi_ext_command_string(command, **options):\n\n new_options = collections.OrderedDict()\n for option in ipmi_required_options:\n # This is to prevent boot table \"-N 10\" vs user input timeout.\n if \" -N \" in command and option == \"N\":\n continue\n if option in options:\n # If the caller has specified this particular option, use it in\n # preference to the default value.\n new_options[option] = options[option]\n # Delete the value from the caller's options.\n del options[option]\n else:\n # The caller hasn't specified this required option so specify it\n # for them using the global value.\n var_name = 'ipmi_' + ipmi_option_name_map[option]\n value = eval(var_name)\n new_options[option] = value\n # Include the remainder of the caller's options in the new options\n # dictionary.\n for key, value in options.items():\n new_options[key] = value\n\n return gc.create_command_string('ipmitool', command, new_options)", "def build_cmdline():\n\tcmd=optparse.OptionParser(version=__version__)\n\tcmd.add_option('-c', '', dest='config_fname',type=\"string\", help='WHM/WHMCS configuration file', metavar=\"FILE\")\n\tcmd.add_option('-s', '', dest=\"whm_section\", type=\"string\", help=\"WHM server to use. Specify section name. 
eg: -s ds01\", metavar=\"SERVER\")\n\tcmd.add_option('','--search', action=\"store\", dest='search', type=\"string\", help=\"Search client by DNS domain name or cPanel username\", metavar=\"STRING\")\n\tcmd.add_option('-d', '', dest='whmcs_deptid', type=\"int\", help=\"WHMCS Department ID\", metavar=\"INT\") \n\tcmd.add_option('-m', '', dest='whmcs_ticketmsg_fname', type=\"string\", help=\"WHMCS abuse ticket template file\", metavar='FILE')\n\tcmd.add_option('-r', '', dest='whm_suspendmsg_fname', type=\"string\", help='cPanel account suspension reason template file', metavar='FILE')\n\tcmd.add_option('-f', '', dest='whmcs_proofmsg_fname', type=\"string\", help='Abuse proof file which will be appended to abuse ticket message', metavar='FILE')\n\tcmd.add_option('', '--subject', dest='whmcs_subject', type=\"string\", help='Specify abuse ticket subject title.', metavar=\"STRING\")\n\tcmd.add_option('-y', '--allyes', dest='allyes', action=\"store_true\", default=False, help='Assume yes as an answer to any question which would be asked')\n\treturn cmd", "def __str__(self):\n if not self._args and not self.subcommand:\n return self.cmd\n elif not self._args and self.subcommand:\n return '{} {}'.format(\n self.cmd, self.subcommand)\n elif self._args and not self.subcommand:\n return '{} {}'.format(\n self.cmd, ' '.join(self._args))\n else:\n return '{} {} {}'.format(\n self.cmd, self.subcommand, ' '.join(self._args))", "def _cmd_builder(self, test_config):\n arg_str = ''\n for key, value in sorted(test_config['args'].items()):\n arg_str += '--{} {} '.format(key, value)\n return test_config['pycmd'].format(arg_str)", "def _build_direct_command(self, cmd, arg):\n return \"%s%s\" % (arg, self._newline)", "def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardware_address}</HardwareAddress></DeviceDetails>\".format(hardware_address=hardware_address)\n\n if comp_var_dict is not None:\n comp_keys = comp_var_dict.keys()\n if len(comp_keys) > 0:\n for comp_key in comp_keys:\n # Build requested variable list\n command += \"<Components><Component><Name>{comp_key}</Name><Variables>\".format(comp_key=comp_key)\n variables = comp_var_dict[comp_key]\n for var in variables:\n command += \"<Variable><Name>{var}</Name></Variable>\".format(var=var)\n command += \"</Variables></Component></Components>\"\n else:\n # Request all variables from all components\n command += \"<Components><All>Y</All></Components>\"\n\n # Close command\n command += \"</Command>\"\n \n return command", "def _BuildCommand(self, command_name, parameter_files=None, **kwargs):\n command = [YCSB_EXE, command_name, self.database]\n\n parameters = self.parameters.copy()\n parameters.update(kwargs)\n\n # Adding -s prints status which includes average throughput per sec.\n if _THROUGHPUT_TIME_SERIES.value and command_name == 'run':\n command.append('-s')\n parameters['status.interval'] = _STATUS_INTERVAL_SEC\n\n # These are passed as flags rather than properties, so they\n # are handled differently.\n for flag in self.FLAG_ATTRIBUTES:\n value = parameters.pop(flag, None)\n if value is not None:\n command.extend(('-{0}'.format(flag), str(value)))\n\n for param_file in list(self.parameter_files) + list(parameter_files or []):\n command.extend(('-P', param_file))\n\n for parameter, value in parameters.items():\n 
command.extend(('-p', '{0}={1}'.format(parameter, value)))\n\n return 'cd %s && %s' % (YCSB_DIR, ' '.join(command))", "def generic(self, switches=[\"--help\"]):\n return self._command_template(switches)", "def build_options(self):\n opts = [\n \"-k rpm.rpmva=off\",\n \"-k apache.log=True\",\n ]\n\n sensitive_keys = {\n self._engine_plugin: 'sensitive_keys',\n 'ovirt_engine_dwh': 'dwh_sensitive_keys',\n }\n if self.configuration['include_sensitive_data']:\n for plugin in sensitive_keys:\n self.configuration[sensitive_keys[plugin]] = ':'\n\n for plugin in sensitive_keys:\n if self.configuration.get(sensitive_keys[plugin]):\n opts.append(\n '-k {plugin}.sensitive_keys={keys}'.format(\n plugin=plugin,\n keys=self.configuration.get(sensitive_keys[plugin]),\n )\n )\n\n if self.configuration.get(\"ticket_number\"):\n opts.append(\n \"--ticket-number=%s\" % self.configuration.get(\"ticket_number\")\n )\n\n if self.sos_version < '30':\n opts.append('--report')\n\n if self.configuration.get(\"log_size\"):\n opts.append(\n \"--log-size=%s\" %\n self.configuration.get('log_size')\n )\n else:\n if self.sos_version < '30':\n opts.append('--report')\n opts.append(\"-k general.all_logs=True\")\n elif self.sos_version < '32':\n opts.append(\"-k logs.all_logs=True\")\n else:\n opts.append(\"--all-logs\")\n\n if self.configuration.get(\"upload\"):\n opts.append(\"--upload=%s\" % self.configuration.get(\"upload\"))\n return \" \".join(opts)", "def GenerateToolArgStrings(options):\n # Preparing dnstreexport\n dnstreeexport_array = [options.tree_export]\n dnstreeexport_array.extend(['-c', options.config_file])\n if( options.force ):\n dnstreeexport_array.append('--force')\n if( options.quiet ):\n dnstreeexport_array.append('--quiet')\n dnstreeexport_arg_string = ' '.join(dnstreeexport_array)\n\n # Preparing dnscheckconfig\n dnscheckconfig_array = [options.check_config]\n dnscheckconfig_array.extend(['-i', '%s' % options.id])\n dnscheckconfig_array.extend(['--config-file', options.config_file])\n if( options.named_checkzone ):\n dnscheckconfig_array.extend(['-z', options.named_checkzone])\n if( options.named_checkconf ):\n dnscheckconfig_array.extend(['-c', options.named_checkconf])\n if( not options.quiet ):\n dnscheckconfig_array.append('-v')\n dnscheckconfig_arg_string = ' '.join(dnscheckconfig_array)\n\n # Preparing dnsservercheck\n dnsservercheck_array = [options.server_check]\n dnsservercheck_array.extend(['--export-config'])\n dnsservercheck_array.extend(['-c', options.config_file])\n dnsservercheck_array.extend(['-i', '%s' % options.id])\n dnsservercheck_arg_string = ' '.join(dnsservercheck_array)\n\n # Preparing dnsconfigsync\n dnsconfigsync_array = [options.config_sync]\n dnsconfigsync_array.extend(['--export-config'])\n dnsconfigsync_array.extend(['-i', '%s' % options.id])\n dnsconfigsync_array.extend(['-c', options.config_file])\n if( options.ssh_id ):\n dnsconfigsync_array.extend(['--ssh-id', options.ssh_id])\n if( options.rndc_exec ):\n dnsconfigsync_array.extend(['--rndc-exec', options.rndc_exec])\n if( options.rndc_port ):\n dnsconfigsync_array.extend(['--rndc-port', options.rndc_port])\n if( options.rndc_key ):\n dnsconfigsync_array.extend(['--rndc-key', options.rndc_key])\n if( options.rndc_conf ):\n dnsconfigsync_array.extend(['--rndc-conf', options.rndc_conf])\n dnsconfigsync_arg_string = ' '.join(dnsconfigsync_array)\n\n # Preparing dnsquerycheck\n dnsquerycheck_array = [options.query_check]\n dnsquerycheck_array.extend(['--export-config'])\n dnsquerycheck_array.extend(['-c', 
options.config_file])\n dnsquerycheck_array.extend(['-i', '%s' % options.id])\n dnsquerycheck_array.extend(['-n', '%s' % options.number])\n dnsquerycheck_array.extend(['-p', '%s' % options.port])\n dnsquerycheck_arg_string = ' '.join(dnsquerycheck_array)\n\n return [dnstreeexport_arg_string,\n dnscheckconfig_arg_string,\n dnsservercheck_arg_string,\n dnsconfigsync_arg_string, \n dnsquerycheck_arg_string]", "def _extract_command(self, args):\n opts = self.gopts[:]\n for cmd in self.ctable.values():\n opts.extend(cmd.opts)\n sfl, lfl, _ = self._compute_flags(opts, check_conflicts=False)\n\n lopts,largs = getopt.getopt(args, sfl, lfl)\n if not largs:\n return None\n return self._command(largs[0])", "def buildCmd( tcmpCmd, cmd, target, sequence, fieldList):\n cmdList = [tcmpCmd, cmd, target, sequence, fieldList]\n\n return \"<{cmd}>\".format(cmd=\":\".join(cmdList))", "def formatCommand(command):\n cmdstr=\"\"\n logging.debug(repr(command))\n for arg in command:\n if \" \" in arg:\n cmdstr=cmdstr+\" \\\"\"+arg+\"\\\"\"\n else:\n cmdstr=cmdstr+\" \"+arg\n return cmdstr", "def shell_command(self):\n # TODO: fix this naive version by adding quotes where appropriate\n return \" \".join(self.args)", "def _get_command_args_syntax_help_string(command, is_no_command, args):\n syntax_string = ''\n if args:\n for i, arg in enumerate(args):\n if i > 0:\n syntax_string += ' '\n \n if _is_string(arg):\n syntax_string += arg\n continue\n\n if type(arg) == tuple:\n if debug.description():\n print _line(), command['self'], arg\n\n if is_no_command:\n optional = arg.get('optional-for-no', False)\n else:\n optional = arg.get('optional', False)\n if optional:\n syntax_string += '['\n \n choices = arg.get('choices')\n nested_args = arg.get('args')\n if nested_args:\n if choices:\n raise error.CommandDescriptionError('An argument can\\'t have both '\n '\"choices\" and \"args\" attributes', command)\n choices = (nested_args,)\n if choices:\n # Suppress choice delimiters if we've already emitted the square\n # brackets to indicate an optional argument. This is so we get\n # something simpler (e.g. \"[this | that]\" ) instead of getting\n # doubled delimiters (e.g. \"[{this | that}]\").\n if not optional:\n syntax_string += '{'\n \n for j, choice in enumerate(choices):\n if j > 0:\n syntax_string += ' | '\n choice_args = _get_choice_args(choice)\n choice_syntax_string = _get_command_args_syntax_help_string(command,\n is_no_command,\n choice_args)\n syntax_string += choice_syntax_string\n \n if not optional:\n syntax_string += '}'\n else:\n field = arg.get('field')\n \n tag = arg.get('tag')\n if tag:\n syntax_string += tag + ' '\n \n token = arg.get('token')\n if token:\n syntax_string += token\n\n if (field != None) and (arg.get('type') != 'boolean'):\n help_name = arg.get('help-name')\n if help_name:\n help_name = '<' + help_name + '>'\n else:\n if arg.get('type') == 'enum':\n values = arg.get('values')\n if values:\n if _is_string(values):\n values = (values,)\n help_name = ' | '.join(values)\n if len(values) > 1:\n help_name = '{' + help_name + '}'\n if not help_name:\n help_name = '<' + field + '>'\n syntax_string += help_name\n if optional:\n syntax_string += ']'\n \n return syntax_string" ]
[ "0.6516497", "0.64271504", "0.6422479", "0.63904124", "0.6238994", "0.6229805", "0.62154454", "0.6114734", "0.6070556", "0.5986263", "0.59837586", "0.5973345", "0.59382427", "0.5915507", "0.5883363", "0.5877054", "0.58658147", "0.58613133", "0.58564746", "0.585302", "0.58369714", "0.5796753", "0.577163", "0.57699174", "0.57617456", "0.57166255", "0.56980675", "0.56980383", "0.5696898", "0.56919104" ]
0.7719122
0
Parse general information in the line and update the GENERAL_INFO dictionary with it.
def _parse_general_info(self, line): if self._regex_helper.search_compiled(W._re_general_info, line): self.current_ret['GENERAL_INFO'].update({ 'time': datetime.datetime.strptime(self._regex_helper.group("TIME"), '%H:%M:%S').time(), 'uptime': self._regex_helper.group("UPTIME"), 'user_number': self._regex_helper.group("USER_NUMBER"), 'load_average': self._regex_helper.group("L_AVERAGE") }) raise ParsingDone
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def _parse_general_info_V4X(par, parfile):\n line = None\n while line != '':\n pos = parfile.tell()\n line = parfile.readline()\n #Parse the useful parts of the general info entry on the left and\n #right of the colon: key and value\n m = re.search(r'\\. ([^<>\\(\\)\\[\\]]*[a-zA-Z]).*: *(.*)', line)\n if not m:\n parfile.seek(pos)\n break\n key, val = m.group(1, 2)\n key = _sanitize_to_identifer(key).lower()\n #Try to guess the type of the field by conversion\n _val_split = val.split()\n if len(_val_split) > 1:\n try:\n val = np.array(tuple(float(x) for x in _val_split))\n except:\n pass\n else:\n try:\n val = int(val)\n except ValueError:\n pass\n #logger.debug(\"Key = '{0}' Val = '{1}'\".format(key, val))\n setattr(par.gen_info, key, val)\n return par.gen_info", "def parse_ensembl_line(line, header):\n line = line.rstrip().split(\"\\t\")\n header = [head.lower() for head in header]\n raw_info = dict(zip(header, line))\n\n ensembl_info = {}\n\n for word in raw_info:\n value = raw_info[word]\n if not value:\n continue\n\n if \"chromosome\" in word:\n ensembl_info[\"chrom\"] = value\n if \"gene name\" in word:\n ensembl_info[\"hgnc_symbol\"] = value\n if \"hgnc id\" in word:\n ensembl_info[\"hgnc_id\"] = int(value.split(\":\")[-1])\n if \"hgnc symbol\" in word:\n ensembl_info[\"hgnc_symbol\"] = value\n if \"strand\" in word:\n ensembl_info[\"strand\"] = int(value)\n\n update_gene_info(ensembl_info, word, value)\n update_transcript_info(ensembl_info, word, value)\n update_exon_info(ensembl_info, word, value)\n update_utr_info(ensembl_info, word, value)\n update_refseq_info(ensembl_info, word, value)\n return ensembl_info", "def _parse_line(self, line):\n msg_info = {'raw_message': line}\n line_split = line.split(None, 2)\n try:\n msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format)\n msg_info['message'] = line_split[2]\n except (ValueError, IndexError):\n pass\n return msg_info", "def get_read_group_info(line, logger=default_logger):\n\n rg_dict = dict()\n #Initialize the dictionary, so we know if any fields are missing\n rg_dict[\"PI\"] = \"\"\n rg_dict[\"CN\"] = \"UNKNOWN\"\n rg_dict[\"ID\"] = \"\"\n rg_dict[\"PL\"] = \"UNKNOWN\"\n rg_dict[\"LB\"] = \"\"\n rg_dict[\"SM\"] = \"\"\n rg_dict[\"PU\"] = \"\"\n rg_dict[\"DT\"] = \"\"\n sline = line.split('\\t')\n\n for item in sline:\n item = item.strip()\n\n if(item.startswith(\"ID:\")):\n rg_dict[\"ID\"] = item[3:]\n elif(item.startswith(\"PL:\")):\n rg_dict[\"PL\"] = item[3:]\n elif(item.startswith(\"PU:\")):\n item = item.replace(\".\", \"_\") #to agree with ICGC SOP\n rg_dict[\"PU\"] = item[3:]\n elif(item.startswith(\"LB:\")):\n rg_dict[\"LB\"] = item[3:]\n elif(item.startswith(\"DT:\")):\n rg_dict[\"DT\"] = item[3:]\n elif(item.startswith(\"SM:\")):\n rg_dict[\"SM\"] = item[3:]\n elif(item.startswith(\"CN:\")):\n rg_dict[\"CN\"] = item[3:]\n elif(item.startswith(\"PI:\")):\n rg_dict[\"PI\"] = item[3:]\n else:\n pass\n\n for key,value in rg_dict.items():\n if value == \"\":\n logger.warning(\"missing RG field %s\" % key)\n\n return rg_dict", "def parse_line(self, line):\n success = self.parser.handle_line(line)\n if success:\n self.data.update()\n else:\n self.bot.log(\"didn't handle line: '{}'\".format(line))", "def fill_project_info(self, line_read, new_format):\n if 
new_format:\n \"\"\" list(set()) converts list into a set to remove duplicates, but\n does not preserve order. dict key must always be 1st element\n in line_read else the project_info dict will not be updated\n properly. The code below gets rid of duplicated values while\n preserving the order of the elements in line_read. Code was\n taken from a StackOverflow thread about the same issue. \"\"\"\n tmp_list = set() # new empty set\n tmp_add = tmp_list.add # built-in method 'add' of set object\n temp = [x for x in line_read if not (x in tmp_list or tmp_add(x))]\n line_read = temp\n else:\n line_read = line_read.split(':', 1)\n line_read[0] = re.sub('[- ]', '', line_read[0])\n key_name = line_read[0].lower()\n\n if 'sapid' in key_name:\n self.project_info.update({'SAP ID': line_read[1].strip(' ')})\n if 'golive' in key_name:\n self.project_info.update({'Go Live Date': line_read[1]})\n if 'customer' in key_name:\n site_name = re.sub('State|Lottery', '', line_read[1])\n site_name = site_name.strip(' ')\n self.project_info.update({'Site': site_name})", "def process_line(self, line):\n find_result = re.findall(LINE_REGEX, line)\n line_data = {r[0]: r[1] for r in find_result}\n self.process_url(line_data.get('request_to'))\n self.process_status_code(line_data.get('response_status'))", "def getLineInformation(line):\n \n pass", "def parse_line(cls, line):\n regex = re.compile(cls.pattern)\n m = regex.search(line)\n if m:\n data = m.groupdict()\n data = cls.post_process(data)\n if cls.date_format:\n data['time'] = cls.convert_time(data['time'])\n else:\n data['time'] = datetime.now()\n return data\n else:\n return {}", "def _process_line(line, status):\n\n if line.startswith(ADAPTER_LINE_STARTSWITH):\n status.add_block('adapter', 'name', line)\n return\n elif line.startswith(EXIT_LINE_STARTSWITH):\n status.consolidate()\n return\n\n key, value = [el.strip(' \\t\\r\\n') for el in line.split(':', 1)]\n\n if key in KEY_TO_CONTEXT.keys():\n status.add_block(KEY_TO_CONTEXT[key], key, value)\n else:\n status.set_property(key, value)", "def parse_info(s:str) -> dict:\n d = {}\n d[\"SVTYPE\"] = re.search(r'(?<=SVTYPE=)\\w+',s).group(0)\n d[\"SUPPORT\"] = re.search(r'(?<=SUPPORT=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"BND\"]:\n return d\n d[\"END\"] = re.search(r'(?<=END=)\\d+',s).group(0)\n if d[\"SVTYPE\"] in [\"INV\"]:\n return d\n d[\"SVLEN\"] = re.search(r'(?<=SVLEN=)(.*?)(?=;)',s).group(0)\n d[\"READS\"] = re.search(r'(?<=READS=)(.*?)(?=$)',s).group(0).split(\",\")\n if d[\"SVTYPE\"] == \"INS\":\n d[\"SEQS\"] = re.search(r'(?<=SEQS=)(.*?)(?=;)',s).group(0).split(\",\")\n return d", "def parse_line(self, line):\n raise NotImplementedError", "def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV 
line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), 
\\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = 
check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def process_entry(self):\n\n for line_item in self.entry:\n pairs = line_item.split(' ')\n for pair in pairs:\n if ':' in pair:\n key, value = pair.split(':')\n if value.isdigit():\n self.fields[key] = int(value)\n else:\n self.fields[key] = value", "def parse_aunt_information(line: str) -> (int, Mapping[str, int]):\n sue_number, parameters = re.split(': ', line, maxsplit=1)\n number = int(re.sub('Sue ', '', sue_number))\n\n def parse_param(description: str) -> (str, int):\n \"\"\"The name and value of the described parameter.\"\"\"\n name, value = re.split(': ', description)\n return name, int(value)\n\n return number, dict(map(parse_param, re.split(', ', parameters)))", "def parse_info(self, info_lines):\n info = {}\n for 
line in info_lines:\n if not line or line.startswith(\"#\"):\n continue\n\n if \":\" not in line:\n collectd.warning(\"redis_info plugin: Bad format for info line: %s\" % line)\n continue\n\n key, val = line.split(\":\", 1)\n\n # Handle multi-value keys (for dbs and slaves).\n # db lines look like \"db0:keys=10,expire=0\"\n # slave lines look like\n # \"slave0:ip=192.168.0.181,port=6379,\n # state=online,offset=1650991674247,lag=1\"\n if \",\" in val:\n split_val = val.split(\",\")\n for sub_val in split_val:\n k, _, v = sub_val.rpartition(\"=\")\n sub_key = \"{0}_{1}\".format(key, k)\n info[sub_key] = v\n else:\n info[key] = val\n\n # compatibility with pre-2.6 redis (used changes_since_last_save)\n info[\"changes_since_last_save\"] = info.get(\"changes_since_last_save\", info.get(\"rdb_changes_since_last_save\"))\n\n return info", "def parse_line(self, line):\n line = line.strip()\n log.debug(\"Parsing line: '{}'\".format(line))\n if len(line) == 0:\n log.warning(\"Zero length line detected\")\n return\n split = line.split(DELIMETER)\n key = split[0]\n if key in FORMATS:\n log.debug(\"Using formatter for key: {}\".format(key))\n formatter = FORMATS[key]\n for (name, parser), value in zip(formatter,split[1:]):\n self._params[name] = parser(value)\n log.info(\"Parameters: {}\".format(self._params))\n self.notify_watchers()\n else:\n log.debug(\"Invalid key: {}\".format(key))", "def process(line):\n output = {}\n\n fields = line.split(\" \")\n if len(fields) < 6:\n return False\n \n output[\"log_level\"] = fields[0]\n \n # Convert date/time to object or reject line\n try:\n output[\"datetime\"] = datetime.datetime.strptime(\n \"%s %s\" % (fields[1], fields[2]),\n \"%Y-%m-%d %H:%M:%S,%f\"\n )\n except ValueError:\n return False\n \n output[\"logger\"] = fields[3]\n output[\"thread\"] = fields[4]\n output[\"process\"] = fields[5]\n output[\"message\"] = \" \".join(fields[6:])\n \n return output", "def ParseLine(line):\n fields = line.split()\n ip = fields[0]\n datestr = ' '.join(fields[3:5])[1:-1]\n timestamp = datetime.strptime(\n datestr, '%d/%b/%Y:%H:%M:%S %z'\n ).timestamp()\n command = fields[5][1:]\n uri = fields[6]\n protocol = fields[7][:-1]\n status = int(fields[8])\n size = int(fields[9])\n meta = [var.strip('\"') for var in fields[11:-1]]\n return {\n 'timestamp': timestamp,\n 'ip': ip,\n 'command': command,\n 'uri': uri,\n 'protocol': protocol,\n 'status': status,\n 'size': size,\n 'meta': meta\n }", "def get_line_desc(self, line):\n return dict(zip(self.header, line))", "def _parse_line(line: Match[str]) -> dict:\n request = line.group(\"request\")\n request = request.split()\n req_method = request[0] # GET, POST, PUT, etc.\n url = request[1]\n x = url.split(\"/\")[3:]\n uri = f'/{\"/\".join(x)}'\n\n timestamp = line.group(\"timestamp\") # timestamp in ISO format\n timestamp = MyTime._try_isoformat(timestamp, tzinfo=\"UTC\").dt\n\n res = {\n \"url\": url,\n \"uri\": uri,\n \"req_method\": req_method,\n \"timestamp\": timestamp,\n \"user_agent\": line.group(\"user_agent\"),\n }\n return res", "def parse_user_dict(self, line):\n pass", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def parseLine(self, line):\n\n # Bail out on lines with a malformed timestamp\n try:\n timestamp = 
time.mktime(time.strptime(line[1:25], \"%a %b %d %H:%M:%S %Y\"))\n except:\n return\n \n text = line[27:]\n \n if self.myname: \n self.attendance.mark(timestamp, self.myname)\n text = self.re_myname.sub(self.myname + ' ', text) \n \n damage = self.re_damage.search(text)\n #damage = False\n death = self.re_death.search(text)\n #death = False\n miss = self.re_miss.search(text)\n #miss = False\n #defensive = self.re_defensive.search(text)\n defensive = False\n loot = self.re_loot.search(text)\n attendance = self.re_attendance.search(text)\n if damage:\n (attacker, atktype, defender, amount, nonmelee) = damage.groups()\n if nonmelee:\n atktype = 'non-melee'\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, int(amount))\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif miss:\n (attacker, atktype, defender) = miss.groups()\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, 'miss')\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif defensive:\n (attacker, atktype, defender, defensetype) = defensive.groups()\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, defensetype)\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif death:\n (defender, junk, attacker) = death.groups()\n if junk.count('have slain'):\n (defender, attacker) = (attacker, defender)\n # Use PC deaths to track their attendance\n if defender.count(' ') == 0:\n self.attendance.mark(timestamp, defender)\n elif attacker.count(' ') == 0:\n self.kills.addKill(timestamp, defender)\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.addDeath(timestamp, attacker, defender)\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n elif loot:\n (looter, item) = loot.groups()\n self.loot.addLoot(timestamp, looter, item)\n self.attendance.mark(timestamp, looter)\n elif attendance:\n attendee = attendance.group(1)\n self.attendance.mark(timestamp, attendee)", "def Process_line(line, summary):\n line = line.lower() # lower all words in the line\n line = line.translate(str.maketrans('','', string.punctuation)) # Remove all punctuation\n words = line.split() # Split word from the line\n add_word(words, summary) # Add word to dictionary", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def _parse_line(self):\n # check if line contains a rule or not\n stripped = self._line.strip()\n if not stripped or 
stripped.startswith(\"#\"):\n return None\n\n # strip out double quotes from values, and simplify equals strings\n simplified = self._line.replace(\"==\", \"=\").replace('\"', '')\n\n # return a dictionary formed from the key=value pairs found in line\n return dict(f.strip().split(\"=\", 1) for f in simplified.split(\",\"))", "def process_log_line(self, line):\n int_map = self.int_map\n timestamp = line[0:26]\n if len(timestamp) >= 26:\n msg = {}\n try:\n # %Y-%m-%d %H:%M:%S.%f - 2017-06-27 13:46:10.048844\n day = int_map[timestamp[8:10]]\n hour = int_map[timestamp[11:13]]\n minute = int_map[timestamp[14:16]]\n second = int_map[timestamp[17:19]]\n usecond = int_map[timestamp[20:22]] * 10000 + \\\n int_map[timestamp[22:24]] * 100 + int_map[timestamp[24:26]]\n event_time = (hour * 3600.0 + minute * 60.0 + second) + (usecond / 1000000)\n if day == self.start_day:\n elapsed = event_time - self.start_time\n else:\n elapsed = event_time + (float(3600 * 24) - self.start_time)\n msg['timestamp'] = elapsed\n if msg['timestamp'] >= 0:\n offset = line.find(']: ', 32)\n if offset >= 0:\n try:\n thread = line[34:offset]\n separator = thread.find(':')\n if separator >= 0:\n thread = thread[separator + 1:].strip()\n msg['thread'] = thread\n msg['level'] = line[offset + 3:offset + 4]\n msg_start = line.find(' ', offset + 5)\n if msg_start >= 0:\n msg['category'] = line[offset + 5:msg_start]\n msg['message'] = line[msg_start + 1:]\n if msg['category'] == 'nsHttp':\n if msg['thread'] == 'Main Thread':\n self.main_thread_http_entry(msg)\n elif msg['thread'] == 'Socket Thread':\n self.socket_thread_http_entry(msg)\n elif msg['category'] == 'nsSocketTransport':\n self.socket_transport_entry(msg)\n elif msg['category'] == 'nsHostResolver':\n self.dns_entry(msg)\n except Exception:\n logging.exception('Error processing log line')\n except Exception:\n pass" ]
[ "0.6655572", "0.65438634", "0.61774", "0.6080733", "0.6014996", "0.59691495", "0.5966504", "0.5905911", "0.58785135", "0.583794", "0.5830027", "0.57280356", "0.5722466", "0.56339806", "0.562032", "0.5615857", "0.5602782", "0.5572568", "0.5570758", "0.5545635", "0.5519029", "0.5480071", "0.5456602", "0.5442831", "0.5408473", "0.5386417", "0.53773934", "0.5375964", "0.5373519", "0.536676" ]
0.8499883
0
Parse V option output in line and append it to RESULT list.
def _parse_v_option(self, line): if self._regex_helper.search_compiled(W._re_v_option, line): self.current_ret['RESULT'].append(self._regex_helper.group("V_OPTION")) raise ParsingDone
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processOption (self, line) :\n ll = line.split ('=')\n if len (ll) < 2:\n print \"Cannot parse option \" , line\n sys.exit()\n result = (ll[0].strip() , ll[1].strip())\n return result", "def _parse_results(self):\n for line in self.file_dic['output'].splitlines():\n if line.startswith(' * GAMESS VERSION = '):\n temp = line.split('=')[1]\n temp = temp.split('*')[0]\n self.version = temp.strip()\n\n if line[1:25] == 'FREE ENERGY OF SOLVATION' and line.find('1 ATM') == -1:\n temp = line.split()\n #Take the next number after =\n #In KCAL/MOL\n self.solvation_energy = float(temp[temp.index(\"=\") + 1])", "def parse_output(result):\n output = result['output']\n parsed = output.split('\\n')\n output = []\n for _line in parsed:\n output.append(_line.strip())\n log.debug(_line)\n return output", "def _do_option(self, line: str) -> None:\n if line.startswith(\"option verbosity\"):\n self._verbosity = Level(int(line[len(\"option verbosity \") :]))\n _write(\"ok\")\n else:\n _write(\"unsupported\")", "def parse_results(variants):\n out = []\n\n # set header\n lines = variants[0].get_output().split('\\n')\n for line in lines[:-1]:\n out.append(line.split('\\t')[0])\n\n # append output for all variants to single list\n for var in variants:\n lines = var.get_output().split('\\n')\n for i in range(0, len(lines) - 1):\n out[i] += '\\t{}'.format(lines[i].split()[1])\n\n return out", "def v_action(option,opt_str,value,parser):\n cmdline_main.message(\"Enabling verbose message output.\")\n if hasattr(parameterized,'get_logger'):\n parameterized.get_logger().setLevel(parameterized.VERBOSE)\n else: # For versions of the param package before 9 May 2013\n parameterized.min_print_level=parameterized.VERBOSE", "def on_new_line(self, line, is_full_line):\n try:\n if is_full_line:\n self._parse_v_option(line)\n self._parse_general_info(line)\n self._parse_header(line)\n except ParsingDone:\n pass # line has been fully parsed by one of above parse-methods\n return super(W, self).on_new_line(line, is_full_line)", "def __init__(self, optv):\n self.__p4optv = optv\n # Treat '-g' like '-G' except the marshal'ed Python dicts\n # will be unmarshal'ed.\n if '-g' in self.__p4optv:\n self.__p4optv[self.__p4optv.index('-g')] = '-G'\n self.__unmarshal = 1\n else:\n self.__unmarshal = 0\n # Drop '-s'. 
'p4' implements this on the client side and so\n # should 'px' (XXX though it does not yet), so the option should\n # not be passed to the server.\n if '-s' in self.__p4optv:\n self.__p4optv.remove('-s')\n log.warn(\"dropping '-s' option, px cannot yet handle it\")\n _ListCmd.__init__(self)", "def parse_options():\n\n from optparse import OptionParser\n usage = r\"\"\"%prog [options] <voxel_file>\"\"\"\n p = OptionParser(usage=usage)\n p.add_option('-o', '--output', action='store', dest='output',\n default='plot', help='Path to output SILO or VTK file.')\n p.add_option('-v', '--vtk', action='store_true', dest='vtk',\n default=False, help='Flag to convert to VTK instead of SILO.')\n parsed = p.parse_args()\n if not parsed[1]:\n p.print_help()\n return parsed\n return parsed", "def process_options(self):\n\n argv = sys.argv\n\n # process any optlist_ options\n self.valid_opts.check_special_opts(argv)\n\n # process terminal options without the option_list interface\n # (so that errors are not reported)\n\n # if no arguments are given, do default processing\n if '-help' in argv or len(argv) < 2:\n print(g_help_string)\n return 1\n\n if '-hist' in argv:\n print(g_history)\n return 1\n\n if '-show_valid_opts' in argv:\n self.valid_opts.show('', 1)\n return 1\n\n if '-ver' in argv:\n print(g_version)\n return 1\n\n # ============================================================\n # read options specified by the user\n self.user_opts = OL.read_options(argv, self.valid_opts)\n uopts = self.user_opts # convenience variable\n if not uopts: return -1 # error condition\n\n # ------------------------------------------------------------\n # process verb first\n\n val, err = uopts.get_type_opt(int, '-verb')\n if val != None and not err: self.verb = val\n\n # ------------------------------------------------------------\n # process options sequentially, to make them like a script\n errs = 0\n for opt in self.user_opts.olist:\n # check for anything to skip\n if opt.name == '-verb': pass\n\n elif opt.name == '-infiles':\n self.infiles, err = uopts.get_string_list('', opt=opt)\n if self.infiles == None or err:\n print('** failed to read -infiles list')\n errs +=1\n\n self.parse_infile_names()\n\n elif opt.name == '-overwrite':\n self.overwrite = 1\n\n elif opt.name == '-separator':\n self.separator, err = uopts.get_string_opt('', opt=opt)\n if self.separator == None or err:\n print(\"** bad -tablefile option\")\n errs += 1\n if self.separator == 'tab': self.separator = '\\t'\n elif self.separator == 'whitespace': self.separator = 'ws'\n self.seplen = len(self.separator)\n\n elif opt.name == '-showlabs':\n self.showlabs = 1\n\n elif opt.name == '-show_missing':\n self.show_missing = 1\n\n elif opt.name == '-tablefile':\n self.tablefile, err = uopts.get_string_opt('', opt=opt)\n if self.tablefile == None or err:\n print(\"** bad -tablefile option\")\n errs +=1\n\n else:\n oind = self.user_opts.olist.index(opt)\n print('** unknown option # %d: %s' % (oind+1, opt.name))\n errs += 1\n break\n\n # allow early and late error returns\n if errs: return -1\n\n # ------------------------------------------------------------\n # apply any trailing logic\n\n if len(self.infiles) < 1:\n print('** missing -infiles option')\n errs += 1\n\n if errs: return -1\n\n return 0", "def print_result(self, result_line: str):\r\n if self.output_option == 1:\r\n print(result_line)\r\n else:\r\n self.ER_file.write('{}\\n'.format(result_line))", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec 
= self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def result_handler(raw_output):\n write_log(raw_output[1])\n item_list = []\n line_list = raw_output[1].split('\\n')\n title_list = re.sub(\"\\s{2,}\", \"\\t\", line_list[0]).split(\"\\t\")\n for line in line_list[1:]:\n item_list.append(re.sub(\"\\s{2,}\", \"\\t\", line).split(\"\\t\"))\n return raw_output[0], title_list, item_list", "def _parse_result(self, result, *, verbose=False, **kwargs):\n return get_fermilat_datafile(result)", "def options(self, content, result='all'):\n if not self.tracconfig:\n raise Exception('TracMacroConfig not setup properly - no config')\n good_result = [ 'all', 'wellknown', 'extra', 'nothing' ]\n if not result is None and not result in good_result:\n raise ValueError(\"TracMacroConfig.options(result='%s') invalid;\"\n \" use one of %s, or None.\" % (\n result, ','.join([ \"%s\" % x for x in good_result ])))\n self.results_list, options = parse_args(content, strict=False)\n self._log('parse incoming %s' % options)\n self._parse(options)\n if result is None or result == 'nothing':\n return None\n results = {}\n for key, ent in self.results.iteritems():\n if result == 'all' or \\\n (result == 'wellknown' and key in self.wanted) or \\\n (result == 'extra' and not key in self.wanted):\n results[key] = ent[0]\n self._log('parse results %s' % results)\n return results", "def ProcessOptions():\n \n MiscUtil.PrintInfo(\"Processing options...\")\n \n # Validate options...\n ValidateOptions()\n \n OptionsInfo[\"CalcRMSD\"] = Options[\"--calcRMSD\"]\n OptionsInfo[\"UseBestRMSD\"] = False\n if re.match(\"^BestRMSD$\", OptionsInfo[\"CalcRMSD\"], re.I):\n OptionsInfo[\"UseBestRMSD\"] = True\n \n OptionsInfo[\"MaxIters\"] = int(Options[\"--maxIters\"])\n \n OptionsInfo[\"Mode\"] = Options[\"--mode\"]\n \n OptionsInfo[\"RefFile\"] = Options[\"--reffile\"]\n OptionsInfo[\"ProbeFile\"] = Options[\"--probefile\"]\n\n # No need for any RDKit specific --outfileParams....\n OptionsInfo[\"InfileParams\"] = MiscUtil.ProcessOptionInfileParameters(\"--infileParams\", Options[\"--infileParams\"])\n \n OptionsInfo[\"Outfile\"] = Options[\"--outfile\"]\n \n OptionsInfo[\"Overwrite\"] = Options[\"--overwrite\"]\n \n OptionsInfo[\"OutDelim\"] = \" \"\n if MiscUtil.CheckFileExt(OptionsInfo[\"Outfile\"], \"csv\"):\n OptionsInfo[\"OutDelim\"] = \",\"\n elif MiscUtil.CheckFileExt(OptionsInfo[\"Outfile\"], \"tsv txt\"):\n OptionsInfo[\"OutDelim\"] = \"\\t\"\n else:\n MiscUtil.PrintError(\"The file name specified , %s, for option \\\"--outfile\\\" is not valid. 
Supported file formats: csv tsv txt\\n\" % (OptionsInfo[\"Outfile\"]))", "def parse_options(self):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input_csv\",\n type=str,\n help=\"Input CSV path(s)\",\n required=True,\n nargs=\"+\",\n )\n parser.add_argument(\n \"-o\", \"--output_csv\", type=str, help=\"Output CSV path\", required=True\n )\n parser.add_argument(\n \"-k\",\n \"--kernel\",\n type=str,\n default=\"rbf\",\n help='SVM kernel type (\"rbf\", \"linear\", \"poly\" or \"sigmoid\")',\n )\n parser.add_argument(\n \"-c\", type=float, default=1.0, help=\"Penalty parameter C of the error term\"\n )\n parser.add_argument(\n \"-g\",\n \"--gamma\",\n type=str,\n default=\"auto\",\n help=\"Gamma parameter of the SVM.\",\n )\n parser.add_argument(\n \"-r\",\n \"--mb_ram\",\n type=float,\n default=4000,\n help=\"Available RAM in megabytes, for SVM calculation\",\n )\n parser.add_argument(\n \"-f\",\n \"--fdr_cutoff\",\n type=float,\n default=0.01,\n help=\"Target PSMs with a lower FDR will be used as a \"\n \"positive training set\",\n )\n parser.add_argument(\n \"-x\",\n \"--columns_as_features\",\n type=str,\n nargs=\"+\",\n default=[\n \"MS-GF:RawScore\",\n \"MS-GF:DeNovoScore\",\n \"MS-GF:SpecEValue\",\n \"MS-GF:EValue\",\n \"OMSSA:evalue\",\n \"OMSSA:pvalue\",\n \"X\\!Tandem:expect\",\n \"X\\!Tandem:hyperscore\",\n ],\n help=\"Columns that should be used as a feature directly \"\n \"(e.g. secondary scores). Will be converted to float\",\n )\n parser.add_argument(\n \"-d\",\n \"--dump_svm_matrix\",\n type=str,\n default=False,\n help=\"Dump SVM matrix in PIN (Percolator input) format \"\n \"to the specified path, mostly for debugging \"\n \"and benchmarking.\",\n )\n\n arg_dict = vars(parser.parse_args()) # convert to dict\n self.update(arg_dict)\n try:\n self[\"gamma\"] = float(self[\"gamma\"])\n except ValueError:\n assert (\n self[\"gamma\"] == \"auto\"\n ), \"Invalid gamma param: \" '\"{0}\", using \"auto\" instead.'.format(\n self[\"gamma\"]\n )", "def get_options(self):\n options = dict()\n while True:\n line = self.rfile.readline().decode(\"utf8\").strip()\n if not line:\n break\n self.log.debug(\"Got line: %s\", line)\n if \":\" not in line:\n self.log.debug(\"Invalid option: %s\", line)\n error_msg = \"header not in 'Name: value' format\"\n raise oa.errors.InvalidOption(error_msg)\n name, value = line.split(\":\", 1)\n options[name.lower()] = value.strip()\n return options", "def get_parse():\n parser = argparse.ArgumentParser(description=print(mytext),\n add_help=True,\n formatter_class=argparse.RawTextHelpFormatter)\n requiredNAMED = parser.add_argument_group('required arguments')\n\n requiredNAMED.add_argument(\"-i\", \"--input\",\n required=True,\n default=None,\n dest=\"input\",\n type=str,\n help='''/path/to/VariantsToTable_output.txt\nIt is assumed that there is either a 'locus' or 'unstitched_locus' column.\nThe 'locus' column elements are the hyphen-separated\nCHROM-POS. If the 'unstitched_chrom' column is present, the code will use the\n'unstitched_locus' column for SNP names, otherwise 'CHROM' and 'locus'. The\n'unstitched_locus' elements are therefore the hyphen-separated \nunstitched_locus-unstitched_pos. 
FREQ columns from VarScan are also \nassumed.\n''')\n requiredNAMED.add_argument(\"-o\",\"--outdir\",\n required=True,\n default=None,\n dest=\"outdir\",\n type=str,\n help='''/path/to/cmh_test_output_dir/\nFile output from cmh_test.py will be saved in the outdir, with the original\nname of the input file, but with the suffix \"_CMH-test-results.txt\"''')\n requiredNAMED.add_argument(\"--case\",\n required=True,\n default=None,\n dest=\"case\",\n type=str,\n help='''The string present in every column for pools in \"case\" treatments.''')\n requiredNAMED.add_argument(\"--control\",\n required=True,\n default=None,\n dest=\"control\",\n type=str,\n help='''The string present in every column for pools in \"control\" treatments.''') \n requiredNAMED.add_argument(\"-p\",\"--ploidy\",\n required=True,\n default=None,\n dest=\"ploidyfile\",\n type=str,\n help='''/path/to/the/ploidy.pkl file output by the VarScan pipeline. This is a python\ndictionary with key=pool_name, value=dict with key=pop, value=ploidy. The code\nwill prompt for pool_name if necessary.''')\n requiredNAMED.add_argument(\"-e\",\"--engines\",\n required=True,\n default=None,\n dest=\"engines\",\n type=int,\n help=\"The number of ipcluster engines that will be launched.\")\n parser.add_argument(\"--ipcluster-profile\",\n required=False,\n default='default',\n dest=\"profile\",\n type=str,\n help=\"The ipcluster profile name with which to start engines. Default: 'default'\")\n parser.add_argument('--keep-engines',\n required=False,\n action='store_true',\n dest=\"keep_engines\",\n help='''Boolean: true if used, false otherwise. If you want to keep\nthe ipcluster engines alive, use this flag. Otherwise engines will be killed automatically.\n(default: False)''')\n\n # check flags\n args = parser.parse_args()\n if not op.exists(args.outdir):\n print(ColorText(f\"FAIL: the directory for the output file(s) does not exist.\").fail())\n print(ColorText(f\"FAIL: please create this directory: %s\" % args.outdir).fail())\n print(ColorText(\"exiting cmh_test.py\").fail())\n exit()\n\n # make sure input and ploidyfile exist\n nopath = []\n for x in [args.input, args.ploidyfile]: # TODO: check for $HOME or other bash vars in path\n if not op.exists(x):\n nopath.append(x)\n\n # if input or ploidy file do not exist:\n if len(nopath) > 0:\n print(ColorText(\"FAIL: The following path(s) do not exist:\").fail())\n for f in nopath:\n print(ColorText(\"\\tFAIL: %s\" % f).fail())\n print(ColorText('\\nexiting cmh_test.py').fail())\n exit()\n\n print('args = ', args)\n return args", "def getResult(f, opt, model_name):\n tacc = []\n vacc = []\n opt.write('======= ' + model_name + ' =======\\n')\n for l in f.readlines():\n if l.startswith('#'):\n opt.write(l[2:] + '\\n')\n elif l.startswith('Epoch'):\n continue\n else:\n data = l.split(' - ')\n tacc.append(float(data[2].split(': ')[1]))\n vacc.append(float(data[4].split(': ')[1]))\n return np.array(tacc), np.array(vacc)", "def parse_command_line():\n\n parser=OptionParser(usage=\"%prog [options] \",\n description=\" updates tracker\" )\n parser.add_option(\"-c\", \"--candidate\", action=\"store\", type=\"string\",\n dest=\"candidate\", default=\"\", help=\"candidate name\")\n parser.add_option(\"-u\", \"--username\", action=\"store\", type=\"string\",\n dest=\"username\", default=\"gzhou\",\n help=\"username\")\n parser.add_option(\"-p\",\"--password\", action=\"store\",\n dest=\"password\", default=\"egghead\", help=\"password\")\n parser.add_option(\"-i\",\"--input\", action=\"store\",\n 
dest=\"input\", default=\"tracker_temp.txt\", help=\"Input file\") \n parser.add_option(\"-l\",\"--upload\", action=\"store\",\n dest=\"upload\", default=None, help=\"upload file\") \n parser.add_option(\"-d\",\"--description\", action=\"store\",\n dest=\"description\", default=None, help=\"descirption\") \n (options, args)=parser.parse_args()\n\n return options, args", "def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))", "def parse_options(self, options):\n pass", "def parse_lspci_vv_chk_error(output,raiseOnErrors = \"1\"):\n \n found_devSta = 0\n \n #sys.exit(1)\n if re.search(\"DevSta\",output):\n found_devSta = 1\n \n # remove DevStat after splitting it\n l_a = output.split(\":\")\n l_a1 = l_a[1].split()\n for m in l_a1:\n \n # if ends with +, \n if re.search(\"Err\",m):\n if re.search(\".*\\+$\",m):\n \n print \"-\" * 8\n \n errorStr = \"Found + in lspci output for '%s' , line details '%s'\"%(m,output)\n trace_error(errorStr)\n if raiseOnErrors == \"1\":\n raise ViriError(errorStr)\n\n return 2\n \n if found_devSta == 0:\n raise ViriError(\"Did not find 'devSta' in the output %s\"%output)\n\n trace_info(\"No lspci correctable or uncorrectable issues seem to be present , output '%s'\"%output)\n return 1", "def _build_parsed_values(self):\n match = OptodeSettingsParticle.regex_compiled().match(self.raw_data)\n\n if not match:\n raise SampleException(\"No regex match of parsed optode data: [%s]\" %\n self.raw_data)\n\n try:\n return self._get_multiline_values()\n except ValueError as e:\n raise SampleException(\"ValueError while decoding optode output: [%s]\" % e)", "def _parse(self):\n with open(self.input) as f:\n for line in f:\n if not line.lstrip().startswith(\"#\"): # comment\n stripped_line=line.split(\"#\")[0].strip()\n \n # Initialise an empty option dictionary with some good defaults\n if \"[\" in stripped_line:\n molname=stripped_line.split()[1]\n self.options[molname]=self.empty_option_dict.copy() # dict1=dict2 does not copy!\n self.options[molname][\"MolName\"]=molname\n if \":\" in stripped_line: \n # now process line by line\n if \"{\" not in stripped_line:\n key,value=[i.strip() for i in stripped_line.split(\":\")]\n\n if key not in self.options[molname].keys():\n raise BaseException(\"Option \\\"{}\\\" not known, please check your input file\".format(key))\n self.options[molname][key]=value \n else:\n # This is to define special lines that are given by a dictionary\n key,value=stripped_line.split(\":\",1) # split on first occurence\n if key==\"Addon\": # additional atoms to be added per molecule\n addondict=self.empty_addon_dict.copy()\n addondict_string = value.split(\"}\",-1)[0].split(\"{\",1)[1]\n for pair in addondict_string.split(\",\"):\n addonkey,addonvalue=[i.strip() for i in pair.split(\":\")]\n if addonkey not in addondict.keys():\n raise BaseException(\"Option \\\"{}\\\" in Addon section of molecule {} not known, please check your input file\".format(addonkey,molname))\n 
addondict[addonkey]=addonvalue\n value=addondict\n # Since addon keyword can be used many times, this is a list\n self.options[molname][key].append(value) \n self._check()", "def main():\n global options\n parser = OptionParser(\n usage = '%prog [OPTIONS] RESULTPATH',\n version='%prog 0.99', #\n description='Calculate results on acl2018 datasets',\n epilog='Contact [email protected]'\n )\n parser.add_option('-l', '--logfile', dest='logfilename',\n help='write log to FILE', metavar='FILE')\n parser.add_option('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_option('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n parser.add_option('-g', '--glob',\n action='store', dest='glob', default='{RESULTPATH}/x*x/*/s*/*eval',\n help='change file globbing for accessing evaluation results (%default)')\n parser.add_option('-f', '--fold_filter',\n action='store', dest='fold_filter', default=None,\n help='only use folds matching (re.search) the specified regular expression on the fold name (e.g. \"^english\" for all folds starting with the string english) (Default \"%default\")')\n parser.add_option('-D', '--decoder_filter',\n action='store', dest='decoder_filter', default=\"greedy|beam4\",\n help='''used on decoding mode label; matches (re.search) with the specified regular expression (Default \"%default\")''')\n parser.add_option('-m', '--mode',\n action='store', dest='mode', default='ms',\n help='''compatibel characters can be combined\n s: individual seed results;\n S: only individual seed results;\n m: mean/sd values (on seeds and folds);\n M: mean/sd (on folds only);\n e: include ensembles;\n E: only ensembles;\n T: only test results;\n D: only dev results\n q: sort the results by accuracy\n L: evaluate on edit distance, not on Accuracy\n ''')\n\n (options, args) = parser.parse_args()\n if options.debug:\n print(\"options=\",options,file=sys.stderr)\n\n if len(args) < 1:\n print('# RESULTPATH needed')\n parser.print_help()\n exit(1)\n options.mode = set(options.mode)\n process(options=options,args=args)", "def processArgs(self, argv):\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--show_ADT\", action=\"store_true\", dest=\"show_ADT\",\n default=self.show_ADT, help=\"Display ADT value if set\")\n parser.add_option(\"-f\", \"--show_file\", action=\"store_true\", dest=\"show_file\",\n default=self.show_file, help=\"Display matching filename if set\")\n parser.add_option(\"-t\", \"--show_time\", action=\"store_true\", dest=\"show_time\",\n default=self.show_time, help=\"Display message time\")\n parser.add_option(\"-v\", \"--show_visitID\", action=\"store_true\", dest=\"show_visitID\",\n default=self.show_visitID, help=\"Display visit ID\")\n parser.add_option(\"-p\", \"--show_pc\",\n action=\"store_true\",\n dest=\"show_pc\",\n default=self.show_pc,\n help=\"Display patient class\")\n\n (options, pargs) = parser.parse_args()\n if len(pargs) < 3:\n parser.error(\"incorrect number of arguments\")\n\n self.show_ADT = parser.values.show_ADT\n self.show_file = parser.values.show_file\n self.show_time = parser.values.show_time\n self.show_visitID = parser.values.show_visitID\n self.show_pc = parser.values.show_pc\n \n self.segments_of_interest = pargs.pop(0)\n if len(self.segments_of_interest) != 3:\n parser.error(\"segment '%s' looks incorrect, expected something like 'PV1'\"\n % self.segments_of_interest)\n\n try:\n nums = pargs.pop(0).split(\",\")\n 
for num in nums:\n if 'MSH' == self.segments_of_interest:\n num = int(num) - 1\n self.sequences.append(int(num))\n except:\n parser.error(\"sequence must be an integer, separate multiple w/ comma and no spaces\")\n\n for patternOrFile in pargs:\n for file in glob.glob(patternOrFile):\n if not os.path.isfile(file):\n parser.error(\"can't open input file %s\" % file)\n self.filelist.append(file)\n \n # Require at least one file\n if not len(self.filelist):\n parser.error(\"at least one input file is required\")", "def _parse_output_status_details(lines):\n details = list()\n detail_indicator = re.compile(\"^--\")\n for line in lines:\n line = line.rstrip()\n if re.match(detail_indicator, line):\n details.append(line)\n else:\n break\n return details", "def _parse_vn(lines, out_format):\n number = 0\n lines_out = []\n loa = lines_out.append\n for line in lines:\n coordinates = split_line(line)\n if len(coordinates) == 3:\n number += 1\n loa(out_format % (-float(coordinates[0]),\n float(coordinates[1]),\n float(coordinates[2])))\n return number, lines_out" ]
[ "0.6423425", "0.5811027", "0.580129", "0.5655842", "0.55434406", "0.533609", "0.53108877", "0.53000253", "0.5271251", "0.5269717", "0.5203191", "0.51964784", "0.5171904", "0.5154556", "0.5100387", "0.5036834", "0.5026639", "0.5026215", "0.5009487", "0.49926525", "0.49875903", "0.49847418", "0.49475414", "0.49277356", "0.4921028", "0.4915675", "0.48955688", "0.4889478", "0.48839632", "0.48807707" ]
0.8179622
0
Parse headers and entries in line, create dictionary and append it to RESULT list.
def _parse_header(self, line): if self._regex_helper.search_compiled(W._re_header, line): if not self.headers: for value in re.findall(W._re_header, line): self.headers.append(value[0]) raise ParsingDone else: # Dictionary which is going to be appended to the returned list ret = dict() # List of entries _entries = list() # List of values in WHAT entry _what_entry = list() for value in re.findall(W._re_header, line): _entries.append(value[0]) for what_index in range(len(self.headers) - 1, len(_entries)): _what_entry.append(_entries[what_index]) _what_entry_string = ' '.join(_what_entry) for index in range(len(self.headers)): if index < len(self.headers) - 1: ret.update({self.headers[index]: _entries[index]}) else: ret.update({self.headers[index]: _what_entry_string}) self.current_ret['RESULT'].append(ret) raise ParsingDone
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_line(self, line):\n find_result = re.findall(LINE_REGEX, line)\n line_data = {r[0]: r[1] for r in find_result}\n self.process_url(line_data.get('request_to'))\n self.process_status_code(line_data.get('response_status'))", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n 
locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and 
(config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def _result_to_dict(line):\n f = 
line.split(':;')\n return {'server': f[0], 'os_name': f[1], 'status': f[2], 'ipv4': f[3]}", "def _parse_line(line: Match[str]) -> dict:\n request = line.group(\"request\")\n request = request.split()\n req_method = request[0] # GET, POST, PUT, etc.\n url = request[1]\n x = url.split(\"/\")[3:]\n uri = f'/{\"/\".join(x)}'\n\n timestamp = line.group(\"timestamp\") # timestamp in ISO format\n timestamp = MyTime._try_isoformat(timestamp, tzinfo=\"UTC\").dt\n\n res = {\n \"url\": url,\n \"uri\": uri,\n \"req_method\": req_method,\n \"timestamp\": timestamp,\n \"user_agent\": line.group(\"user_agent\"),\n }\n return res", "def parse_line(line):\n parts = line.strip().split('\\t')\n\n output = {}\n\n if len(parts) != len(COLUMNS):\n raise Exception('Incorrect number of columns in line.', parts, COLUMNS)\n\n for key, value in zip(COLUMNS, parts):\n if key == 'attributes':\n output[key] = parse_attributes(value)\n elif key == 'start' or key == 'stop':\n output[key] = int(value)\n else:\n output[key] = value\n\n return output", "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def _parse_header(lines):\n # The dict into which we will store header fields.\n header = {}\n # Loop over lines in the header.\n for line in lines:\n # Find the first colon.\n index = line.index(COLON)\n # Up to the colon is the field name.\n name = line[: index]\n # After the colon is the field value.\n value = line[index + 1 :]\n # The field value may begin or end with extra space, which is not \n # significant. Remove it.\n value = value.strip()\n # Store the field.\n header[name] = value\n # All done.\n return header", "def process_line(self,\n event_file,\n raw_log_line,\n header_length=29,\n log_filename=\"\"):\n\n event_data = {}\n for filter_name, regex in self._filters_dict.items():\n match = regex.search(raw_log_line)\n if match:\n match_data = match.groups()\n event_data[filter_name] = match_data\n\n if event_data:\n if log_filename:\n event_data[\"log_filename\"] = log_filename\n event_data[\"raw_log_line\"] = raw_log_line.rstrip()[header_length:]\n event_data[\"system_timestamp\"] = raw_log_line[1:27]\n event_data[\"matched_timestamp\"] = \\\n datetime.datetime.now().strftime(TIMESTAMP_FORMAT)\n event_file.write(json.dumps(event_data) + \"\\n\")\n event_file.flush()", "def get_data(contents):\n\n header = {}\n copy = contents.split(\"\\n\")\n\n header_contents = dict(\n start_date=\"Start Date\",\n end_date=\" End Date\",\n subject=\" Subject\",\n experiment=\" Experiment\",\n group=\" Group\",\n box=\" Box\",\n start_time=\" Start Time\",\n end_time=\" End Time\",\n program=\" Program\",\n msn=\" MSN\",\n )\n\n for line in copy:\n for key in header_contents:\n heading = line.split(\":\")\n if heading[0] == header_contents[key]:\n if key == \"start_time\" or key == \"end_time\":\n header[key] = (\n heading[1].lstrip() + \":\" + heading[2] + \":\" + heading[3]\n )\n else:\n header[key] = heading[1].lstrip()\n\n data = {}\n copy = contents.split()\n\n uppercase = string.ascii_uppercase\n\n idx = []\n for i, val in enumerate(copy):\n if val[0] in uppercase and val[1] == \":\":\n idx.append(i)\n\n for i, j in zip(idx[:-1], idx[1:]):\n data[copy[i].lower()[0]] = [\n timestamp for timestamp in copy[i + 1 : j] if timestamp[-1] != \":\"\n ]\n\n return header, data", "def parse_line(cls, line):\n regex = re.compile(cls.pattern)\n m = 
regex.search(line)\n if m:\n data = m.groupdict()\n data = cls.post_process(data)\n if cls.date_format:\n data['time'] = cls.convert_time(data['time'])\n else:\n data['time'] = datetime.now()\n return data\n else:\n return {}", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def agline(line):\n\n vals = {}\n x = ['date', 'movie', 'offset', 'well', 'gen', 'flarem', 'flares', \n 'chargem', 'charges', 'charget', 'escdm', 'escds', 'escddur', 'escmm', 'escms', \n 'escmdur']\n y = line.strip('\\n').split(',')[0:16]\n z = zip(x, y)\n\n for item in z:\n vals[item[0]] = item[1]\n\n return(vals)", "def process(line):\n output = {}\n\n fields = line.split(\" \")\n if len(fields) < 6:\n return False\n \n output[\"log_level\"] = fields[0]\n \n # Convert date/time to object or reject line\n try:\n output[\"datetime\"] = datetime.datetime.strptime(\n \"%s %s\" % (fields[1], fields[2]),\n \"%Y-%m-%d %H:%M:%S,%f\"\n )\n except ValueError:\n return False\n \n output[\"logger\"] = fields[3]\n output[\"thread\"] = fields[4]\n output[\"process\"] = fields[5]\n output[\"message\"] = \" \".join(fields[6:])\n \n return output", "def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p", "def ParseLine(line):\n fields = line.split()\n ip = fields[0]\n datestr = ' '.join(fields[3:5])[1:-1]\n timestamp = datetime.strptime(\n datestr, '%d/%b/%Y:%H:%M:%S %z'\n ).timestamp()\n command = fields[5][1:]\n uri = fields[6]\n protocol = fields[7][:-1]\n status = int(fields[8])\n size = int(fields[9])\n meta = [var.strip('\"') for var in fields[11:-1]]\n return {\n 'timestamp': timestamp,\n 'ip': ip,\n 'command': command,\n 'uri': uri,\n 'protocol': protocol,\n 'status': status,\n 'size': size,\n 'meta': meta\n }", "def _parse_line(self):\n # check if line contains a rule or not\n stripped = self._line.strip()\n if not stripped or stripped.startswith(\"#\"):\n return None\n\n # strip out double quotes from values, and simplify equals strings\n simplified = self._line.replace(\"==\", \"=\").replace('\"', '')\n\n # return a dictionary formed from the key=value pairs found in line\n return dict(f.strip().split(\"=\", 1) for f in simplified.split(\",\"))", "def do_Promo_line_parse (Promo_line, line_number, filehash) :\n result = [filehash,\n line_number,\n Promo_line[0:8].strip(),\n Promo_line[9:13].strip(),\n Promo_line[14:19].strip(),\n Promo_line[20:26].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[27:30].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[31:40].strip(),\n 
Promo_line[41:49].strip(),\n Promo_line[50:].strip()\n ]\n return result\n # Having the line number passed in is ugly, but kind of works :/\n # Having all the field extraction explicit is kind of ugly too...\n # We're using the hash here to link? Yeah, that's because Python\n # doesn't know what the autonumbered load table is up to in the\n # DB when it starts to coalesce the raw files together.", "def agline2(line):\n \n vals = {}\n y = line.strip('\\n').split(',')\n y.extend(y[0].strip('.MTS').split('_'))\n \n #print(y)\n \n x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n 'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n 'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n \n z = zip(x, y)\n\n for item in z:\n vals[item[0]] = item[1]\n\n return(vals)", "def _read_header_line_1(self, lines: list) -> dict:\n fields = (\n \"model_id\",\n \"unit_id\",\n \"software_level\",\n \"message_number\",\n \"message_subclass\",\n )\n if self._is_ct25k():\n indices = [1, 3, 4, 6, 7, 8]\n else:\n indices = [1, 3, 4, 7, 8, 9]\n values = [split_string(line, indices) for line in lines]\n return values_to_dict(fields, values)", "def get_line_desc(self, line):\n return dict(zip(self.header, line))", "def parse_header(line):\n # 2015-09-27 14:55:41 UTC [192.0.2.1]:56721 -> [192.0.2.2]:443 (37):\n m = re.match(r'(\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} \\S+) \\[(.+?)\\]:(\\d+) -> \\[(.+?)\\]:(\\d+) \\((\\d+|EOF)\\):?', line)\n if not m:\n raise LogSyntaxError(line)\n res = {}\n res['timestamp'] = m.group(1)\n res['src_addr'] = m.group(2)\n res['src_port'] = int(m.group(3))\n res['dst_addr'] = m.group(4)\n res['dst_port'] = int(m.group(5))\n if m.group(6) == 'EOF':\n res['eof'] = True\n else:\n res['eof'] = False\n res['size'] = int(m.group(6))\n return res", "def readHeaders(lines):\n\n result = {}\n for line in lines:\n if line[0] == \"#\":\n continue\n if line.strip() == \"\":\n continue\n key, value = line.split(\":\", 1)\n result[key.strip()] = parseSloppily(value)\n return result", "def make_entry(line):\n #focus on relevant parts\n parts = line.split(\" - \")\n visitor_id = parts[0]\n subparts = parts[1].split('\"')\n method_and_uri = subparts[1]\n method_and_uri_parts = method_and_uri.split(\" \")\n method = method_and_uri_parts[0]\n uri = method_and_uri_parts[1]\n d = dict()\n d[\"visitor_id\"] = visitor_id\n d[\"method\"] = method\n d[\"uri\"] = uri\n return d", "def parse_ensembl_line(line, header):\n line = line.rstrip().split(\"\\t\")\n header = [head.lower() for head in header]\n raw_info = dict(zip(header, line))\n\n ensembl_info = {}\n\n for word in raw_info:\n value = raw_info[word]\n if not value:\n continue\n\n if \"chromosome\" in word:\n ensembl_info[\"chrom\"] = value\n if \"gene name\" in word:\n ensembl_info[\"hgnc_symbol\"] = value\n if \"hgnc id\" in word:\n ensembl_info[\"hgnc_id\"] = int(value.split(\":\")[-1])\n if \"hgnc symbol\" in word:\n ensembl_info[\"hgnc_symbol\"] = value\n if \"strand\" in word:\n ensembl_info[\"strand\"] = int(value)\n\n update_gene_info(ensembl_info, word, value)\n update_transcript_info(ensembl_info, word, value)\n update_exon_info(ensembl_info, word, value)\n update_utr_info(ensembl_info, word, value)\n update_refseq_info(ensembl_info, word, value)\n return ensembl_info", "def _read_header_line_2(lines: list) -> dict:\n fields = (\n \"detection_status\",\n \"warning\",\n \"cloud_base_data\",\n \"warning_flags\",\n )\n values = [[line[0], line[1], line[3:20], line[21:].strip()] for 
line in lines]\n return values_to_dict(fields, values)", "def parse(line):\n return dict([pair.split(':') for pair in line.split()])", "def split(self, line):\n parts = line.split()\n return {\n 'size': 0 if parts[9] == '-' else int(parts[9]), \n 'file_requested': parts[6]\n }", "def _read_entry_detail(self, line):\n account_number = 0\n try:\n account_number = int(line[12:29].strip().replace('-', ''))\n except ValueError as err:\n print('Error parsing account number field -> ' + str(err))\n\n result_dict = {'Transaction Code': line[1:3],\n 'ReceivingID': line[3:11],\n 'CheckDigit': line[11],\n 'Account Number': account_number,\n 'Amount': int(line[29:39]) / 100,\n 'Individual ID': line[39:54],\n 'Receiver Name': line[54:76].strip(),\n 'DiscretionaryData': line[76:78],\n 'AddendaIndicator': line[78],\n 'TraceNumber': line[79:94]}\n\n self.entries.append(result_dict)", "def parse_header(header_lines):\n info = {}\n for line in header_lines:\n if line.startswith('Citation'):\n info['Citation'] = line.split()[-1].strip()\n elif ':' in line:\n try:\n field, value = map(strip,line.split(':',1))\n info[field] = value\n except ValueError:\n #no interesting header line\n continue\n else:\n continue\n return Info(info)", "def _read_batch_header(self, line):\n try:\n effective_entry_date = datetime.strptime(line[69:75], '%y%m%d')\n except ValueError as err:\n print('Error parsing effective entry date -> ' + str(err))\n effective_entry_date = '00000000'\n\n batch_header_dict = {'Service Class Code': line[1:4],\n 'Company Name': line[4:20].strip(),\n 'Company Discretionary Data': line[20:40].strip(),\n 'Company ID': line[40:50].strip(),\n 'SEC Code': line[50:53],\n 'Company Entry Description': line[53:63].strip(),\n 'Company Descriptive Date': line[63:69].strip(),\n 'Effective Entry Date': effective_entry_date,\n 'Settlement Date Julian': line[75:78],\n 'Originator Status Code': line[78],\n 'Originating DFI ID': line[79:87],\n 'Batch Number': line[87:94]}\n self.batch_headers.append(batch_header_dict)" ]
[ "0.66752446", "0.6504577", "0.63924736", "0.6246586", "0.60992664", "0.60275394", "0.59553164", "0.5938524", "0.58829117", "0.58815986", "0.58645564", "0.5864273", "0.5848218", "0.5842252", "0.58342505", "0.58329415", "0.5803412", "0.5788858", "0.57804203", "0.5771522", "0.57667285", "0.57658106", "0.5758933", "0.57427806", "0.573765", "0.5730785", "0.5693323", "0.5683553", "0.56792504", "0.5668925" ]
0.7336327
0
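The snippets in the record that ends above all slice fixed-width report lines column by column. As a hedged aside, here is a minimal sketch of the same pattern with the offsets pulled into one table; the field names and column ranges are invented for illustration, not taken from the record:

from typing import Dict, List, Tuple

# Hypothetical layout: (field name, start column, end column); offsets are illustrative only.
PROMO_LAYOUT: List[Tuple[str, int, int]] = [
    ("code", 0, 8),
    ("description", 8, 41),
    ("start_date", 41, 49),
    ("amount", 50, 60),
]

def parse_fixed_width(line: str, layout: List[Tuple[str, int, int]]) -> Dict[str, str]:
    # Slice one fixed-width line into a dict, stripping padding from each field.
    return {name: line[start:end].strip() for name, start, end in layout}

Keeping the offsets in a single table makes a layout change a one-line edit instead of hunting for slice bounds scattered through the parser.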
We need to find the halfedge h that is incident to u and is on the face that contains the diagonal uv. To find it, we test every pair of halfedges incident to u and v until we find a pair belonging to the same face.
def referenceEdge(u, v):
    v1 = u
    v2 = v
    e1 = u.getEdge().getPrev()
    e2 = v.getEdge().getPrev()
    aux = None  # aux is a half-edge incident to u
    while aux != e1:
        if aux is None:
            aux = e1
        aux2 = None  # aux2 is a half-edge incident to v
        while aux2 != e2:
            if aux2 is None:
                aux2 = e2
            if aux.getFace() == aux2.getFace():
                return aux
            aux2 = aux2.getNext().getTwin()
        aux = aux.getNext().getTwin()
    return e1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_homography(u, v):\r\n N = u.shape[0]\r\n H = None\r\n\r\n if v.shape[0] is not N:\r\n print('u and v should have the same size')\r\n return None\r\n if N < 4:\r\n print('At least 4 points should be given')\r\n\r\n # TODO: 1.forming A\r\n A = np.zeros((2*N, 8))\r\n for i in range(N):\r\n A[2*i, :] = np.array([u[i, 0], u[i, 1], 1, 0, 0, 0, -u[i, 0]*v[i,0], -u[i, 1]*v[i, 0]])\r\n A[2*i+1, :] = np.array([0, 0, 0, u[i, 0], u[i, 1], 1, -u[i, 0]*v[i, 1], -u[i, 1]*v[i, 1]])\r\n\r\n # TODO: 2.solve H with A\r\n b = v.reshape(-1)\r\n H, res, _, _ = np.linalg.lstsq(A, b, rcond=None)\r\n H = np.concatenate((H, np.array([1])))\r\n H = H.reshape(3,3)\r\n\r\n return H", "def edge_on_silhouette(self, vertices_2d: np.ndarray) -> np.ndarray:\n assert vertices_2d.ndim == 2\n assert vertices_2d.shape[1] == 2\n triangles = vertices_2d[self.faces, :]\n u = triangles[:, 1, :] - triangles[:, 0, :]\n v = triangles[:, 2, :] - triangles[:, 0, :]\n face_visible = np.cross(u, v) > 0 if self.clockwise else np.cross(u, v) < 0\n edge_bool = (self.edges_faces_ones * face_visible) == 1\n return edge_bool[self.faces_edges]", "def trimesh_collapse_edge(self, u, v, t=0.5, allow_boundary=False, fixed=None):\r\n if t < 0.0:\r\n raise ValueError('Parameter t should be greater than or equal to 0.')\r\n if t > 1.0:\r\n raise ValueError('Parameter t should be smaller than or equal to 1.')\r\n\r\n # check collapse conditions\r\n if not is_collapse_legal(self, u, v, allow_boundary=allow_boundary):\r\n return False\r\n\r\n # compare to fixed\r\n fixed = fixed or []\r\n if v in fixed or u in fixed:\r\n return False\r\n\r\n # move U\r\n x, y, z = self.edge_point(u, v, t)\r\n\r\n self.vertex[u]['x'] = x\r\n self.vertex[u]['y'] = y\r\n self.vertex[u]['z'] = z\r\n\r\n # UV face\r\n fkey = self.halfedge[u][v]\r\n\r\n if fkey is None:\r\n del self.halfedge[u][v]\r\n else:\r\n face = self.face[fkey]\r\n\r\n o = face[face.index(u) - 1]\r\n\r\n del self.halfedge[u][v]\r\n del self.halfedge[v][o]\r\n del self.halfedge[o][u]\r\n del self.face[fkey]\r\n\r\n if len(self.halfedge[o]) < 2:\r\n del self.halfedge[o]\r\n del self.vertex[o]\r\n del self.halfedge[u][o]\r\n\r\n # VU face\r\n fkey = self.halfedge[v][u]\r\n\r\n if fkey is None:\r\n del self.halfedge[v][u]\r\n else:\r\n face = self.face[fkey]\r\n\r\n o = face[face.index(v) - 1]\r\n\r\n del self.halfedge[v][u]\r\n del self.halfedge[u][o]\r\n del self.halfedge[o][v]\r\n del self.face[fkey]\r\n\r\n if len(self.halfedge[o]) < 2:\r\n del self.halfedge[o]\r\n del self.vertex[o]\r\n del self.halfedge[v][o]\r\n\r\n # neighborhood of V\r\n for nbr, fkey in list(self.halfedge[v].items()):\r\n\r\n if fkey is None:\r\n self.halfedge[u][nbr] = None\r\n del self.halfedge[v][nbr]\r\n else:\r\n # a > v > nbr => a > u > nbr\r\n face = self.face[fkey]\r\n a = face[face.index(v) - 1]\r\n self.face[fkey] = [a, u, nbr]\r\n\r\n if v in self.halfedge[a]:\r\n del self.halfedge[a][v]\r\n del self.halfedge[v][nbr]\r\n\r\n self.halfedge[a][u] = fkey\r\n self.halfedge[u][nbr] = fkey\r\n self.halfedge[nbr][a] = fkey\r\n\r\n # nbr > v > d => nbr > u > d\r\n if v in self.halfedge[nbr]:\r\n self.halfedge[nbr][u] = self.halfedge[nbr][v]\r\n del self.halfedge[nbr][v]\r\n\r\n # delete V\r\n del self.halfedge[v]\r\n del self.vertex[v]\r\n\r\n # clean up\r\n for nu in self.halfedge[u]:\r\n for nbr in self.halfedge[nu]:\r\n if nbr == v:\r\n self.halfedge[nu][u] = self.halfedge[nu][v]\r\n del self.halfedge[nu][v]\r\n\r\n return True", "def get_faces(ulist, vlist):\n width = len(ulist)\n faces = []\n for i in 
range(len(ulist) - 1):\n for j in range(len(vlist) - 1):\n topleft = j * width + i\n topright = topleft + 1\n bottomleft = ((j + 1) * width) + i\n bottomright = bottomleft + 1\n one = [topleft, topright, bottomleft]\n two = [bottomleft, topright, bottomright]\n faces.append(one)\n faces.append(two)\n\n return faces", "def close_up_hull(hull, edge_count, used_pivots):\n face_added = close_up(edge_count, used_pivots)\n if not face_added:\n face = list(hull[0])\n # det(A) = -det (B) if two cols swap (odd and even)\n face[-2], face[-1] = face[-1], face[-2]\n face_added = [tuple(face)]\n for face in face_added:\n hull.append(face)\n\n return len(face_added)", "def findFours(self):\n\n for i in range(6):\n for j in range(7):\n if self.board[i][j] != ' ':\n # check if a vertical four-in-a-row starts at (i, j)\n if self.verticalCheck(i, j):\n self.highlightFour(i, j, 'vertical')\n \n # check if a horizontal four-in-a-row starts at (i, j)\n if self.horizontalCheck(i, j):\n self.highlightFour(i, j, 'horizontal')\n \n # check if a diagonal (either way) four-in-a-row starts at (i, j)\n # also, get the slope of the four if there is one\n diag_fours, slope = self.diagonalCheck(i, j)\n if diag_fours:\n self.highlightFour(i, j, 'diagonal', slope)", "def H(self, u, v):\n return (self.E(u, v) * self.N(u, v)\n - 2 * self.F(u, v) * self.M(u, v)\n + self.G(u, v) * self.L(u, v)) / \\\n (2 * (self.E(u, v) * self.G(u, v) - np.square(self.F(u, v))))", "def Neighbourgs(abcd, h):\n\n Nelem = len(abcd)\n\n a = abcd[h][0]\n b = abcd[h][1]\n c = abcd[h][2]\n d = abcd[h][3]\n\n el1, el2, el3, el4 = 0, 0, 0, 0\n\n N = 0\n\n for j in range(0, Nelem - 1):\n\n if N == 4:\n break\n\n if a in abcd[j, :] and b in abcd[j, :] and j != h:\n N += 1\n el1 = j + 1\n\n if b in abcd[j, :] and c in abcd[j, :] and j != h:\n N += 1\n el2 = j + 1\n\n if c in abcd[j, :] and d in abcd[j, :] and j != h:\n N += 1\n el3 = j + 1\n\n if d in abcd[j, :] and a in abcd[j, :] and j != h:\n N += 1\n el4 = j + 1\n\n return [el1, el2, el3, el4]", "def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict", "def faceIntersection(self, face):\n # Test wether both ends of the edge are in the same 
half space\n # (relative to <face>'s plane).\n normal = face.normalVect()\n v0 = vector(face.vertices[0])\n vp = vector(self.pvt)\n vn = vector(self.nvt)\n p = normal.dotProduct(vp - v0) * normal.dotProduct(vn - v0)\n if p > 0:\n return False\n elif abs(p) <= COMPARISON_EPSILON or abs(normal.dotProduct(vp - vn) / (normal.norm() * (vp - vn).norm())) <= COMPARISON_EPSILON:\n # print('ah')\n return False\n else:\n interVect = vn + (normal.dotProduct(v0 - vn) /\n normal.dotProduct(vp - vn)) * (vp - vn)\n lastCross = ((vector(face.vertices[-1]) - interVect) *\n (vector(face.vertices[0]) - interVect))\n for i in range(len(face.vertices)):\n cross = ((vector(face.vertices[i]) - interVect) *\n (vector(face.vertices[(i + 1) % len(face.vertices)]) -\n interVect))\n p = cross.dotProduct(lastCross)\n if p < 0:\n return False\n elif p == 0 and cross.norm() != 0:\n if cross.norm() > COMPARISON_EPSILON:\n warnings.warn(\"Cross product's norm is very low\")\n lastCross = cross\n return interVect.coords()", "def inside(i,j,im,h=H): #X\n return i-h >=0 and j-h >=0 and i+h+1<=im.shape[0] and j+h+1<=im.shape[1]", "def detector_outline( bottom_vec3d_list, top_vec3d_list ):\n # hardcoded angular offset for hexagon\n phi0 = -20.0 * I3Units.degree \n\n # hardcoded threshold for an edge\n cos_angle_threshold = math.cos( 7.0 * I3Units.degree ) \n\n bottom = Vec3dList()\n top = Vec3dList()\n\n string_coords = []\n for b, t in zip( bottom_vec3d_list, top_vec3d_list ):\n if t[2] < 450.0 * I3Units.meter: # ignore deep-core\n continue\n string_coords.append(( math.atan2(t[1], t[0]),\n t[0], t[1], b[2], t[2] ))\n\n # border detection:\n # check if there is a point in each angular segment of hexagon\n border = []\n for i, cur in enumerate( string_coords ):\n counts = [False, False, False, False, False , False]\n for j, other in enumerate( string_coords ):\n if i == j: continue\n dx = cur[1] - other[1]\n dy = cur[2] - other[2]\n phi = int((math.atan2( dy, dx ) - phi0) / I3Units.degree)\n if phi < 0:\n phi += 360\n counts[phi // 60] = True\n neighbor_count = sum( counts )\n # border points don't have a full hexagon of neighbors\n if neighbor_count < 6:\n border.append( cur )\n\n border.sort() # put in circular order\n\n # edge detection:\n # check if differential vectors of three consecutive points have an angle\n for i in xrange( len(border) ):\n ax = border[i - 1][1] - border[i - 2][1]\n ay = border[i - 1][2] - border[i - 2][2]\n bx = border[i][1] - border[i - 1][1]\n by = border[i][2] - border[i - 1][2]\n anorm = (ax ** 2 + ay ** 2) ** 0.5\n bnorm = (bx ** 2 + by ** 2) ** 0.5\n cos_angle = (bx * ax + by * ay) / (anorm * bnorm)\n if cos_angle < cos_angle_threshold:\n cur = border[i - 1]\n bottom.append( vec3d(cur[1], cur[2], cur[3]) )\n top.append( vec3d(cur[1], cur[2], cur[4]) )\n\n return bottom, top", "def neighbours2((u,v)):\r\n\r\n return ((u-1, v+1), (u,v+1), (u+1,v+1), \r\n (u-1,v), (u+1,v),\r\n (u-1,v-1), (u,v-1), (u+1,v-1))", "def split_face_mesh(mesh, fkey, u, v):\n if u not in mesh.face[fkey] or v not in mesh.face[fkey]:\n raise ValueError('The split vertices do not belong to the split face.')\n if mesh.face[fkey][u] == v:\n raise ValueError('The split vertices are neighbours.')\n d = mesh.face[fkey][u]\n f = [u]\n while True:\n f.append(d)\n if d == v:\n break\n d = mesh.face[fkey][d]\n d = mesh.face[fkey][v]\n g = [v]\n while True:\n g.append(d)\n if d == u:\n break\n d = mesh.face[fkey][d]\n f = mesh.add_face(f)\n g = mesh.add_face(g)\n del mesh.face[fkey]\n return f, g", "def bipartite_vertex_cover(bigraph, 
algo=\"Hopcroft-Karp\"):\n if algo == \"Hopcroft-Karp\":\n coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]\n coord = np.array(coord)\n graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))\n matchV = maximum_bipartite_matching(graph, perm_type='row')\n matchV = [None if x==-1 else x for x in matchV]\n nU, nV = graph.shape\n assert len(matchV) == nV\n elif algo == \"Hungarian\":\n matchV = max_bipartite_matching2(bigraph)\n nU, nV = len(bigraph), len(matchV)\n else:\n assert False\n\n matchU = [None] * nU\n \n for v in range(nV): # -- build the mapping from U to V\n if matchV[v] is not None:\n matchU[matchV[v]] = v\n \n def old_konig():\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n for u in range(nU):\n if matchU[u] is None: # -- starting with free vertices in U\n _alternate(u, bigraph, visitU, visitV, matchV)\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n def new_konig():\n # solve the limitation of huge number of recursive calls\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n wait_u = set(range(nU)) - set(matchV) \n while len(wait_u) > 0:\n u = wait_u.pop()\n visitU[u] = True\n for v in bigraph[u]:\n if not visitV[v]:\n visitV[v] = True\n assert matchV[v] is not None # otherwise match is not maximum\n assert matchV[v] not in wait_u\n wait_u.add(matchV[v])\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n #res_old = old_konig()\n res_new = new_konig()\n #assert res_old == res_new\n return res_new", "def halfedgeConstruction(self,facetList):\n\t\tedges = []\n\t\t# the temporary map of the halfedges\n\t\tedgeMap = {}\n\t\tcount = 0\n\t\tfor i in range(0,len(facetList)):\n\t\t\t# \n\t\t\tfor j in range(0,len(facetList[i])):\n\t\t\t\t# j is the index of the current vertex\n\t\t\t\tkey = (facetList[i][j-1],facetList[i][j])\n\t\t\t\tedges.append(key)\n\t\t\t\tedgeMap[key] = count\n\t\t\t\tcount += 1\n\t\t\t\t# initialze the current halfedge\n\t\t\t\the = Halfedge()\n\t\t\t\t# assume that the mesh has been well oriented\n\t\t\t\the.setVertex(self._mesh._vertices[facetList[i][j]])\n\t\t\t\tself._mesh._halfedges.append(he)\n\t\t# construct the opposite halfedge\n\t\tfor key in edgeMap:\n\t\t\ti = edgeMap[key]\n\t\t\tif self._mesh._halfedges[i].opposite() is None:\n\t\t\t\t# the opposite has not been set\n\t\t\t\toppo = key[1],key[0]\n\t\t\t\tif oppo in edgeMap:\n\t\t\t\t\t# interior edge\n\t\t\t\t\tj = edgeMap[oppo]\n\t\t\t\t\tself._mesh._halfedges[i].setOpposite(self._mesh._halfedges[j])\n\t\t\t\t\tself._mesh._halfedges[j].setOpposite(self._mesh._halfedges[i])\n\t\t\t\telse:\n\t\t\t\t\t# border edge\n\t\t\t\t\t# construct a border halfedge\n\t\t\t\t\the = Halfedge()\n\t\t\t\t\t# the edge is border edge\n\t\t\t\t\the._isBorder = True\n\t\t\t\t\tself._mesh._halfedges[i].setOpposite(he)\n\t\t\t\t\the.setOpposite(self._mesh._halfedges[i])\n\t\t\t\t\t# append the halfedge\n\t\t\t\t\tself._mesh._halfedges.append(he)\n\t\t\t\t\tcount += 1\n\t\t# set next and prev halfedge\n\t\t# the interior edges\n\t\tcount = 0\n\t\tfor i in range(0,len(facetList)):\n\t\t\tfor j in range(0,len(facetList[i])):\n\t\t\t\tif j == len(facetList[i])-1:\n\t\t\t\t\t# the last edge: the next edge is the beggining edge\n\t\t\t\t\tself._mesh._halfedges[count+j].setNext(self._mesh._halfedges[count])\n\t\t\t\t\tself._mesh._halfedges[count+j].setPrev(self._mesh._halfedges[count+j-1])\n\t\t\t\telif j == 0:\n\t\t\t\t\t# the beginning edge: the prev edge is the last 
one\n\t\t\t\t\tself._mesh._halfedges[count+j].setNext(self._mesh._halfedges[count+j+1])\n\t\t\t\t\tself._mesh._halfedges[count+j].setPrev(self._mesh._halfedges[count+len(facetList[i])-1])\n\t\t\t\telse:\n\t\t\t\t\t# the interior edge\n\t\t\t\t\tself._mesh._halfedges[count+j].setNext(self._mesh._halfedges[count+j+1])\n\t\t\t\t\tself._mesh._halfedges[count+j].setPrev(self._mesh._halfedges[count+j-1])\n\t\t\t# the count\n\t\t\tcount += len(facetList[i])\n\t\t# the border edges\n\t\tfor i in range(count,len(self._mesh._halfedges)):\n\t\t\the = self._mesh._halfedges[i]\n\t\t\t# first set the vertex\n\t\t\the.setVertex(he.opposite().prev().vertex())\n\t\t\t# two kinds of situation\n\t\t\tif he.opposite().prev().opposite().isBorder():\n\t\t\t\the.setNext(he.opposite().prev().opposite())\n\t\t\telse:\n\t\t\t\the.setNext(he.opposite().prev().opposite().prev().opposite())\n\t\t\tif he.opposite().next().opposite().isBorder():\n\t\t\t\the.setPrev(he.opposite().next().opposite())\n\t\t\telse:\n\t\t\t\the.setPrev(he.opposite().next().opposite().next().opposite())", "def get_k2_boundary(tri, v_neighbours):\n three = np.array([0, 1, 2])\n nv = tri.shape[0]\n k2s = np.empty((nv, 3), dtype=np.int32)\n for i in range(nv):\n for k in range(3):\n neighbour = v_neighbours[i, k]\n if neighbour == -1:\n k2s[i,k] = -1\n else:\n k2 = ((v_neighbours[neighbour] == i) * three).sum()\n k2s[i, k] = k2\n return k2s", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def query_rect(triu_reader, i0, i1, j0, j1):\n\n # symmetric query\n if (i0, i1) == (j0, j1):\n i, j, v = triu_reader(i0, i1, i0, i1)\n nodiag = i != j\n i, j, v = np.r_[i, j[nodiag]], np.r_[j, i[nodiag]], np.r_[v, v[nodiag]]\n\n # asymmetric query\n else:\n transpose = False\n if j0 < i0 or (i0 == j0 and i1 < j1):\n i0, i1, j0, j1 = j0, j1, i0, i1\n transpose = True\n\n # non-overlapping\n if _comes_before(i0, i1, j0, j1, strict=True):\n i, j, v = triu_reader(i0, i1, j0, j1)\n\n # partially overlapping\n elif _comes_before(i0, i1, j0, j1):\n ix, jx, vx = triu_reader(i0, j0, j0, i1)\n iy, jy, vy = triu_reader(j0, i1, j0, i1)\n iz, jz, vz = triu_reader(i0, i1, i1, j1)\n nodiag = iy != jy\n iy, jy, vy = np.r_[iy, jy[nodiag]], np.r_[jy, iy[nodiag]], np.r_[vy, vy[nodiag]]\n i, j, v = np.r_[ix, iy, iz], np.r_[jx, jy, jz], np.r_[vx, vy, vz]\n\n # nested\n elif _contains(i0, i1, j0, j1):\n ix, jx, vx = triu_reader(i0, j0, j0, j1)\n iy, jy, vy = triu_reader(j0, j1, j0, j1)\n jz, iz, vz = triu_reader(j0, j1, j1, i1)\n nodiag = iy != jy\n iy, jy, vy = np.r_[iy, jy[nodiag]], np.r_[jy, iy[nodiag]], np.r_[vy, vy[nodiag]]\n i, j, v = np.r_[ix, iy, iz], np.r_[jx, jy, jz], np.r_[vx, vy, vz]\n\n else:\n raise IndexError(\"This shouldn't happen\")\n\n if transpose:\n i, j = j, i\n\n return i, j, v", "def _edge(u, v):\n return (u, v) if u < v else (v, u)", "def neighbour(t, i, j):\n v0 = t[i][(j + 1) % 3]\n v1 = t[i][(j + 2) % 3]\n\n for k in range(len(t)):\n if k != i:\n if v0 in t[k] and v1 in t[k]:\n return k\n\n return None", "def bernstein_surface(i, j, nU, nV, u, v):\n return np.outer(comb(nU, i) * (u ** (nU - i)) * ((1 - u) ** i),\n comb(nV, j) * (v ** (nV - j)) * ((1 - v) ** j))", "def approximate_hypercube_iou(hs: List[Tuple[float, List[float]]], new_hs: Tuple[float, List[float]], m_rel: int) \\\n -> List[float]:\n\n # TODO: umschreiben so dass new_hs die random Punkte enthält -> die müssen dadurch nur einmal erzeugt werden und\n # dann wird für jeden Punkt getestet in welchen hs er noch liegt. 
-> dadurch Punkte nur einmal erzeugen und\n # Volumen nur einmal berechnen\n\n if hs is None or len(hs) == 0 or not hs:\n return [-1]\n\n number_rand_points = 10000 * m_rel\n\n inside_ref = [0] * len(hs)\n\n new_r, new_c = new_hs\n new_v = calc_hypercube_volume(new_r, m_rel)\n\n for i, (r, c) in enumerate(hs):\n j = 0\n while j < number_rand_points:\n max_x = np.array(c) + r\n min_x = np.array(c) - r\n p = np.random.rand(m_rel) * (max_x - min_x) + min_x\n p_inside_hc = is_point_inside_hypercube(p, c, r)\n if p_inside_hc:\n j += 1\n if p_inside_hc and is_point_inside_hypercube(p, new_c, new_r):\n inside_ref[i] += 1\n\n result = []\n\n for (r, c), k in zip(hs, inside_ref):\n v = calc_hypercube_volume(r, m_rel)\n inter = (k / number_rand_points) * v\n union = v + new_v - inter\n result.append(inter / union)\n\n return result", "def mesh_collapse_edge(self, u, v, t=0.5, allow_boundary=False, fixed=None):\r\n if t < 0.0:\r\n raise ValueError('Parameter t should be greater than or equal to 0.')\r\n if t > 1.0:\r\n raise ValueError('Parameter t should be smaller than or equal to 1.')\r\n\r\n # # collapsing of boundary vertices is currently not supported\r\n # # change this to `and` to support collapsing to or from the boundary\r\n # if self.is_vertex_on_boundary(u) or self.is_vertex_on_boundary(v):\r\n # return\r\n\r\n # # check for contained faces\r\n # for nbr in self.halfedge[u]:\r\n # if nbr in self.halfedge[v]:\r\n # # check if U > V > NBR is a face\r\n # if (self.halfedge[u][v] != self.halfedge[v][nbr] or self.halfedge[u][v] != self.halfedge[nbr][u]):\r\n # # check if V > U > NBR is a face\r\n # if (self.halfedge[v][u] != self.halfedge[u][nbr] or self.halfedge[v][u] != self.halfedge[nbr][v]):\r\n # return\r\n # for nbr in self.halfedge[v]:\r\n # if nbr in self.halfedge[u]:\r\n # # check if U > V > NBR is a face\r\n # if (self.halfedge[u][v] != self.halfedge[v][nbr] or self.halfedge[u][v] != self.halfedge[nbr][u]):\r\n # # check if V > U > NBR is a face\r\n # if (self.halfedge[v][u] != self.halfedge[u][nbr] or self.halfedge[v][u] != self.halfedge[nbr][v]):\r\n # return\r\n\r\n # check collapse conditions\r\n if not is_collapse_legal(self, u, v, allow_boundary=allow_boundary):\r\n return False\r\n\r\n # compare to fixed\r\n fixed = fixed or []\r\n if v in fixed or u in fixed:\r\n return False\r\n\r\n # move U\r\n x, y, z = self.edge_point(u, v, t)\r\n self.vertex[u]['x'] = x\r\n self.vertex[u]['y'] = y\r\n self.vertex[u]['z'] = z\r\n\r\n # UV face\r\n fkey = self.halfedge[u][v]\r\n\r\n if fkey is None:\r\n del self.halfedge[u][v]\r\n\r\n else:\r\n face = self.face_vertices(fkey)\r\n f = len(face)\r\n\r\n # switch between UV face sizes\r\n # note: in a triself this is not necessary!\r\n if f < 3:\r\n raise Exception(\"Invalid self face: {}\".format(fkey))\r\n if f == 3:\r\n # delete UV\r\n o = face[face.index(u) - 1]\r\n del self.halfedge[u][v]\r\n del self.halfedge[v][o]\r\n del self.halfedge[o][u]\r\n del self.face[fkey]\r\n else:\r\n # u > v > d => u > d\r\n d = self.face_vertex_descendant(fkey, v)\r\n face.remove(v)\r\n del self.halfedge[u][v]\r\n del self.halfedge[v][d]\r\n self.halfedge[u][d] = fkey\r\n\r\n # VU face\r\n fkey = self.halfedge[v][u]\r\n\r\n if fkey is None:\r\n del self.halfedge[v][u]\r\n\r\n else:\r\n face = self.face_vertices(fkey)\r\n f = len(face)\r\n\r\n # switch between VU face sizes\r\n # note: in a triself this is not necessary!\r\n if f < 3:\r\n raise Exception(\"Invalid mesh face: {}\".format(fkey))\r\n if f == 3:\r\n # delete UV\r\n o = face[face.index(v) - 
1]\r\n del self.halfedge[v][u] # the collapsing halfedge\r\n del self.halfedge[u][o]\r\n del self.halfedge[o][v]\r\n del self.face[fkey]\r\n else:\r\n # a > v > u => a > u\r\n a = self.face_vertex_ancestor(fkey, v)\r\n face.remove(v)\r\n del self.halfedge[a][v]\r\n del self.halfedge[v][u]\r\n self.halfedge[a][u] = fkey\r\n\r\n # V neighbors and halfedges coming into V\r\n for nbr, fkey in list(self.halfedge[v].items()):\r\n\r\n if fkey is None:\r\n self.halfedge[u][nbr] = None\r\n del self.halfedge[v][nbr]\r\n else:\r\n # a > v > nbr => a > u > nbr\r\n face = self.face[fkey]\r\n a = self.face_vertex_ancestor(fkey, v)\r\n face[face.index(v)] = u\r\n\r\n if v in self.halfedge[a]:\r\n del self.halfedge[a][v]\r\n del self.halfedge[v][nbr]\r\n self.halfedge[a][u] = fkey\r\n self.halfedge[u][nbr] = fkey\r\n\r\n # only update what will not be updated in the previous part\r\n # verify what this is exactly\r\n # nbr > v > d => nbr > u > d\r\n if v in self.halfedge[nbr]:\r\n fkey = self.halfedge[nbr][v]\r\n del self.halfedge[nbr][v]\r\n self.halfedge[nbr][u] = fkey\r\n\r\n # delete V\r\n del self.halfedge[v]\r\n del self.vertex[v]", "def corners((u,v)):\r\n return ((u+1,v+1), (u+1,v), (u,v), (u,v+1))", "def inside(self, uv):\n result = self._trimmed.Perform(gp_Pnt2d(uv[0], uv[1]))\n return result == TopAbs_IN", "def is_edge(graph, u, v):\n return graph.matrix[u][v]", "def cw_face_edges(self,face):\n\n l0 = self.region_link[face]\n if face == self.left_region[l0]:\n l0 = (l0[1], l0[0])\n l = l0\n\n traversing = True\n edges = []\n while traversing:\n edges.append(l)\n r = self.right_region[l]\n if r == face:\n l = self.succ_right[l]\n else:\n l = self.succ_left[l]\n if l == l0:\n traversing = False\n return edges", "def householder_transformation(v):\n size_of_v = v.shape[1]\n e1 = np.zeros_like(v)\n e1[0, 0] = 1\n vector = get_norm(v) * e1\n if v[0, 0] < 0:\n vector = - vector\n u = (v + vector).astype(np.float32)\n norm2 = get_norm(u)\n u = u / norm2\n H = np.identity(size_of_v) - ((2 * np.matmul(np.transpose(u), u)) / np.matmul(u, np.transpose(u)))\n return H, u", "def solve_driv(v, ene, s, n, h):\n\n xs = np.array([(k+1)*h for k in range(n)])\n h2 = h*h\n k = np.sqrt(2.0*ene)\n \n vs = [v(x)-ene for x in xs]\n\n mat = laplacian_mat(n) -2.0 * h2 * scipy.sparse.diags(vs, 0) + bc_outgoing_mat(n, h, k)\n vec = np.array([-2.0*h*h*s(x) for x in xs])\n\n ys = scipy.sparse.linalg.spsolve(mat, vec)\n return (xs, ys)" ]
[ "0.60242623", "0.5956021", "0.58149", "0.5760208", "0.56859523", "0.5671715", "0.56677574", "0.5637607", "0.5518257", "0.54610336", "0.54298204", "0.5412329", "0.5374091", "0.53719383", "0.53666955", "0.53609663", "0.53559095", "0.5352723", "0.5348312", "0.5332194", "0.5313079", "0.53066266", "0.53063536", "0.5306167", "0.52991766", "0.5298575", "0.5294784", "0.52927667", "0.5287437", "0.5284519" ]
0.62999815
0
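The record above searches for a half-edge at u whose face also borders v. A minimal sketch of that rotation follows, assuming a hypothetical half-edge API in which vertex.getEdge() returns an outgoing half-edge and each half-edge exposes getTwin(), getNext() and getFace(); none of these names are confirmed beyond what the record itself uses:

def faces_around(vertex):
    # Yield the face of every half-edge leaving `vertex` (one full rotation).
    start = vertex.getEdge()
    he = start
    while True:
        yield he.getFace()
        he = he.getTwin().getNext()  # next outgoing half-edge around the vertex
        if he is start:
            break

def reference_edge(u, v):
    # Return a half-edge incident to u whose face also touches v; fall back to u's edge.
    v_faces = set(faces_around(v))
    start = u.getEdge()
    he = start
    while True:
        if he.getFace() in v_faces:
            return he
        he = he.getTwin().getNext()
        if he is start:
            break
    return start

Collecting v's faces once turns the pairwise comparison of the record's version into a single rotation around each vertex.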
FNV-1 64-bit hash function. Implement this, and/or DJB2.
def fnv1(self, key):
    # hash = 0xff
    hash = 0xcbf29ce484222325  # FNV 64-bit offset basis
    for n in key.encode():
        # print(n)
        hash = hash ^ n
        hash = hash * 0x100000001b3  # FNV 64-bit prime
        # print(hash)
    # Note: XOR before multiply is the FNV-1a ordering; FNV-1 multiplies first.
    return hash
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fnv1(self, key, seed=0):\n # def fnv1(self, key):\n\n # Your code here\n \"\"\"\n Returns: The FNV-1 hash (64-bit) of a given string. \n \"\"\"\n #Constants : Fails the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a Hash Function\n # hash = offset_basis + seed\n # # hash = offset_basis\n # for c in key:\n # hash = hash * FNV_prime\n # hash = hash ^ ord(c)\n # return hash\n\n \"\"\"\n Returns: The FNV-1a (alternate) hash of a given string\n \"\"\"\n # #Constants : Passes the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a alternate Hash Function\n # hash = offset_basis + seed\n # for c in key:\n # hash = hash ^ ord(c)\n # hash = hash * FNV_prime\n # return hash", "def HashAlgorithm(self) -> _n_7_t_0:", "def FNV1Hash(filename):\n \n FNV1_32_INIT = 0x811c9dc5\n FNV1_PRIME_32 = 16777619\n\n lowerName = filename.lower()\n \n _hash = FNV1_32_INIT\n uint32_max = 2 ** 32\n \n for c in lowerName:\n _hash = (_hash * FNV1_PRIME_32) % uint32_max\n _hash = _hash ^ ord(c)\n return format(_hash, 'x')", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def hash64bits(*args):\n # 64 bits hexdigest\n h = hashlib.sha1(bytes(repr(args), \"utf-8\")).hexdigest()[:16]\n # Convert to an integer and return\n return int(h, 16)", "def hash_1(self):\n return self.unpack_qword(0x18)", "def fnvhash(a): \n h = 2166136261 \n for i in a: \n t = (h * 16777619) & 0xffffffffL \n h = t ^ i \n return h", "def _hash_djb2(self, key):\n # OPTIONAL STRETCH: Research and implement DJB2\n hash_grotto = 5381\n for k in key:\n hash_grotto = ((hash_grotto << 5) + hash_grotto) + ord(k)\n return hash_grotto & 0xFFFFFFFF", "def _hash_function(self, x):\n return hashlib.sha1(x).hexdigest()", "def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h", "def _hash(self):\r\n MAX = sys.maxint\r\n MASK = 2 * MAX + 1\r\n n = len(self)\r\n h = 1927868237 * (n + 1)\r\n h &= MASK\r\n for x in self:\r\n hx = hash(x)\r\n h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167\r\n h &= MASK\r\n h = h * 69069 + 907133923\r\n h &= MASK\r\n if h > MAX:\r\n h -= MASK + 1\r\n if h == -1:\r\n h = 590923713\r\n return h", "def get_num_slots(self):\n Return the load factor for this hash table.\n\n Implement this.\n \"\"\"\n return self.elements / self.capacity\n\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 Hash, 64-bit\n\n Implement this, and/or DJB2.pyy\n \"\"\"\n\n # Your code here\n\n\n def djb2(self, key):\n \"\"\"\n DJB2 hash, 32-bit\n\n Implement this, and/or FNV-1.\n \"\"\"\n # Your code here\n\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n #return self.fnv1(key) % self.capacity\n<<<<<<< Updated upstream\n return self.djb2(key) % self.capacity\n=======\n return self.djb2(key) % len(self.storage)\n>>>>>>> Stashed changes\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # need to account for if the key value is the same \n\n i = self.hash_index(key)\n if not self.storage[i]:\n hte = HashTableEntry(key, value)\n self.storage[i] = hte\n self.elements += 1\n hte.head = HashTableEntry(key, value)\n elif 
self.storage[i] and self.storage[i].key != key:\n self.storage[i].insert_at_head(HashTableEntry(key, value))\n>>>>>>> Stashed changes\n\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n i = self.hash_index(key)\n node = self.storage[i]\n prev = None\n if node.key == key:\n self.storage[i] = node.next\n return\n while node != None:\n if node.key == key:\n prev.next = node.next\n self.storage[i].next = None\n return\n prev = node\n node = node.next\n self.elements -= 1\n return\n>>>>>>> Stashed changes\n\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n # - find the index in the hash table for the key\n i = self.hash_index(key)\n # - search the list for that key\n if not self.storage[i]:\n return None\n else:\n if self.storage[i].find_key(key) == key:\n return self.storage[i].value\n>>>>>>> Stashed changes\n\n\n def resize(self, new_capacity):\n \"\"\"\n Changes the capacity of the hash table and\n rehashes all key/value pairs.\n\n Implement this.\n \"\"\"\n<<<<<<< Updated upstream\n # Your code here\n=======\n prev_storage = self.storage\n self.capacity = new_cap\n self.storage = [None] * new_cap\n for i in range(len(prev_storage)):\n prev = prev_storage[i]\n if prev:\n while prev:\n if prev.key:\n self.put(prev.key, prev.value)\n prev = prev.next\n\n>>>>>>> Stashed changes\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(8)\n\n ht.put(\"line_1\", \"'Twas brillig, and the slithy toves\")\n ht.put(\"line_2\", \"Did gyre and gimble in the wabe:\")\n ht.put(\"line_3\", \"All mimsy were the borogoves,\")\n ht.put(\"line_4\", \"And the mome raths outgrabe.\")\n ht.put(\"line_5\", '\"Beware the Jabberwock, my son!')\n ht.put(\"line_6\", \"The jaws that bite, the claws that catch!\")\n ht.put(\"line_7\", \"Beware the Jubjub bird, and shun\")\n ht.put(\"line_8\", 'The frumious Bandersnatch!\"')\n ht.put(\"line_9\", \"He took his vorpal sword in hand;\")\n ht.put(\"line_10\", \"Long time the manxome foe he sought--\")\n ht.put(\"line_11\", \"So rested he by the Tumtum tree\")\n ht.put(\"line_12\", \"And stood awhile in thought.\")\n\n print(\"\")\n\n # Test storing beyond capacity\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n # Test resizing\n old_capacity = ht.get_num_slots()\n ht.resize(ht.capacity * 2)\n new_capacity = ht.get_num_slots()\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n for i in range(1, 13):\n print(ht.get(f\"line_{i}\"))\n\n print(\"\")", "def _hash_function(self, n):\n # Get the mask for this n, or make a new one of 32 random bits.\n mask = self._memomask.get(n)\n if mask is None:\n random.seed(n ^ self.seed_mask)\n mask = self._memomask[n] = int(random.getrandbits(32))\n # Now return a function that uses Jenkins Hash\n #\n def somehash(x):\n return hashlittle(x, mask)\n return somehash", "def hash(self) -> bytes:", "def hash_functions(self):\n pass", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:", "def getHashLfn(lfn):\n return hashlib.sha224(lfn).hexdigest()", "def _hash(self, key):\n # OPTIONAL STRETCH: You may replace the Python hash with DJB2 as a stretch goal\n # return hash(key)\n return self._hash_djb2(key)", "def hash(x) -> int:\n pass", "def _hash_function(self, key):\n h = 
0\n a = 31\n table_size = self.size\n for i in range(len(key)):\n h = (h * a + ord(key[i])) % table_size\n return h", "def hash(self) -> str:\r\n ...", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash_function(self, k):\n return (hash(k) * self._scale + self._shift) % self._prime % len(self._table)", "def unique_hash(self):\n raise NotImplementedError(\"unique_hash Method not implemented\")", "def hash_2(self):\n return self.unpack_qword(0x20)", "def fnv_1a(key: KeyT, seed: int = 0) -> int:\n max64mod = UINT64_T_MAX + 1\n hval = (14695981039346656037 + (31 * seed)) % max64mod\n fnv_64_prime = 1099511628211\n tmp = list(key) if not isinstance(key, str) else list(map(ord, key))\n for t_str in tmp:\n hval ^= t_str\n hval *= fnv_64_prime\n hval %= max64mod\n return hval", "def __hash__(self):\n hash_value = 0\n \n # approximate_online_count\n hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\n base = 1\n h = 0\n for l in self.data:\n for i in l:\n if i:\n h += base\n base *= 2\n return hash(h)", "def calHash(n, m):\n return int(m*BloomFilter.ln2/n)" ]
[ "0.7850084", "0.72370136", "0.7108986", "0.705044", "0.6990908", "0.691239", "0.67458034", "0.6731104", "0.6608719", "0.6538066", "0.65314394", "0.6447355", "0.6440217", "0.6417119", "0.63966304", "0.6386951", "0.63631225", "0.6338462", "0.6330434", "0.63016164", "0.62938416", "0.62520784", "0.62520784", "0.6247326", "0.6244987", "0.6243137", "0.6238413", "0.6222898", "0.62160987", "0.61988044" ]
0.75328153
1
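For comparison with the record above, here is a minimal sketch of the two hashes the query names, using the published 64-bit FNV constants and the classic DJB2 recipe; truncating to a fixed width with a mask is a choice made in this sketch, not something the record specifies:

FNV_OFFSET_BASIS_64 = 0xcbf29ce484222325
FNV_PRIME_64 = 0x100000001b3

def fnv1_64(data: bytes) -> int:
    # FNV-1: multiply by the prime first, then XOR in each byte.
    h = FNV_OFFSET_BASIS_64
    for b in data:
        h = (h * FNV_PRIME_64) & 0xFFFFFFFFFFFFFFFF
        h ^= b
    return h

def djb2_32(data: bytes) -> int:
    # DJB2: start at 5381, then hash * 33 + byte for each byte.
    h = 5381
    for b in data:
        h = ((h << 5) + h + b) & 0xFFFFFFFF
    return h

A slot index for a table of `capacity` buckets then follows as fnv1_64(key.encode()) % capacity, mirroring the hash_index pattern in the record's snippets.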