query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Test case for get_case_by_id | def test_get_case_by_id(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_case(loqusdbapi, mocker):\n # GIVEN a loqusdb api\n case_id = 'a_case'\n # WHEN fetching a case with the adapter\n mocker.patch.object(subprocess, 'check_output')\n loqusdb_output = (b\"{'_id': 'one_case', 'case_id': 'one_case'}\\n\"\n b\"{'_id': 'a_case', 'case_id': 'a_case'}\\n\")\n subprocess.check_output.return_value = loqusdb_output\n case_obj = loqusdbapi.get_case(case_id)\n # THEN assert that the correct case id is returned\n assert case_obj['_id'] == case_id",
"def get_case(\n case_id: str,\n db: Session = Depends(get_db),\n) -> Any:\n case_and_site = crud.case.get_case_with_site(db, id=case_id)\n if not case_and_site:\n return None\n (case, site) = case_and_site\n return schemas.CaseWithTaskInfo.get_case_with_task_info(case, site)",
"def get_case(self, key: str):\n case = self.cases.get(key)\n if not hasattr(case, 'case_id'):\n message = \"get_case(): Case key {} does not have a case_id\"\n logmessage(message.format(key))\n else:\n logmessage(\"get_case(): \" + \"Retrieved case {}\".format(str(case)))\n return case",
"def view_cases(context,case_id):\n\n adapter = context.obj['adapter']\n\n if case_id is not None:\n results = adapter.find_case({'case_id': case_id})\n\n else:\n results = adapter.find_cases({})\n\n click.echo(pprint(results))",
"def test_id_only_int(self):\n td = self.create_testdata()\n res = self.filter([int(td[\"cv1\"].case.id)])\n\n self.assertEqual(res.get().name, \"CV 1\")",
"def get_case(self, case_id: str) -> Union[int, None]:\n for index, case in enumerate(self.cases):\n if case.id == case_id:\n return index\n return None",
"def test_get_recipe_by_id(self):\n recipe = self.request_mgr.get_recipe_by_id(35354)\n self.assertIn(\"Guinness\", recipe.get('title'))",
"def test_cyclingleagues_id_get(self):\n pass",
"def case_id():\n return 3000",
"def get_case(self, case_id, full_case=False):\n url = self._get_api_url() + \"cases/\" + str(case_id)\n\n if (full_case):\n url = url + \"/?full_case=true\"\n\n case = self._request(url)\n return case.json()",
"def test_get_comment_information_by_id():\n get_comment_information_by_id('g99c7c0')",
"def test_get_campaign_by_id_passes(self):\n response = self.client.get(f\"{self.endpoint_url}{self.test_campaign.id}/\")\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"id\": CAMPAIGN_ID, \"name\": CAMPAIGN_NAME})",
"def test_id_case_without_prefix(self):\n td = self.create_testdata()\n res = self.filter([unicode(td[\"cv2\"].case.id)])\n\n self.assertEqual(res.get().name, \"CV 2\")",
"def test_solareclipses_id_get(self):\n pass",
"def case(self, case_id, is_external=False):\r\n return cases.Case(self, case_id, is_external)",
"def test_get_by_id(self):\n actual = chef_role.get_by_id(self.role_id)\n eq_(actual['chef_role_name'], self.role_name)",
"def test_prefectures_id_get(self):\n pass",
"def test_get_chain_by_id(self):\n pass",
"def test_christiandoctrines_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/christiandoctrines/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_brains_id_get(self):\n pass",
"def test_drugs_id_get(self):\n pass",
"def get_case(person_id, disease_id):\n\n ctable = current.s3db.disease_case\n query = (ctable.person_id == person_id) & \\\n (ctable.disease_id == disease_id) & \\\n (ctable.deleted != True)\n record = current.db(query).select(ctable.id,\n ctable.case_number,\n limitby = (0, 1)).first()\n return record",
"def test_intercommunalitys_id_get(self):\n pass",
"def test_get_by_id_wrong_type(self):\n assert ExampleUserModel.get_by_id(\"xyz\") is None",
"def test_get_recipe_equipment_by_id(self):\n pass",
"def get(self, case_number):\n return self._cases_service.get(case_number)",
"def test_medicians_id_get(self):\n pass",
"def test_getId(self):\n cases = [\n (self.test_eac + 'NE00401.xml','NE00401'),\n (self.test_eac + 'NE00101.xml','NE00101'),\n (self.test_eac + 'NE00915.xml','NE00915'),\n (self.test_eac + 'NE01001.xml','NE01001'),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n result = doc.getRecordId()\n self.assertNotEqual(doc, None)\n self.assertEquals(result, expected)",
"def check_access_and_get_testcase(testcase_id):\n if not helpers.get_user_email():\n raise helpers.UnauthorizedError()\n\n if not testcase_id:\n raise helpers.EarlyExitError('No test case specified!', 404)\n\n try:\n testcase = data_handler.get_testcase_by_id(testcase_id)\n except errors.InvalidTestcaseError:\n raise helpers.EarlyExitError('Invalid test case!', 404)\n\n if not can_user_access_testcase(testcase):\n raise helpers.AccessDeniedError()\n\n return testcase",
"def test_resids(self):\n cr = CaseReader(self.filename)\n last_case = cr.get_case(-1)\n self.assertIsNone(last_case.resids,\n \"Case erroneously contains resids.\")"
] | [
"0.7206168",
"0.7135359",
"0.70535195",
"0.6983086",
"0.69185466",
"0.6779661",
"0.6560765",
"0.6444879",
"0.6441801",
"0.643984",
"0.64307916",
"0.6415146",
"0.63614887",
"0.63151497",
"0.6309817",
"0.6308288",
"0.6291289",
"0.62707704",
"0.62072265",
"0.62045175",
"0.6164589",
"0.61460763",
"0.61310136",
"0.61180604",
"0.6108853",
"0.60624456",
"0.6054673",
"0.6047162",
"0.60330063",
"0.60254186"
] | 0.949092 | 0 |
Test case for get_cases_for_dict | def test_get_cases_for_dict(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def navigate_case_dictionary(case_list_for_run, num_cases):",
"def test_create_results_dict_1(self):\n dict = find_domains.create_results_dict(self.rps_results)\n with self.subTest():\n self.assertEqual(len(dict.keys()), 4)\n with self.subTest():\n self.assertEqual(len(dict[\"ABCDE\"]), 2)\n with self.subTest():\n self.assertEqual(len(dict[\"FGHIJ\"]), 2)",
"def test_map(self):\n\n test_cases = [\n Case(\n description=\"lists of objects\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[\"foo\", \"bar\", \"baz\"],\n ),\n Case(\n description=\"missing argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"too many arguments\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\", \"\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"missing property\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"heading\": \"baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[\"foo\", \"bar\", None],\n ),\n Case(\n description=\"value not an array\",\n val=123,\n args=[\"title\"],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"array contains non object\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, 5, []],\n args=[\"title\"],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[\"title\"],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"undefined argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=[None, None, None],\n ),\n ]\n\n self._test(Map, test_cases)",
"def test_dict(self, obj: dict) -> None:\r\n properties = read_properties(obj)\r\n for key, value in properties.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if read_type(value) == 'object':\r\n logger.debug('dict -> dict')\r\n self.test_dict(obj=value)\r\n elif read_type(value) == 'array':\r\n logger.debug('dict -> list')\r\n self.test_list(array=value)",
"def check_for_dict(check):",
"def test_values(self):\n obs = self.tester.values()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {Sample('1.SKB1.640202', self.tester),\n Sample('1.SKB2.640194', self.tester),\n Sample('1.SKB3.640195', self.tester),\n Sample('1.SKB4.640189', self.tester),\n Sample('1.SKB5.640181', self.tester),\n Sample('1.SKB6.640176', self.tester),\n Sample('1.SKB7.640196', self.tester),\n Sample('1.SKB8.640193', self.tester),\n Sample('1.SKB9.640200', self.tester),\n Sample('1.SKD1.640179', self.tester),\n Sample('1.SKD2.640178', self.tester),\n Sample('1.SKD3.640198', self.tester),\n Sample('1.SKD4.640185', self.tester),\n Sample('1.SKD5.640186', self.tester),\n Sample('1.SKD6.640190', self.tester),\n Sample('1.SKD7.640191', self.tester),\n Sample('1.SKD8.640184', self.tester),\n Sample('1.SKD9.640182', self.tester),\n Sample('1.SKM1.640183', self.tester),\n Sample('1.SKM2.640199', self.tester),\n Sample('1.SKM3.640197', self.tester),\n Sample('1.SKM4.640180', self.tester),\n Sample('1.SKM5.640177', self.tester),\n Sample('1.SKM6.640187', self.tester),\n Sample('1.SKM7.640188', self.tester),\n Sample('1.SKM8.640201', self.tester),\n Sample('1.SKM9.640192', self.tester)}\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs), key=lambda x: x.id),\n sorted(exp, key=lambda x: x.id)):\n self.assertEqual(o, e)",
"def test_dict(self, dictionary: dict) -> None:\r\n if not isinstance(dictionary, dict):\r\n raise ValueError(f'Expected dictionary, but received {type(dictionary)}')\r\n for key, value in dictionary.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if isinstance(value, dict):\r\n self.test_dict(dictionary=value)\r\n elif isinstance(value, list):\r\n self.test_list(items=value)",
"def test_values(self):\n obs = self.tester.values()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {PrepSample('1.SKB1.640202', self.tester),\n PrepSample('1.SKB2.640194', self.tester),\n PrepSample('1.SKB3.640195', self.tester),\n PrepSample('1.SKB4.640189', self.tester),\n PrepSample('1.SKB5.640181', self.tester),\n PrepSample('1.SKB6.640176', self.tester),\n PrepSample('1.SKB7.640196', self.tester),\n PrepSample('1.SKB8.640193', self.tester),\n PrepSample('1.SKB9.640200', self.tester),\n PrepSample('1.SKD1.640179', self.tester),\n PrepSample('1.SKD2.640178', self.tester),\n PrepSample('1.SKD3.640198', self.tester),\n PrepSample('1.SKD4.640185', self.tester),\n PrepSample('1.SKD5.640186', self.tester),\n PrepSample('1.SKD6.640190', self.tester),\n PrepSample('1.SKD7.640191', self.tester),\n PrepSample('1.SKD8.640184', self.tester),\n PrepSample('1.SKD9.640182', self.tester),\n PrepSample('1.SKM1.640183', self.tester),\n PrepSample('1.SKM2.640199', self.tester),\n PrepSample('1.SKM3.640197', self.tester),\n PrepSample('1.SKM4.640180', self.tester),\n PrepSample('1.SKM5.640177', self.tester),\n PrepSample('1.SKM6.640187', self.tester),\n PrepSample('1.SKM7.640188', self.tester),\n PrepSample('1.SKM8.640201', self.tester),\n PrepSample('1.SKM9.640192', self.tester)}\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs), key=lambda x: x.id),\n sorted(exp, key=lambda x: x.id)):\n self.assertEqual(o, e)",
"def test_fn_call_with_dict():\n l = [1, 2, 3, 4, 5]\n ds = [defaultdict(int), defaultdict(int), defaultdict(int)]\n for d in ds:\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called_with_dict(dict_=d, fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in d.keys() and d[fn] == (i + 1)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__",
"def test_sample_mapped_keys(self):\r\n\r\n # With num_coverage=1 only the keys will be sampled\r\n actual = sample_mapped_keys(self.test_map, 1)\r\n self.assertEqual(actual, {'1': ['1'], '2': ['2']})\r\n\r\n actual = sample_mapped_keys(self.test_map, 3)\r\n for key in actual.keys():\r\n # check number of sampled keys\r\n self.assertEqual(3, len(actual[key]))\r\n for x in actual[key]:\r\n # check that sampled key is in the full list\r\n correct = list(self.test_map[key])\r\n correct.append(key)\r\n self.assertTrue(x in correct)",
"def test_dict(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType.from_dict(data)\n assert data == observation_type.to_dict()",
"def test_convert(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]).data, test[1])",
"def test_returns_dict(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_dict():\n return {'output': 1, 'input': 2, 'extra': 3}\n\n r = returns_dict()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)",
"def test1(self) -> None:\n dict_ = {\"key0\": \"value0\", \"key1\": \"value1\"}\n actual_result = list(hdict.get_nested_dict_iterator(dict_))\n expected_result = [((\"key0\",), \"value0\"), ((\"key1\",), \"value1\")]\n self.assertListEqual(actual_result, expected_result)",
"def test_cases():\n assert count('aba') == {'a': 2, 'b': 1}\n assert count('abcddbacdb') == {'a': 2,'b': 3,'c': 2,'d': 3}\n assert count('') == {}\n print(\"Test Success!\")",
"def test_1():\n results = base_tests()\n assert type(results) is list\n assert type(results[0]) is dict\n assert len(results) == 3",
"def test_comparing(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]), test[1])",
"def verifyData(self, expectedDict):\n pass",
"def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('center_name', 'ANL'), ('center_project_name', None),\n ('emp_status', 'EMP'), ('barcodesequence', 'AGCGCTCACATC'),\n ('library_construction_protocol',\n 'This analysis was done as in Caporaso et al 2011 Genome '\n 'research. The PCR primers (F515/R806) were developed against '\n 'the V4 region of the 16S rRNA (both bacteria and archaea), '\n 'which we determined would yield optimal community clustering '\n 'with reads of this length using a procedure similar to that '\n 'of ref. 15. [For reference, this primer pair amplifies the '\n 'region 533_786 in the Escherichia coli strain 83972 sequence '\n '(greengenes accession no. prokMSA_id:470367).] The reverse '\n 'PCR primer is barcoded with a 12-base error-correcting Golay '\n 'code to facilitate multiplexing of up to 1,500 samples per '\n 'lane, and both PCR primers contain sequencer adapter '\n 'regions.'), ('linkerprimersequence', 'GTGCCAGCMGCCGCGGTAA'),\n ('target_subfragment', 'V4'), ('target_gene', '16S rRNA'),\n ('run_center', 'ANL'), ('run_prefix', 's_G1_L001_sequences'),\n ('run_date', '8/1/12'), ('experiment_center', 'ANL'),\n ('experiment_design_description',\n 'micro biome of soil and rhizosphere of cannabis plants '\n 'from CA'), ('experiment_title', 'Cannabis Soil Microbiome'),\n ('platform', 'Illumina'), ('samp_size', '.25,g'),\n ('sequencing_meth', 'Sequencing by synthesis'),\n ('illumina_technology', 'MiSeq'), ('sample_center', 'ANL'),\n ('pcr_primers',\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT'),\n ('study_center', 'CCME')}\n self.assertEqual(set(obs), exp)",
"def test_dict(self):\n self.assertValue(\n {'foo': 'foo', 'bar': 43, 'zippy': 'zoo'},\n 'bar: 43 foo: foo zippy: zoo\\n'\n )",
"def test_parametrized_cases_tests( self ):\n my_cases = JSON_FILES_PATH + \"testingCases_RF1.csv\"\n with open(my_cases, newline='', encoding='utf-8') as csvfile:\n param_test_cases = csv.DictReader(csvfile, delimiter=';')\n my_code = AccessManager()\n for row in param_test_cases:\n print(\"Param:\" + row[ 'ID TEST' ] + row[ \"VALID INVALID\" ])\n if row[ \"VALID INVALID\" ] == \"VALID\":\n valor = my_code.request_access_code( row[ \"DNI\" ], row[ \"NAME SURNAME\" ],\n row[ \"ACCESS TYPE\" ], row[ \"email\" ],\n int(row[ \"VALIDITY\" ]))\n self.assertEqual( row[ 'EXPECTED RESULT' ], valor)\n # Check if this DNI is store in storeRequest.json\n generated_request = AccessRequest.create_request_from_code(valor,row[ \"DNI\" ])\n my_request = AccessRequest(row[ \"DNI\" ], row[ \"NAME SURNAME\" ],\n row[ \"ACCESS TYPE\" ], row[ \"email\" ],\n int(row[ \"VALIDITY\" ]))\n self.assertDictEqual(generated_request.__dict__, my_request.__dict__)\n else:\n with self.assertRaises(AccessManagementException) as c_m:\n valor = my_code.request_access_code(row[ \"DNI\" ], row[ \"NAME SURNAME\" ],\n row[ \"ACCESS TYPE\" ], row[ \"email\" ],\n int(row[ \"VALIDITY\" ]))\n self.assertEqual(c_m.exception.message, row[ 'EXPECTED RESULT' ])",
"def test():\n test = [{'key': 'val1'}, ['key']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'",
"def test_values(self):\n obs = self.tester.values()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {'ANL', None, None, None, 'EMP', 'AGCGCTCACATC',\n 'This analysis was done as in Caporaso et al 2011 Genome '\n 'research. The PCR primers (F515/R806) were developed against '\n 'the V4 region of the 16S rRNA (both bacteria and archaea), '\n 'which we determined would yield optimal community clustering '\n 'with reads of this length using a procedure similar to that of'\n ' ref. 15. [For reference, this primer pair amplifies the '\n 'region 533_786 in the Escherichia coli strain 83972 sequence '\n '(greengenes accession no. prokMSA_id:470367).] The reverse PCR'\n ' primer is barcoded with a 12-base error-correcting Golay code'\n ' to facilitate multiplexing of up to 1,500 samples per lane, '\n 'and both PCR primers contain sequencer adapter regions.',\n 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL',\n 's_G1_L001_sequences', '8/1/12', 'ANL',\n 'micro biome of soil and rhizosphere of cannabis plants from '\n 'CA', 'Cannabis Soil Microbiome', 'Illumina', '.25,g',\n 'Sequencing by synthesis', 'MiSeq', 'ANL',\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME'}\n self.assertEqual(set(obs), exp)",
"def test_d(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'd'\n self.assertEqual(6, switch_average(user_dict, user_key.upper()))",
"def test_sum_dict_values(self, mocker):\n\n mocked = mocker.patch.object(\n LeafNodeScaledConformalPredictor, \"_sum_dict_values\"\n )\n\n dummy_confo_model = DummyLeafNodeScaledConformalPredictor()\n\n # set leaf_node_counts attribute so np.apply_along_axis can run\n dummy_confo_model.leaf_node_counts = {\"a\": 1}\n\n leaf_node_predictions_value = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n\n dummy_confo_model._count_leaf_node_visits_from_calibration(\n leaf_node_predictions_value\n )\n\n assert (\n mocked.call_count == leaf_node_predictions_value.shape[0]\n ), \"incorrect number of calls to _sum_dict_values\"\n\n for call_no in range(leaf_node_predictions_value.shape[0]):\n\n call_args = mocked.call_args_list[call_no]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n assert call_kwargs == {\n \"counts\": dummy_confo_model.leaf_node_counts\n }, f\"keyword args in _sum_dict_values call {call_no} incorrect\"\n\n assert (\n len(call_pos_args) == 1\n ), f\"number of positional args in _sum_dict_values call {call_no} incorrect\"\n\n np.testing.assert_array_equal(\n call_pos_args[0], leaf_node_predictions_value[call_no, :]\n )",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def test_2():\n results = base_tests()\n correct = {\n \"Consequence\": \"synonymous_variant\",\n \"Codons\": \"tgC/tgT\",\n \"Amino_acids\": \"C\",\n \"Gene\": \"ENSG00000130164\",\n \"SYMBOL\": \"LDLR\",\n \"Feature\": \"ENST00000558013\",\n \"EXON\": \"2/18\",\n \"PolyPhen\": \"\",\n \"SIFT\": \"\",\n \"Protein_position\": \"27/858\",\n 'BIOTYPE\"': \"protein_coding\",\n }\n assert results[0] == correct",
"def test_sum_dict_values_returned(self, mocker):\n\n # set the return value from _sum_dict_values calls\n sum_dict_values_return_values = [-2, 1, 0]\n\n mocker.patch.object(\n LeafNodeScaledConformalPredictor,\n \"_sum_dict_values\",\n side_effect=sum_dict_values_return_values,\n )\n\n dummy_confo_model = DummyLeafNodeScaledConformalPredictor()\n\n # set leaf_node_counts attribute so np.apply_along_axis can run\n dummy_confo_model.leaf_node_counts = {\"a\": 1}\n\n # set leaf_node_predictions arg so _sum_dict_values will be called 3 times\n leaf_node_predictions_value = np.array([[1], [2], [3]])\n\n results = dummy_confo_model._count_leaf_node_visits_from_calibration(\n leaf_node_predictions_value\n )\n\n np.testing.assert_array_equal(results, np.array(sum_dict_values_return_values))",
"def test_if_keys_or_values_in_result_dict_are_int(self):\n for key, value in add_expressions(1, 2, 8)(2, 3).items():\n self.assertIsInstance(key, int)\n self.assertIsInstance(value, int)",
"def test_c(self):\n user_dict = {'A': 3, 'B': 4, 'C': 5, 'D': 6, 'E': 7}\n user_key = 'C'\n self.assertEqual(5, switch_average(user_dict, user_key.upper()))"
] | [
"0.70761895",
"0.65705884",
"0.647785",
"0.6304288",
"0.6191103",
"0.61260873",
"0.6123775",
"0.60676205",
"0.60428697",
"0.60196096",
"0.59500134",
"0.58859193",
"0.5870065",
"0.5868873",
"0.5839127",
"0.5819128",
"0.57976145",
"0.5794307",
"0.57906353",
"0.57838327",
"0.57612455",
"0.5760646",
"0.57536936",
"0.57531035",
"0.575071",
"0.57481056",
"0.572681",
"0.5693534",
"0.5678608",
"0.56696934"
] | 0.93498963 | 0 |
Test case for get_sync_history | def test_get_sync_history(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tracker_getHistory():\n\n trackers, cap = init_tracker()\n tr = trackers[0]\n tr.addHistory([1, 1, 1, 1])\n\n assert tr.getHistory()[1] == [1, 1, 1, 1]",
"def test_get_team_history(self):\n pass",
"def QueryHistory(self):\n return []",
"def testGetHistory(self):\n self.maxDiff = None\n container_obj = self.explorer_object.GetContainer(\n 'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')\n expected = {\n '1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125':\n collections.OrderedDict({\n 'size': 0\n }),\n 'df557f39d413a1408f5c28d8aab2892f927237ec22e903ef04b331305130ab38':\n collections.OrderedDict({\n 'created_at':\n '2018-12-26T08:20:42.687925+00:00',\n 'container_cmd': '/bin/sh -c #(nop) ADD file:ce026b62356eec3ad1214f92be2c9dc063fe205bd5e600be3492c4dfb17148bd in / ',\n 'size': 1154361\n })\n }\n\n self.assertEqual(expected, container_obj.GetHistory())",
"def testGetHistory(self):\n self.maxDiff = None\n container_obj = self.explorer_object.GetContainer(\n '7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')\n expected = collections.OrderedDict({\n 'sha256:'\n '7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768': {\n 'created_at': '2017-01-13T22:13:54.401355+00:00',\n 'container_cmd': '/bin/sh -c #(nop) CMD [\"sh\"]',\n 'size': 0\n }\n })\n\n self.assertEqual(expected, container_obj.GetHistory())",
"def testGetHistory(self):\n self.maxDiff = None\n container_obj = self.explorer_object.GetContainer(\n '5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')\n expected = collections.OrderedDict({\n 'sha256:'\n '5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3': {\n 'created_at': '2018-01-24T04:29:35.590938+00:00',\n 'container_cmd': '/bin/sh -c #(nop) CMD [\"sh\"]',\n 'size': 0\n }\n })\n self.assertEqual(expected, container_obj.GetHistory())",
"def test_tracker_addHistory():\n\n trackers, cap = init_tracker()\n tr = trackers[0]\n tr.addHistory([1, 1, 1, 1])\n\n assert len(tr.history) >= 1",
"async def test_retrieve_history_orders_by_ticket(self):\n history_orders = {\n 'historyOrders': [{\n 'clientId': 'TE_GBPUSD_7hyINWqAlE',\n 'currentPrice': 1.261,\n 'currentVolume': 0,\n 'doneTime': '2020-04-15T02:45:06.521Z',\n 'id': '46214692',\n 'magic': 1000,\n 'platform': 'mt5',\n 'positionId': '46214692',\n 'state': 'ORDER_STATE_FILLED',\n 'symbol': 'GBPUSD',\n 'time': '2020-04-15T02:45:06.260Z',\n 'type': 'ORDER_TYPE_BUY',\n 'volume': 0.07\n }],\n 'synchronizing': False\n }\n client.get_history_orders_by_ticket = AsyncMock(return_value=history_orders)\n actual = await api.get_history_orders_by_ticket('46214692')\n assert actual == history_orders\n client.get_history_orders_by_ticket.assert_called_with('accountId', '46214692')",
"def history():",
"def testGetHistory(self):\n self.maxDiff = None\n container_obj = self.explorer_object.GetContainer(\n '8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')\n expected = {\n 'sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7':\n collections.OrderedDict({\n 'created_at': '2018-04-05T10:41:28.876407+00:00',\n 'container_cmd': '/bin/sh -c #(nop) CMD [\"sh\"]',\n 'size': 0\n })\n }\n self.assertEqual(expected, container_obj.GetHistory(container_obj))",
"async def test_retrieve_history_orders_by_time_range(self):\n history_orders = {\n 'historyOrders': [{\n 'clientId': 'TE_GBPUSD_7hyINWqAlE',\n 'currentPrice': 1.261,\n 'currentVolume': 0,\n 'doneTime': '2020-04-15T02:45:06.521Z',\n 'id': '46214692',\n 'magic': 1000,\n 'platform': 'mt5',\n 'positionId': '46214692',\n 'state': 'ORDER_STATE_FILLED',\n 'symbol': 'GBPUSD',\n 'time': '2020-04-15T02:45:06.260Z',\n 'type': 'ORDER_TYPE_BUY',\n 'volume': 0.07\n }],\n 'synchronizing': False\n }\n client.get_history_orders_by_time_range = AsyncMock(return_value=history_orders)\n start_time = datetime.now() - timedelta(seconds=1)\n end_time = datetime.now()\n actual = await api.get_history_orders_by_time_range(start_time, end_time, 1, 100)\n assert actual == history_orders\n client.get_history_orders_by_time_range.assert_called_with('accountId', start_time, end_time, 1, 100)",
"def fetch_history(*args, **kwargs):\n return collect_history(*args, **kwargs)",
"def get_history(self):\r\n\r\n return self.board_history",
"def test_get_derived_metric_history(self):\n pass",
"def getChanges():",
"async def test_retrieve_history_orders_by_position(self):\n history_orders = {\n 'historyOrders': [{\n 'clientId': 'TE_GBPUSD_7hyINWqAlE',\n 'currentPrice': 1.261,\n 'currentVolume': 0,\n 'doneTime': '2020-04-15T02:45:06.521Z',\n 'id': '46214692',\n 'magic': 1000,\n 'platform': 'mt5',\n 'positionId': '46214692',\n 'state': 'ORDER_STATE_FILLED',\n 'symbol': 'GBPUSD',\n 'time': '2020-04-15T02:45:06.260Z',\n 'type': 'ORDER_TYPE_BUY',\n 'volume': 0.07\n }],\n 'synchronizing': False\n }\n client.get_history_orders_by_position = AsyncMock(return_value=history_orders)\n actual = await api.get_history_orders_by_position('46214692')\n assert actual == history_orders\n client.get_history_orders_by_position.assert_called_with('accountId', '46214692')",
"def test_query_trade_history_not_shared_cache(data_dir):\n\n def first_trades(currencyPair, start, end): # pylint: disable=unused-argument\n return {'BTC': [{'data': 1}]}\n\n def second_trades(currencyPair, start, end): # pylint: disable=unused-argument\n return {'BTC': [{'data': 2}]}\n\n messages_aggregator = MessagesAggregator()\n end_ts = 99999999999\n first_user_dir = os.path.join(data_dir, 'first')\n os.mkdir(first_user_dir)\n second_user_dir = os.path.join(data_dir, 'second')\n os.mkdir(second_user_dir)\n a = Poloniex(b'', b'', first_user_dir, messages_aggregator)\n with patch.object(a, 'returnTradeHistory', side_effect=first_trades):\n result1 = a.query_trade_history(0, end_ts, end_ts)\n\n b = Poloniex(b'', b'', second_user_dir, messages_aggregator)\n with patch.object(b, 'returnTradeHistory', side_effect=second_trades):\n result2 = b.query_trade_history(0, end_ts, end_ts)\n\n assert result1['BTC'][0]['data'] == 1\n assert result2['BTC'][0]['data'] == 2",
"def get_history_data(self, exchange, pair, timedelta):\n return self.ccxt.get_history_data(exchange, pair, timedelta)",
"def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))",
"def test_users_can_track_edit_history(self):\n token = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token)\n\n response = self.create_comment(\n token=token,\n parentId=0,\n slug=response.data['article']['slug']\n )\n update_comment_url = reverse('crud-comment', kwargs={\n 'id': response.data['comment']['id']\n })\n response = self.client.put(\n update_comment_url,\n HTTP_AUTHORIZATION=token,\n data=VALID_COMMENT_2,\n format='json'\n )\n get_comment_url = reverse(\n 'crud-comment',\n kwargs={'id':response.data['id']}\n )\n token2 = self.create_user(VALID_USER_DATA_2)\n response = self.client.get(\n get_comment_url,\n HTTP_AUTHORIZATION=token2\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK\n )\n self.assertIn(\n 'version',\n response.data['commentHistory'],\n )",
"def get_history(self):\n return self.history",
"def assert_history(self, rows):\r\n self.assertEqual(self.parse_rows(rows), self.read_history())",
"async def test_get_dispatch_route_history(client):\n params = [('access_token', 'access_token_example'),\n ('start_time', 56),\n ('end_time', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes/{route_id}/history'.format(route_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def get_history(self, clocked: 'Clocked'):\n history = {}\n\n new_tick = self._get_new_tick(clocked)\n\n vclock_history = attributes.get_history(clocked, 'vclock')\n is_vclock_unchanged = (vclock_history.unchanged and\n new_tick == vclock_history.unchanged[0])\n\n for prop in self.history_models.keys():\n value = self._get_prop_value(clocked, prop)\n\n if value is not NOT_FOUND_SENTINEL:\n history[prop] = value\n\n return history, is_vclock_unchanged",
"def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim",
"def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')",
"def test_bookmark_sync(self):\r\n self._get_good_request(content=True, second_bmark=True)\r\n\r\n # test that we only get one resultback\r\n res = self.testapp.get('/api/v1/admin/extension/sync',\r\n params={'api_key': API_KEY},\r\n status=200)\r\n\r\n self.assertEqual(\r\n res.status, \"200 OK\",\r\n msg='Get status is 200, ' + res.status)\r\n\r\n self.assertTrue(\r\n GOOGLE_HASH in res.body,\r\n \"The google hash id should be in the json: \" + res.body)\r\n self.assertTrue(\r\n BMARKUS_HASH in res.body,\r\n \"The bmark.us hash id should be in the json: \" + res.body)\r\n self._check_cors_headers(res)",
"def mock_history_processing(rotki: Rotkehlchen, remote_errors=False):\n mock_function = check_result_of_history_creation\n if remote_errors:\n mock_function = check_result_of_history_creation_for_remote_errors\n accountant_patch = patch.object(\n rotki.accountant,\n 'process_history',\n side_effect=mock_function,\n )\n return accountant_patch",
"def test_setInputHistory(self):\n self.widget.setInputHistory(History([\"a\", \"b\", \"c\"]))\n self.assertEqual(self.widget.getInputHistory(), [\"a\", \"b\", \"c\"])",
"def test_tracker_clearHistory():\n\n trackers, cap = init_tracker()\n tr = trackers[0]\n tr.clearHistory()\n\n assert len(tr.getHistory()) == 0"
] | [
"0.692454",
"0.68217295",
"0.6417337",
"0.63609856",
"0.6296297",
"0.6270521",
"0.6258328",
"0.6244751",
"0.62230396",
"0.6212335",
"0.6196629",
"0.60348105",
"0.602882",
"0.5997421",
"0.5959013",
"0.5924428",
"0.5920476",
"0.59072894",
"0.58979166",
"0.58102685",
"0.57995194",
"0.5798062",
"0.5788464",
"0.57698065",
"0.5739475",
"0.5730417",
"0.56833726",
"0.56603944",
"0.5656698",
"0.5640358"
] | 0.9430264 | 0 |
Test case for update_case | def test_update_case(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_scenario(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_add_or_update_case(self):\n pass",
"def test_update_record(self):\n pass",
"def test_update_one(self):\n pass",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_update9(self):\n pass",
"def test_update_state1(self):\n pass",
"def test_update_state4(self):\n pass",
"def test_update_state2(self):\n pass",
"def test_update_occurrence(self):\n pass",
"def test_update_rule(self):\n pass",
"def test_update_state3(self):\n pass",
"def test_update_cases_from_fogbugz(mocked_update, transactional_db, case):\n update_cases_from_fogbugz()\n mocked_update.apply_async.assert_called_once_with(kwargs=dict(case_id=case.id))",
"def test_update_state(self):\n pass",
"def test_user_update_request(self):\n pass",
"def test_update_activity(self):\n pass",
"def test_update_cases_to_fogbugz(mocked_update, transactional_db, case):\n update_cases_to_fogbugz()\n mocked_update.apply_async.assert_called_once_with(kwargs=dict(case_id=case.id))",
"def test_update_goal(self):\n pass",
"def test_update_collection(self):\n pass",
"def test_client_update(self):\n pass",
"def test_update(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Candy\")\n assert n_updated == 1\n items = list(test_store.get_by())\n\n candy.age = 15\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update(self):\n # creating a new sample template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)",
"def test_update(app):\n\n assert False",
"def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']",
"def test_update(client):\n rv = update(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'",
"def test_update_attribute_data(self):\n pass",
"def test_full_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n self.assertEqual(self.product_1.sku, '44444444')\n self.assertEqual(self.product_1.category, self.category_1)\n self.assertEqual(self.product_1.description, 'Some product description')\n self.assertEqual(self.product_1.price, 129.99)\n self.assertEqual(self.product_1.featured, False)\n\n payload = {\n 'name': 'Updated name',\n 'category': self.category_2.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99,\n 'featured': True\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.put(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.category, self.category_2)\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)\n self.assertEqual(product.featured, True)"
] | [
"0.8599205",
"0.849513",
"0.849513",
"0.849513",
"0.8350617",
"0.8206942",
"0.81498384",
"0.81489325",
"0.78003776",
"0.75835353",
"0.75727797",
"0.7463717",
"0.7440917",
"0.7428357",
"0.7388373",
"0.7385997",
"0.73503935",
"0.7333985",
"0.73062307",
"0.7268944",
"0.72565717",
"0.72371036",
"0.7208849",
"0.71794355",
"0.7158332",
"0.7153649",
"0.714196",
"0.7074285",
"0.7041866",
"0.7027655"
] | 0.93751144 | 0 |
Unset key from the encryptor and decryptor | def unset_cipher(self, key_name=None):
if key_name is None:
if self.key_name is not None:
message_key_types.unset_cipher(self.key_name)
if self.pending_key_name is not None:
message_key_types.unset_cipher(self.pending_key_name)
else:
message_key_types.unset_cipher(key_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def del_key(self):\n # Deleting the values from the self.key and self.cryptor attributes.\n self.key=None\n self.cryptor=None",
"def clear_key(self, key):\r\n return self.handler.clear_key(key_to_code(key))",
"def tearDown(self) -> None:\n\n del self.private_key\n del self.pem_private_key\n del self.pem_public_key\n del self.encrypted_pem_private_key",
"def _delKey(self, key):\n pass",
"def discard(self, key: KT) -> None:\n discard(self, key)",
"def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'",
"def delkey(confirm, pub):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n mph.wallet.removePrivateKeyFromPublicKey(pub)\n set_shared_morphene_instance(stm)",
"def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)",
"def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)",
"def unlink(self):\r\n try:\r\n deleteSenderPublicKey(self)\r\n del self._privateKey\r\n del self._secondPrivateKey\r\n except Exception:\r\n pass",
"def deleteKey(self, key):\n key.delete()",
"def eliminate_key (self,key):\r\n\r\n if self.using_shelf:\r\n\r\n del self.key_dict[str(key)]",
"def unfunc(ciphertext, key):\n plaintext = xor(ciphertext, key)\n return plaintext",
"def removeAllKeys(self) -> None:\n ...",
"def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None",
"def tearDown(self):\n if self.keypair_creator:\n self.keypair_creator.clean()\n\n try:\n os.remove(pub_file_path)\n except:\n pass\n\n try:\n os.remove(priv_file_path)\n except:\n pass",
"def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt",
"def remove_key(attr):\n pm.cutKey(attr, clear=True, time=pm.currentTime())",
"def unset(self, key: str) -> Any:\n return self.client.delete(self._url(key))",
"def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'",
"def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'",
"def do_ios_decryption(self):\r\n try:\r\n self.aes_decryption_key = self.extract_aes_key()\r\n except DecryptionKeyInvalidError:\r\n self.aes_decryption_key = self.get_backup_encryption_key()\r\n self.used_ios_decryption_key_cache = True\r\n \r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)",
"def remove_key(self,key):\n public_key = key\n try: public_key = key.public_key()\n except: pass\n\n serialized = public_key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = serialized.split(None,2)[1]\n data = b64decode(blob)\n\n message = WriteMessage()\n message.write_uint8(constants.request.SSH_AGENTC_REMOVE_IDENTITY)\n message.write_binary(data)\n self.connection.send_message(message.data)\n self._await_operation_result()",
"def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None\n self.prev_attn = None",
"def __del__(self):\n if self.key_buffer:\n del self.key_buffer\n if self.val_buffer:\n del self.val_buffer\n castle_disconnect(self.conn)\n pycastle_log.info(str(self)+\" Destroyed connection\")",
"def remove_key(self, key):\n del self.data[key]\n self.save_data()",
"def delete(self, key):",
"def dec(self, key):\n if key not in self.key_dict:\n return\n self.decrease(key)",
"def remove(self, key):"
] | [
"0.79658395",
"0.6650825",
"0.6479646",
"0.64384425",
"0.62364286",
"0.6223678",
"0.62217623",
"0.61869067",
"0.61497605",
"0.6120932",
"0.6093871",
"0.60819805",
"0.60426354",
"0.6033569",
"0.6002724",
"0.5995064",
"0.5992837",
"0.5932019",
"0.59261566",
"0.5922496",
"0.58950895",
"0.5892319",
"0.584982",
"0.58491504",
"0.58487475",
"0.58323115",
"0.5752463",
"0.5746494",
"0.57420474",
"0.5729714"
] | 0.6914403 | 1 |
Set timer for key revocation | def _set_delete_timer(self, key_name, timeout):
if key_name is not None:
#print("(%d) _set_delete_timer:" % int(time.time()), key_name.hex()[:10], timeout)
query_management.QueryEntry(expire_after=timeout, callback_expire=remove_old_key,
data={KeyType.hint: key_name}, retry_count=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __updateElapsedTime(self):\n if self._keyCodeTime != 0.0 and \\\n (globalClock.getFrameTime() - self._keyCodeTime) >= self._timeout:\n self.notify.debug(\"Key code timed out. Resetting...\")\n self.reset()\n messenger.send(KeyCodes.CLEAR_CODE_EVENT)\n self._keyCodeTime = globalClock.getFrameTime()",
"def _api_timer_expiration_handler(self):\n\n try:\n self._api_lock.release()\n except:\n pass",
"def renewKey():\n while True:\n try:\n sleep(RENEW_KEY)\n mutex.acquire()\n key_dict.clear()\n mutex.release()\n except:\n print(\"error in renew key\")\n finally:\n if mutex.locked():\n mutex.release()",
"def update_time(cls, key):\n key.put()",
"def set_invoke_timer(self, timeout, retry_entry=False):\n if self.timer_entry is not None and self.timer_entry.active:\n self.timer_entry.deactivate()\n #print(\"(%d) set_invoke_timer:\" % int(time.time()), timeout)\n self.timer_entry = query_management.QueryEntry(expire_after=timeout,\n callback_expire=self._perform_key_exchange,\n retry_count=0)\n if retry_entry:\n self.timer_entry.data[KeyType.retry_timer] = True",
"def revoke_refresh_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(days=cls._REFRESH_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')",
"def do_expire(self):\n # Deep copy to avoid RuntimeError: dictionary changed size during iteration\n _timeouts = deepcopy(self.timeouts)\n for key, value in _timeouts.items():\n if value - self.clock.now() < timedelta(0):\n del self.timeouts[key]\n # removing the expired key\n if key in self.redis:\n self.redis.pop(key, None)",
"def _expire_item(self, key):\n (timeout, callback) = self._timeouts[key]\n now = time.time()\n if timeout <= now:\n item = dict.pop(self, key)\n del self._timeouts[key]\n if callback:\n try:\n callback(key, item)\n except TypeError:\n try:\n callback(key)\n except TypeError:\n callback()\n return None\n else:\n return timeout - now",
"def clean_timer(sc):\n global prev_dict_\n # Cleaning the previous dictionary after 5 hours\n prev_dict_ = {}\n z.enter(18000, 1, clean_timer, (sc,))",
"def test_rotate_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 80, 20, 19)\n assert key.audit_state == 'old'",
"def setDeactivationTime(*argv):",
"def synictimer(self, synictimer):\n\n self._synictimer = synictimer",
"async def _expire(self, key, ttl):\n return await self.client.touch(key, ttl)",
"def set_timer(self, update: Update, context: CallbackContext) -> None:\n chat_id = update.message.chat_id\n try:\n due = int(context.user_data[\"duration\"])\n if due < 0:\n # update.message.reply_text('Sorry we can not go back to future!')\n return\n\n job_removed = self.remove_job_if_exists(str(chat_id), context)\n context.job_queue.run_once(\n self.command_stop_irrigation, due, context=chat_id, name=str(chat_id)\n )\n\n # text = 'Timer successfully set!'\n # if job_removed:\n # text += ' Old one was removed.'\n # update.message.reply_text(text)\n\n except (IndexError, ValueError):\n # update.message.reply_text('Usage: /set <seconds>')\n update.message.reply_text(\"Erro ao agendar o desligamento da irrigação 😞\")",
"def reset_timer():\n resetTimer = time.time()\n target_time.clear()\n target_time.append(resetTimer)",
"def revoke_access_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(minutes=cls._ACCESS_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')",
"def removeKey(self, timeOrHash) -> None:\n ...",
"async def _expire(self, key, ttl):\n if key in SimpleMemoryBackend._cache:\n handle = SimpleMemoryBackend._handlers.pop(key, None)\n if handle:\n handle.cancel()\n if ttl:\n loop = asyncio.get_event_loop()\n SimpleMemoryBackend._handlers[key] = loop.call_later(ttl, self.__delete, key)\n return True\n\n return False",
"async def _expire(self):\n # pylint: disable=protected-access\n await asyncio.sleep(const.DEFAULT_COMMAND_EXPIRATION)\n self.set(None)",
"def expire(self):\n logging.debug(\"Expiring token as wanted...\")\n self.expiration = datetime.now() - timedelta(seconds=(10))",
"def test_old_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'",
"def _expire(self):\n del self.map.addr[self.name]\n self.map.notify(\"addrmap_expired\", *[self.name], **{})",
"def on_expire(self):\n pass",
"def reset_timer(self):\r\n self.time_minutes = 0\r\n self.time_seconds = 0",
"def set_timer(self, update: Update, context: CallbackContext) -> None:\n chat_id = update.message.chat_id\n try:\n due = int(context.user_data[\"duration\"])\n if due < 0:\n return\n\n job_removed = self.remove_job_if_exists(str(chat_id), context)\n context.job_queue.run_once(\n self.command_stop_irrigation, due, context=chat_id, name=str(chat_id)\n )\n\n except (IndexError, ValueError):\n update.message.reply_text(\"Erro ao agendar o desligamento da irrigação 😞\")",
"def _rescheduleFromRun(self, newTime):\n if newTime is None:\n self.deleteFromStore()\n else:\n self.time = newTime",
"def on_expiration_time(self, alarm) -> None:\r\n return",
"async def _reset_time(self, request_id: int) -> int:\n raise NotImplementedError()",
"def attempt(self, timer, context, phases):",
"def stop_timer(self):\n self.end_time = datetime.now()"
] | [
"0.6193708",
"0.6188828",
"0.61632437",
"0.61356395",
"0.6049411",
"0.5949552",
"0.5939341",
"0.58892876",
"0.5877896",
"0.584273",
"0.58422077",
"0.5806897",
"0.57887334",
"0.57828623",
"0.5779299",
"0.5777707",
"0.571499",
"0.5553746",
"0.55466074",
"0.5524556",
"0.5522834",
"0.55197686",
"0.55180275",
"0.55080485",
"0.5502418",
"0.5501438",
"0.5490628",
"0.54829204",
"0.54818016",
"0.54720414"
] | 0.6783776 | 0 |
Returns the presence for this channel | def presence(self, params=None, timeout=None):
params = params or {}
path = '/channels/%s/presence' % self.__name
return self.__ably._get(path, params=params, timeout=timeout).json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def presence(self):\n return self.slack_client.api_call(\"users.getPresence?user=\"+self.user_id)",
"def isHumanPresence(self):\n\t\treturn self.humanPresence",
"def online(self):\n api_call = self.presence()\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n return api_call.get('online')\n return None",
"def Presence(self, *args, **kwargs):\n return Presence(self, *args, **kwargs)",
"def ready(self):\n return self._channel.recv_ready()",
"def is_open(self, channel=None):\n return self.get_state(channel)",
"def getChannelResponse(self):\n \n \n return self.channel_response",
"def customers_presence(self):\n return self._customers_presence",
"def connected_channel(self):\n if not self.channel_id:\n return None\n\n return self._bot.get_channel(int(self.channel_id))",
"async def loop_presence(self):\n # TODO: Does this even work?\n presence = await self.set_presence()\n logger.debug(f'{presence[\"activity\"][1]} {presence[\"status\"][1]}')",
"async def check_na_channel(self, guild: discord.Guild):\n\n ch_id = await self.config.guild(guild).na_channel_id()\n\n if ch_id:\n return discord.utils.get(guild.text_channels, id=ch_id)\n return False",
"def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False",
"def check_presence(user):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.users_getPresence(user=user)\n assert response['ok'] is True\n if response['presence'] == 'active':\n return True\n else:\n return False\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None",
"def is_present(self):\n return self._is_present()",
"def check(self):\n return self.connected",
"def connected(self):\n return self._periph.connected",
"def channel_is_streaming(self, channel_name = ''): \n \n self.get_stream(channel_name)\n stream_json = self.stream['stream']\n if stream_json is None:\n return False\n else:\n print(stream_json['channel']['name'])\n print(stream_json['game'])\n print(stream_json['viewers'])\n print(stream_json['created_at'])\n return True",
"def messages_in_channel(self, client, channel):\n result = None\n if client not in self.storage:\n return result\n if channel not in self.storage[client]:\n return result\n result = len(self.storage[client][channel])\n return result",
"def is_connected(self):\n return self.connected_channel is not None",
"def get(self, public_id):\n channel = get_channel_state(public_id)\n if not channel:\n api.abort(404)\n else:\n return channel",
"def handle_groupchat_presence(self, pr):\n got_offline = False\n got_online = False\n if pr['muc']['room'] not in self.rooms.keys():\n return\n entry = pr['muc'].getStanzaValues()\n entry['show'] = pr['show']\n entry['status'] = pr['status']\n entry['alt_nick'] = pr['nick']\n if pr['type'] == 'unavailable':\n if entry['nick'] in self.rooms[entry['room']]:\n del self.rooms[entry['room']][entry['nick']]\n if '{}/{}'.format(entry['room'], entry['nick']) == self.getOurJidInRoom(entry['room']):\n log.debug(\"I got kicked :( from %s\" % entry['room'])\n del self.rooms[entry['room']]\n got_offline = True\n else:\n if entry['nick'] not in self.rooms[entry['room']]:\n got_online = True\n self.rooms[entry['room']][entry['nick']] = entry\n log.debug(\"MUC presence from %s/%s : %s\", entry['room'],entry['nick'], entry)\n self.xmpp.event(\"groupchat_presence\", pr)\n self.xmpp.event(\"muc::%s::presence\" % entry['room'], pr)\n if got_offline:\n self.xmpp.event(\"muc::%s::got_offline\" % entry['room'], pr)\n if got_online:\n self.xmpp.event(\"muc::%s::got_online\" % entry['room'], pr)",
"def is_channel(self):\n return True",
"def single_channel():\n return True",
"def channels_playing(self):\n channels = c_int()\n real = c_int()\n ckresult(\n _dll.FMOD_System_GetChannelsPlaying(self._ptr, byref(channels), byref(real))\n )\n return so(channels=channels.value, real_channels=real.value)",
"def exists(self):\n logging.warning(\n \"IRC back-end does not support determining if a room exists. \"\n \"Returning the result of joined instead.\"\n )\n return self.joined",
"def on_presence_updated(self, e):\n self.presence = e.presence if e.presence is not None else 'online'",
"def get_status(self):\n if self.__db.channel_exists('{}.today.1_0'.format(self.__dest_code)):\n self.__db.sync_today_channel()\n # maybe just sync this channel? and do same for previous methods\n else:\n self.__db.create_today_channel('{}.today.1_0'.format(self.__dest_code))\n\n conn = sqlite3.connect(self.__db.db_path)\n c = conn.cursor()\n\n today_data = c.execute(\"\"\"SELECT body FROM sync WHERE id = '{}.today.1_0.{}'\"\"\".format(self.__dest_code, self.__entityType)).fetchone()\n\n if today_data is None:\n return None\n else:\n body = json.loads(today_data[0])\n try:\n return body['facilities'][str(self.__id) + ';entityType=' + self.__entityType][0]['scheduleType']\n except:\n return None",
"def is_active(self, channel):\n return bool(int(self.bus.ask('sel:%s?' % channel)))",
"async def get_widget_info(self) -> 'typing.Tuple[bool, typing.Union[None, channel.Channel]]':\n info = await self._bot.http.get_widget_status(self.id)\n return info.get(\"enabled\", False), self.channels.get(int(info.get(\"channel_id\", 0)))",
"def getChannel(self):\r\n return self.channel"
] | [
"0.78225",
"0.65050405",
"0.63529664",
"0.6173057",
"0.5725983",
"0.5710549",
"0.5505179",
"0.54912657",
"0.54869306",
"0.54740065",
"0.5472166",
"0.5465967",
"0.5449083",
"0.5377885",
"0.5373487",
"0.5356357",
"0.534464",
"0.53218323",
"0.52983725",
"0.52873003",
"0.528721",
"0.5279791",
"0.52772075",
"0.5270897",
"0.52548885",
"0.5254141",
"0.523491",
"0.5232872",
"0.5229846",
"0.52239484"
] | 0.80038244 | 0 |
Get an existing Assessment resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AssessmentArgs.__new__(AssessmentArgs)
__props__.__dict__["additional_data"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["links"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partners_data"] = None
__props__.__dict__["resource_details"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
return Assessment(resource_name, opts=opts, __props__=__props__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"additional_data\"] = None\n __props__[\"display_name\"] = None\n __props__[\"links\"] = None\n __props__[\"metadata\"] = None\n __props__[\"name\"] = None\n __props__[\"partners_data\"] = None\n __props__[\"resource_details\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n return Assessment(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n implementation_effort: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n remediation_description: Optional[pulumi.Input[str]] = None,\n severity: Optional[pulumi.Input[str]] = None,\n threats: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n user_impact: Optional[pulumi.Input[str]] = None) -> 'AssessmentPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AssessmentPolicyState.__new__(_AssessmentPolicyState)\n\n __props__.__dict__[\"categories\"] = categories\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"implementation_effort\"] = implementation_effort\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"remediation_description\"] = remediation_description\n __props__.__dict__[\"severity\"] = severity\n __props__.__dict__[\"threats\"] = threats\n __props__.__dict__[\"user_impact\"] = user_impact\n return AssessmentPolicy(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get_assessment(assessment_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAssessmentResult:\n __args__ = dict()\n __args__['assessmentId'] = assessment_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws-native:auditmanager:getAssessment', __args__, opts=opts, typ=GetAssessmentResult).value\n\n return AwaitableGetAssessmentResult(\n arn=pulumi.get(__ret__, 'arn'),\n assessment_id=pulumi.get(__ret__, 'assessment_id'),\n assessment_reports_destination=pulumi.get(__ret__, 'assessment_reports_destination'),\n creation_time=pulumi.get(__ret__, 'creation_time'),\n delegations=pulumi.get(__ret__, 'delegations'),\n roles=pulumi.get(__ret__, 'roles'),\n scope=pulumi.get(__ret__, 'scope'),\n status=pulumi.get(__ret__, 'status'),\n tags=pulumi.get(__ret__, 'tags'))",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n created_at: Optional[pulumi.Input[int]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n entity_guid: Optional[pulumi.Input[str]] = None,\n event: Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n updated_at: Optional[pulumi.Input[int]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None) -> 'InfraAlertCondition':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InfraAlertConditionState.__new__(_InfraAlertConditionState)\n\n __props__.__dict__[\"comparison\"] = comparison\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"critical\"] = critical\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"entity_guid\"] = entity_guid\n __props__.__dict__[\"event\"] = event\n __props__.__dict__[\"integration_provider\"] = integration_provider\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"policy_id\"] = policy_id\n __props__.__dict__[\"process_where\"] = process_where\n __props__.__dict__[\"runbook_url\"] = runbook_url\n __props__.__dict__[\"select\"] = select\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"updated_at\"] = updated_at\n __props__.__dict__[\"violation_close_timer\"] = violation_close_timer\n __props__.__dict__[\"warning\"] = warning\n __props__.__dict__[\"where\"] = where\n return InfraAlertCondition(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)",
"def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n __props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)",
"def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)",
"def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Workflow':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = WorkflowArgs.__new__(WorkflowArgs)\n\n __props__.__dict__[\"acr\"] = None\n __props__.__dict__[\"aks_resource_id\"] = None\n __props__.__dict__[\"app_name\"] = None\n __props__.__dict__[\"auth_status\"] = None\n __props__.__dict__[\"branch_name\"] = None\n __props__.__dict__[\"builder_version\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"docker_build_context\"] = None\n __props__.__dict__[\"dockerfile\"] = None\n __props__.__dict__[\"dockerfile_generation_mode\"] = None\n __props__.__dict__[\"dockerfile_output_directory\"] = None\n __props__.__dict__[\"generation_language\"] = None\n __props__.__dict__[\"image_name\"] = None\n __props__.__dict__[\"image_tag\"] = None\n __props__.__dict__[\"language_version\"] = None\n __props__.__dict__[\"last_workflow_run\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"manifest_generation_mode\"] = None\n __props__.__dict__[\"manifest_output_directory\"] = None\n __props__.__dict__[\"manifest_type\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"namespace\"] = None\n __props__.__dict__[\"oidc_credentials\"] = None\n __props__.__dict__[\"port\"] = None\n __props__.__dict__[\"pr_status\"] = None\n __props__.__dict__[\"pr_url\"] = None\n __props__.__dict__[\"pull_number\"] = None\n __props__.__dict__[\"repository_name\"] = None\n __props__.__dict__[\"repository_owner\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Workflow(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(exploration_id, state_id, strict=True):\n # TODO(sll): Generalize this to handle multiple state_ids at a time.\n state_memcache_key = _get_state_memcache_key(exploration_id, state_id)\n memcached_state = memcache_services.get_multi(\n [state_memcache_key]).get(state_memcache_key)\n\n if memcached_state is not None:\n return memcached_state\n else:\n state_model = exp_models.StateModel.get(\n exploration_id, state_id, strict=strict)\n if state_model:\n state = exp_domain.State.from_dict(state_id, state_model.value)\n memcache_services.set_multi({state_memcache_key: state})\n return state\n else:\n return None",
"def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)",
"def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n as_path_match_mode: Optional[pulumi.Input[str]] = None,\n cen_id: Optional[pulumi.Input[str]] = None,\n cen_region_id: Optional[pulumi.Input[str]] = None,\n cidr_match_mode: Optional[pulumi.Input[str]] = None,\n community_match_mode: Optional[pulumi.Input[str]] = None,\n community_operate_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n destination_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n map_result: Optional[pulumi.Input[str]] = None,\n match_asns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n match_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n next_priority: Optional[pulumi.Input[int]] = None,\n operate_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n preference: Optional[pulumi.Input[int]] = None,\n prepend_as_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n route_map_id: Optional[pulumi.Input[str]] = None,\n route_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n source_region_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_route_table_id: Optional[pulumi.Input[str]] = None,\n transmit_direction: Optional[pulumi.Input[str]] = None) -> 'RouteMap':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RouteMapState.__new__(_RouteMapState)\n\n __props__.__dict__[\"as_path_match_mode\"] = as_path_match_mode\n __props__.__dict__[\"cen_id\"] = cen_id\n __props__.__dict__[\"cen_region_id\"] = cen_region_id\n __props__.__dict__[\"cidr_match_mode\"] = cidr_match_mode\n __props__.__dict__[\"community_match_mode\"] = community_match_mode\n __props__.__dict__[\"community_operate_mode\"] = community_operate_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"destination_child_instance_types\"] = destination_child_instance_types\n __props__.__dict__[\"destination_cidr_blocks\"] = destination_cidr_blocks\n __props__.__dict__[\"destination_instance_ids\"] = destination_instance_ids\n __props__.__dict__[\"destination_instance_ids_reverse_match\"] = destination_instance_ids_reverse_match\n __props__.__dict__[\"destination_route_table_ids\"] = destination_route_table_ids\n __props__.__dict__[\"map_result\"] = map_result\n __props__.__dict__[\"match_asns\"] = match_asns\n __props__.__dict__[\"match_community_sets\"] = match_community_sets\n __props__.__dict__[\"next_priority\"] = next_priority\n __props__.__dict__[\"operate_community_sets\"] = operate_community_sets\n __props__.__dict__[\"preference\"] = preference\n 
__props__.__dict__[\"prepend_as_paths\"] = prepend_as_paths\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"route_map_id\"] = route_map_id\n __props__.__dict__[\"route_types\"] = route_types\n __props__.__dict__[\"source_child_instance_types\"] = source_child_instance_types\n __props__.__dict__[\"source_instance_ids\"] = source_instance_ids\n __props__.__dict__[\"source_instance_ids_reverse_match\"] = source_instance_ids_reverse_match\n __props__.__dict__[\"source_region_ids\"] = source_region_ids\n __props__.__dict__[\"source_route_table_ids\"] = source_route_table_ids\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"transit_router_route_table_id\"] = transit_router_route_table_id\n __props__.__dict__[\"transmit_direction\"] = transmit_direction\n return RouteMap(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,\n accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,\n accessibility_self_service: Optional[pulumi.Input[bool]] = None,\n admin_note: Optional[pulumi.Input[str]] = None,\n app_links_json: Optional[pulumi.Input[str]] = None,\n app_settings_json: Optional[pulumi.Input[str]] = None,\n authentication_policy: Optional[pulumi.Input[str]] = None,\n auto_key_rotation: Optional[pulumi.Input[bool]] = None,\n auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,\n client_basic_secret: Optional[pulumi.Input[str]] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n client_uri: Optional[pulumi.Input[str]] = None,\n consent_method: Optional[pulumi.Input[str]] = None,\n enduser_note: Optional[pulumi.Input[str]] = None,\n grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n groups_claim: Optional[pulumi.Input[pulumi.InputType['OAuthGroupsClaimArgs']]] = None,\n hide_ios: Optional[pulumi.Input[bool]] = None,\n hide_web: Optional[pulumi.Input[bool]] = None,\n implicit_assignment: Optional[pulumi.Input[bool]] = None,\n issuer_mode: Optional[pulumi.Input[str]] = None,\n jwks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OAuthJwkArgs']]]]] = None,\n jwks_uri: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n login_mode: Optional[pulumi.Input[str]] = None,\n login_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n login_uri: Optional[pulumi.Input[str]] = None,\n logo: Optional[pulumi.Input[str]] = None,\n logo_uri: Optional[pulumi.Input[str]] = None,\n logo_url: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n omit_secret: Optional[pulumi.Input[bool]] = None,\n pkce_required: Optional[pulumi.Input[bool]] = None,\n policy_uri: Optional[pulumi.Input[str]] = None,\n post_logout_redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n profile: Optional[pulumi.Input[str]] = None,\n redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n refresh_token_leeway: Optional[pulumi.Input[int]] = None,\n refresh_token_rotation: Optional[pulumi.Input[str]] = None,\n response_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n sign_on_mode: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,\n tos_uri: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_name_template: Optional[pulumi.Input[str]] = None,\n user_name_template_push_status: Optional[pulumi.Input[str]] = None,\n user_name_template_suffix: Optional[pulumi.Input[str]] = None,\n user_name_template_type: Optional[pulumi.Input[str]] = None,\n wildcard_redirect: Optional[pulumi.Input[str]] = None) -> 'OAuth':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _OAuthState.__new__(_OAuthState)\n\n __props__.__dict__[\"accessibility_error_redirect_url\"] = accessibility_error_redirect_url\n __props__.__dict__[\"accessibility_login_redirect_url\"] = accessibility_login_redirect_url\n __props__.__dict__[\"accessibility_self_service\"] = accessibility_self_service\n __props__.__dict__[\"admin_note\"] = admin_note\n __props__.__dict__[\"app_links_json\"] = app_links_json\n 
__props__.__dict__[\"app_settings_json\"] = app_settings_json\n __props__.__dict__[\"authentication_policy\"] = authentication_policy\n __props__.__dict__[\"auto_key_rotation\"] = auto_key_rotation\n __props__.__dict__[\"auto_submit_toolbar\"] = auto_submit_toolbar\n __props__.__dict__[\"client_basic_secret\"] = client_basic_secret\n __props__.__dict__[\"client_id\"] = client_id\n __props__.__dict__[\"client_secret\"] = client_secret\n __props__.__dict__[\"client_uri\"] = client_uri\n __props__.__dict__[\"consent_method\"] = consent_method\n __props__.__dict__[\"enduser_note\"] = enduser_note\n __props__.__dict__[\"grant_types\"] = grant_types\n __props__.__dict__[\"groups_claim\"] = groups_claim\n __props__.__dict__[\"hide_ios\"] = hide_ios\n __props__.__dict__[\"hide_web\"] = hide_web\n __props__.__dict__[\"implicit_assignment\"] = implicit_assignment\n __props__.__dict__[\"issuer_mode\"] = issuer_mode\n __props__.__dict__[\"jwks\"] = jwks\n __props__.__dict__[\"jwks_uri\"] = jwks_uri\n __props__.__dict__[\"label\"] = label\n __props__.__dict__[\"login_mode\"] = login_mode\n __props__.__dict__[\"login_scopes\"] = login_scopes\n __props__.__dict__[\"login_uri\"] = login_uri\n __props__.__dict__[\"logo\"] = logo\n __props__.__dict__[\"logo_uri\"] = logo_uri\n __props__.__dict__[\"logo_url\"] = logo_url\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"omit_secret\"] = omit_secret\n __props__.__dict__[\"pkce_required\"] = pkce_required\n __props__.__dict__[\"policy_uri\"] = policy_uri\n __props__.__dict__[\"post_logout_redirect_uris\"] = post_logout_redirect_uris\n __props__.__dict__[\"profile\"] = profile\n __props__.__dict__[\"redirect_uris\"] = redirect_uris\n __props__.__dict__[\"refresh_token_leeway\"] = refresh_token_leeway\n __props__.__dict__[\"refresh_token_rotation\"] = refresh_token_rotation\n __props__.__dict__[\"response_types\"] = response_types\n __props__.__dict__[\"sign_on_mode\"] = sign_on_mode\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"token_endpoint_auth_method\"] = token_endpoint_auth_method\n __props__.__dict__[\"tos_uri\"] = tos_uri\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"user_name_template\"] = user_name_template\n __props__.__dict__[\"user_name_template_push_status\"] = user_name_template_push_status\n __props__.__dict__[\"user_name_template_suffix\"] = user_name_template_suffix\n __props__.__dict__[\"user_name_template_type\"] = user_name_template_type\n __props__.__dict__[\"wildcard_redirect\"] = wildcard_redirect\n return OAuth(resource_name, opts=opts, __props__=__props__)",
"def statesById(state_id):\n obj = storage.get(State, state_id)\n if obj:\n return jsonify(obj.to_dict())\n return jsonify({\"error\": \"Not found\"}), 404"
] | [
"0.6976486",
"0.608855",
"0.5846438",
"0.5757699",
"0.5648362",
"0.55919707",
"0.5582545",
"0.55817664",
"0.55264586",
"0.55085826",
"0.550386",
"0.5496845",
"0.54861027",
"0.5467891",
"0.54414856",
"0.53757304",
"0.53509825",
"0.53232807",
"0.5311685",
"0.52735114",
"0.52480686",
"0.51748484",
"0.51741683",
"0.5172154",
"0.51583207",
"0.5127296",
"0.51180047",
"0.5104065",
"0.50640565",
"0.50514466"
] | 0.6898669 | 1 |
Links relevant to the assessment | def links(self) -> pulumi.Output['outputs.AssessmentLinksResponse']:
return pulumi.get(self, "links") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLink(self):",
"def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def href(self, request) -> str:\n raise NotImplementedError()",
"def get_absolute_url(self):\n return reverse('clinicalTrial-detail', args=[str(self.trialId)])",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )",
"def issueListing(self, v, i):\n #list of URLS within the issue\n# links = []\n issURL = self.link(vol = v, iss = i )\n html=urlopen(issURL)\n soup=BeautifulSoup(html,'html.parser')\n URLs = [] #Empty list\n \n# titles = soup.find_all('h5', class_=\"title\")\n# authors = soup.find_all('h6', class_=\"authors\")\n# pubs = soup.find_all('h6', class_=\"pub-info\")\n# for t, a, p in zip(titles, authors, pubs):\n blocks = soup.find_all('div', class_=\"article panel article-result\")\n for b in blocks:\n# print(b)\n titletag = b.find('h5', class_=\"title\")\n title = titletag.get_text()\n #Extract abstract url from title head\n aURL = titletag.find('a', href = True)['href']\n alink = 'https://journals.aps.org' + aURL\n #Print out the scraped information\n print(title)\n print(alink)\n #Extract research area and topic keywords\n kwlist = b.find('ul', class_=\"inline-list subjects\")\n #If the list tag exists\n if kwlist:\n lis = kwlist.find_all('li')\n kws = [li.get_text() for li in lis] \n print(kws)\n #Add utf-8 encode\n# print(kws.encode('utf-8')) \n print('----------------------------------------------------------------') \n #Collect URLs in the issue\n URLs.append('https://journals.aps.org' + aURL)\n return URLs",
"def test_accessible(self):\n survey = Survey.objects.get(id=2)\n responses = Response.objects.filter(survey=survey)\n response = responses.all()[0]\n urls = [\n reverse(\"survey-list\"),\n reverse(\"survey-detail\", kwargs={\"id\": 2}),\n reverse(\"survey-completed\", kwargs={\"id\": 2}),\n reverse(\"survey-detail-step\", kwargs={\"id\": 2, \"step\": 1}),\n reverse(\"survey-confirmation\", kwargs={\"uuid\": response.interview_uuid}),\n ]\n for url in urls:\n self.assert_accessible(url)",
"def check_index_and_outline(self, authed_client):\r\n index_url = '/course/'\r\n index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')\r\n parsed_html = lxml.html.fromstring(index_response.content)\r\n course_link_eles = parsed_html.find_class('course-link')\r\n self.assertGreaterEqual(len(course_link_eles), 2)\r\n for link in course_link_eles:\r\n self.assertRegexpMatches(\r\n link.get(\"href\"),\r\n 'course/slashes:{0}'.format(Locator.ALLOWED_ID_CHARS)\r\n )\r\n # now test that url\r\n outline_response = authed_client.get(link.get(\"href\"), {}, HTTP_ACCEPT='text/html')\r\n # ensure it has the expected 2 self referential links\r\n outline_parsed = lxml.html.fromstring(outline_response.content)\r\n outline_link = outline_parsed.find_class('course-link')[0]\r\n self.assertEqual(outline_link.get(\"href\"), link.get(\"href\"))\r\n course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]\r\n self.assertEqual(course_menu_link.find(\"a\").get(\"href\"), link.get(\"href\"))",
"def get_absolute_url(self):\n return reverse('subject-detail', args=[str(self.id)])",
"def test_animais_list_link(self):\n PropriedadeUser.objects.create(propriedade=self.propriedade1,\n user=self.user1,\n owner=True)\n login = self.client.login(username='user1', password='12345')\n response = self.client.get(reverse('animal_pesagem_form', kwargs={'animal_pk': self.animal.pk,}))\n expected = 'href=\"{}\"'.format(reverse('animais_list', kwargs={'propriedade_pk': self.animal.propriedade.pk,}))\n self.assertContains(response, expected)",
"async def link_to(self, *args):\n pass",
"def link_residues(self) -> None:\n ...",
"def get_absolute_url(self):\n return reverse('curriculum_guides:curriculum_guide', args=[self.slug])",
"def test_dashboards_v2_link(self):\n pass",
"def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError",
"def get_success_url(self):\n return reverse_lazy('grades:list') + '?ok'",
"def handout_links(self):\r\n return self.q(css='section.handouts ol li a').map(lambda el: el.get_attribute('href')).results",
"def links(self) -> str:\n return pulumi.get(self, \"links\")",
"def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url",
"def links(request):\n cart = Cart(request)\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/links.html',\n {\n \t'cart': cart,\n 'title':'Полезные ресурсы',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )",
"def LinkAnat(self):\n\n if self.anatomical is None:\n return\n for entry in self.info.keys():\n info = self.info[entry]\n if info.has_key('anat_link'):\n self.LinkFiles(info['outdir'], self.anatomical)",
"def _link_items(self):\n pass",
"def trigger_assessment():\n\n data_api_client.req.assessments().post(data={\n 'assessment': {\n 'brief_id': request.form['brief_id'],\n 'domain_name': request.form['domain_name'],\n 'supplier_code': request.form['supplier_code']\n },\n 'update_details': {\n 'updated_by': ''\n }\n })\n\n return redirect(url_for('.assessments_review'))",
"def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n yield base_url",
"def get_absolute_url(self):\n return reverse('injury-detail', args=[str(self.id)])",
"def link(self, s_id):\r\n\r\n # Take the link entires from TOML file\r\n schedules = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if schedules:\r\n for entries in schedules:\r\n # Construct payload \r\n for payload in entries.get('link'):\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/link.json', test=payload)\r\n # Post request\r\n if 'id' in self.schedules[-1]:\r\n payload['schedule'] = self.schedules[-1].get('id')\r\n if 'id' in self.workouts[-1]:\r\n payload['workout'] = self.workouts[-1].get('id')\r\n return self.add_post(payload, API.url_link, self.links)",
"def methods():\n list_groups_text = '<a href=\"/groups\">List Groups</a>'\n list_users_text = '<a href=\"/users\">List Users</a>'\n page_links = list_groups_text + \"<br>\" + list_users_text\n return page_links",
"def get_name_link_html(self):\n url_text = \"{{% url 'trait_browser:source:studies:pk:detail' pk={} %}} \".format(self.pk)\n return URL_HTML.format(url=url_text, name=self.i_study_name)",
"def to_projectlink(self):\n\n thumb_image_url = reverse('project_serve_file', args=[self.short_name,self.logo])\n\n args = {\"abreviation\":self.short_name,\n \"title\":self.short_name,\n \"description\":self.description,\n \"URL\":reverse('comicsite.views.site', args=[self.short_name]),\n \"download URL\":\"\",\n \"submission URL\":self.get_submission_URL(),\n \"event name\":self.event_name,\n \"year\":\"\",\n \"event URL\":self.event_url,\n \"image URL\":self.logo,\n \"thumb_image_url\":thumb_image_url,\n \"website section\":\"active challenges\",\n \"overview article url\":self.publication_url,\n \"overview article journal\":self.publication_journal_name,\n \"overview article citations\":\"\",\n \"overview article date\":\"\",\n \"submission deadline\":\"\",\n \"workshop date\":self.workshop_date,\n \"open for submission\":\"yes\" if self.is_open_for_submissions else \"no\",\n \"data download\":\"yes\" if self.offers_data_download else \"no\",\n \"dataset downloads\":self.number_of_downloads,\n \"registered teams\":\"\",\n \"submitted results\":self.number_of_submissions,\n \"last submission date\":self.last_submission_date,\n \"hosted on comic\":True,\n \"created at\":self.created_at\n }\n\n projectlink = ProjectLink(args)\n return projectlink"
] | [
"0.6158211",
"0.5980082",
"0.5797147",
"0.57784724",
"0.5716922",
"0.5695942",
"0.56922674",
"0.56786233",
"0.5670207",
"0.5577552",
"0.5543779",
"0.5527378",
"0.5521695",
"0.5521499",
"0.5518003",
"0.55149287",
"0.551347",
"0.5483872",
"0.5457637",
"0.5448972",
"0.5433619",
"0.5413031",
"0.54111266",
"0.5382787",
"0.53820586",
"0.5381424",
"0.53743935",
"0.53675365",
"0.5366903",
"0.5363985"
] | 0.65279543 | 0 |
Describes the properties of an assessment's metadata. | def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:
return pulumi.get(self, "metadata") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metadata(self) -> global___SummaryMetadata:",
"def metadata(self) -> Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]:\n return pulumi.get(self, \"metadata\")",
"def get_assessment_metadata(self):\n return Metadata(**settings.METADATA['assessment_id'])",
"def describe(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n property_info = {'child_properties': self.child_properties,\n 'descendant_properties': self.descendant_properties,\n 'parent_properties': self.parent_properties,\n 'domain': self.domain,\n 'range': self.range,\n 'uri': self.uri,\n 'label': self.label,\n 'description': self.description}\n return property_info",
"def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")",
"def _metadata(self):\n meta = super()._metadata\n meta.update({\n \"name\": self.name,\n \"lead_in_time\": self.lead_in_time,\n \"amplification\": self.amplification,\n \"amplifier_clipping\": self.amplifier_clipping,\n \"power_threshold\": self.power_threshold,\n })\n return meta",
"def description(self):\n desc = self.title\n ops = []\n for attribute in self.attributes.all():\n value = attribute.value\n if isinstance(value, list):\n ops.append(\n \"%s = '%s'\" % (attribute.type, (\", \".join([str(v) for v in value])))\n )\n else:\n ops.append(\"%s = '%s'\" % (attribute.type, value))\n if ops:\n desc = \"%s (%s)\" % (desc, \", \".join(ops))\n return desc",
"def metadata(self):\n return {\n \"wildtype\" : self.wildtype,\n \"genotypes\" : self.genotypes,\n \"phenotypes\" : self.Raw.phenotypes,\n \"stdeviations\" : self.stdeviations,\n \"n_replicates\" : self.n_replicates,\n \"mutations\" : self.mutations,\n \"log_transform\" : self.log_transform,\n \"order\" : self.order,\n \"epistasis\" : {\n \"keys\" : self.epistasis.keys,\n \"values\" : self.epistasis.values,\n }\n }",
"def describe(self):\r\n mdataset_description = {\r\n 'kind': \"HConteiner\",\r\n 'compliance': self._compliance,\r\n 'has_encryption': self.has_encryption,\r\n 'encryption': self._encryption,\r\n 'sensitive': self._sensitive,\r\n 'license': self._license,\r\n }\r\n verbose_event()\r\n return mdataset_description",
"def propertyDetails(self):\n return (PROPERTY_DETAILS.get(aa, NONE) for aa in self.sequence)",
"def get_assessments_metadata(self):\n return Metadata(**settings.METADATA['assessment_ids'])",
"def description(self) -> str:\n return self._search_in_properties(ATTR_DESCRIPTION)",
"def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template",
"def metadata(self): # -> None:\n ...",
"def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))",
"def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))",
"def __repr__(self):\n return self._metadata.__str__()",
"def _short_info(self) -> str:\n nullable = \"Nullable \" if self._is_nullable else \"\"\n\n # Good candidate for python pattern matching once <3.10 support no longer required\n num_metadata_items = len(self.__metadata)\n if num_metadata_items == 0:\n metadata = \"\"\n elif num_metadata_items == 1:\n metadata = f\" [with {num_metadata_items} metadata item]\"\n else:\n metadata = f\" [with {num_metadata_items} metadata items]\"\n\n return f\"<{nullable}{self.__class__.__name__}{metadata}: {self._resolve_field_name()}>\"",
"def metadata(self) -> dict:\n meta = {}\n meta['filename'] = self.filename\n meta['label'] = self.label\n meta['url'] = self.url\n\n return meta",
"def meta(self):\n title = 'Месторасположение: {0}'.format(self.object.emplacement)\n return {\n 'title': title\n }",
"def __metadata__(self):\n raise NotImplementedError",
"def metadata(self) -> dict:\n meta = {}\n meta['name'] = self.name\n meta['id'] = self.id\n meta['family'] = self.family\n \n meta['ptd_type'] = []\n meta['pos'] = []\n meta['atype'] = []\n meta['db_vect'] = []\n meta['scale'] = []\n for cp in self.parameters:\n meta['ptd_type'].append(cp.get('ptd_type', None))\n meta['pos'].append(cp.get('pos', None))\n meta['atype'].append(cp.get('atype', None))\n meta['db_vect'].append(cp.get('db_vect', None))\n meta['scale'].append(cp.get('scale', None))\n \n return meta",
"def description(self):",
"def describe(self):\n raise NotImplementedError()",
"def describe(self):\n raise NotImplementedError()",
"def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict",
"def describe(self) -> str:",
"def __repr__(self):\n\n return self._metadata.__str__()",
"def description(self):\n pass",
"def description(self):\n pass"
] | [
"0.64961946",
"0.64541817",
"0.6290093",
"0.6145428",
"0.6084069",
"0.6051594",
"0.60471356",
"0.5994781",
"0.5933389",
"0.59084606",
"0.58996195",
"0.5870387",
"0.5848383",
"0.5837634",
"0.5826372",
"0.5826372",
"0.5818724",
"0.58181715",
"0.5814273",
"0.5807516",
"0.58013815",
"0.57757455",
"0.576439",
"0.57614464",
"0.57614464",
"0.57312673",
"0.5714556",
"0.5714087",
"0.56985253",
"0.56985253"
] | 0.6462961 | 1 |
Details of the resource that was assessed | def resource_details(self) -> pulumi.Output[Any]:
return pulumi.get(self, "resource_details") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_resource_details (self):\n return (f\"[Title:\\\"{self.get_title()}\\\"] [Author:{self.get_author()}] [Publisher:{self.get_publisher()}] [Year:{self.get_year()}]\")",
"def resource(self):\n return str(self._resource)",
"def resource(self):\n return self._resource",
"def resource(self):\n return self._resource",
"def resource(self):\n return self._resource",
"def resource(self):\n return self._resource",
"def resource(self):\n return self._resource",
"def resource(self):\n return self._resource",
"def resource(self):\n return self._resource",
"def details(self):\n pass",
"def __str__(self):\n return self.resource.__name__",
"def __str__(self):\n return self.__resource;",
"def __str__(self):\n\n return str(self.__resource);",
"def meta_data(self):\r\n return simplejson.dumps(self.__resource_meta)",
"def detail(self):\n info = self.info()\n return info",
"def PrintResource(resource):\n print resource.resource_id.text, resource.GetResourceType()",
"def resource_details(self) -> pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']]:\n return pulumi.get(self, \"resource_details\")",
"def details(self):\n raise NotImplementedError()",
"def getResource(self):\n pass;",
"def resourceid(self):",
"def info(self) -> dict:",
"def info(self):\n return self.__dict__[self.sid]",
"def info(self):",
"def info(self):",
"def resource(self):\n return self.properties.get('resource',\n Entity(self.context, ResourcePath(\"resource\", self.resource_path)))",
"def details(self):\n return self._details",
"def _get_information(self):\n pass",
"def get_resource(self):\n msg = _(\"wrote a new wechat article : %(title)s\") % {\n 'title': self.title}\n msg = unicode(msg)\n resource = {\n 'title': msg,\n 'description': self.get_digest(),\n 'url': self.get_absolute_url(),\n 'image_url': self.cover_img.url,\n }\n return resource",
"def info(self, resource, id):\n return self.request('/' + resource + '/' + str(id))",
"def info(self):\n self._info()"
] | [
"0.76440537",
"0.7001793",
"0.6875115",
"0.6875115",
"0.6875115",
"0.6875115",
"0.6875115",
"0.6875115",
"0.6875115",
"0.686545",
"0.6756598",
"0.6750164",
"0.66482615",
"0.6627125",
"0.6553682",
"0.6521818",
"0.6483526",
"0.64652663",
"0.6450048",
"0.6442991",
"0.63966125",
"0.63947666",
"0.6393753",
"0.6393753",
"0.63681227",
"0.6352751",
"0.6350844",
"0.6317342",
"0.6305615",
"0.6295532"
] | 0.7746103 | 0 |
Test get_type_for_key_path with a simple key path | def test_get_type_for_key_path_simple_path(test_schema):
assert get_type_for_key_path(test_schema, "Age") == "integer" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def GetKeyByPath(self, key_path):",
"def _generic_test(self, pathstr, expected):\n self.assertEqual(self._get_pe_key(pathstr), expected)",
"def key_type(self) -> global___Type:",
"def type(path):",
"def get(self, key, key_type=None):\n pass",
"def GetSubkeyByPath(self, key_path):",
"def test_getKey_tmpfile(self):\n filename = self.mktemp()\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))",
"def test_get_transaction_types_key(self):\n pass",
"def load_key(self, type, keyid):\n pass",
"def test_get_contact_person_types_key(self):\n pass",
"def test_get_types(self):\n pass",
"def load_key():",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))",
"def setKeyPath(*args, **kwargs)->List[AnyStr]:\n pass",
"def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True",
"def _load_key(client, entity_type, entity_id=None, parent_key=None):\n\n key = None\n if entity_id:\n key = client.key(entity_type, entity_id, parent=parent_key)\n else:\n # this will generate an ID\n key = client.key(entity_type)\n return key",
"def _is_generic_key(key):\n for prefix in [\n \"graph_rewriter_config\",\n \"model\",\n \"train_input_config\",\n \"train_config\",\n \"eval_config\"]:\n if key.startswith(prefix + \".\"):\n return True\n return False",
"def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")",
"def read_key(path_to: str) -> str:\n m_type, _ = guess_type(path_to)\n if m_type == types_map['.txt']:\n with open(path_to, 'r') as api_token_file:\n return api_token_file.read().strip()\n\n else:\n return path_to",
"def key_type(self):\n raise exceptions.NotImplementedError()",
"def testPath(self):\n self.cache._GetKeyPath.return_value = '/foo/bar'\n\n ref = cache.CacheReference(self.cache, 'key')\n self.assertEqual(ref.path, '/foo/bar')\n\n self.cache._GetKeyPath.assert_called_once_with('key')",
"def __getitem__(self, key):\n path = self.path\n if self.path_is_string:\n path = [path]\n return path[key]",
"def make_asset_key(self, asset_type, path):\r\n raise NotImplementedError()",
"def readKey(self, keyPath):\n\t\ttry:",
"def test_get_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n for key, value in pairs.items():\n tempconfig.write(\"{0}: {1}\\n\".format(\n key, value).encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, \"Spam\"), value)\n for key, value in pairs.items():\n self.assertEqual(config.getRequiredKey(key), value)\n finally:\n os.remove(tempconfig.name)",
"def getkey(attrstr, paths=None, prompt=True, promptpass=False):\n paths = paths or DEFAULT_PATHS\n for path in paths:\n filepath = os.path.expanduser(path)\n if not os.path.exists(filepath):\n continue\n with open(filepath, 'r') as handle:\n value = rget(json.load(handle), attrstr)\n if value is None:\n continue\n if isinstance(value, dict):\n raise Exception(f'Ambiguous key: {attrstr}')\n if isinstance(value, list):\n return value\n if not isinstance(value, str):\n return value\n if not value.startswith('b64:'):\n return value\n return b64decode(value[4:]).decode('utf8')\n promptfunc = getpass if promptpass else input\n if prompt:\n return promptfunc(f'Enter {attrstr}: ')\n pathstr = '\\n' + '\\n'.join(paths)\n raise Exception(f'Key not found: {attrstr}{pathstr}')"
] | [
"0.7844626",
"0.75669205",
"0.74641645",
"0.7052028",
"0.65783304",
"0.65595055",
"0.65575176",
"0.62750286",
"0.59552884",
"0.59359103",
"0.5933874",
"0.59200144",
"0.58311874",
"0.5820171",
"0.5779001",
"0.5731968",
"0.5724418",
"0.5718408",
"0.5717397",
"0.5695019",
"0.5671227",
"0.56208533",
"0.56131315",
"0.56086844",
"0.5582928",
"0.5576313",
"0.55444163",
"0.55284685",
"0.55279166",
"0.5527288"
] | 0.84244055 | 0 |
Test get_type_for_key_path with a key path one level deep | def test_get_type_for_key_path_depth_one_level(test_schema):
assert (
get_type_for_key_path(test_schema, "EmploymentInformation.OriginalHireDate")
== "string"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def GetKeyByPath(self, key_path):",
"def type(path):",
"def GetSubkeyByPath(self, key_path):",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def _create_path(root, dict_type, path):\n for sub_path in path:\n if not isinstance(root.get(sub_path, None), dict):\n root[sub_path] = dict_type()\n\n root = root[sub_path]\n\n return root",
"def test_split_nested_class_from_key_no_nested(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertIsNone(part2)",
"def test_split_nested_class_from_key(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class$nested')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertEqual(part2, 'nested')",
"def __getitem__(self, key):\n path = self.path\n if self.path_is_string:\n path = [path]\n return path[key]",
"def _generic_test(self, pathstr, expected):\n self.assertEqual(self._get_pe_key(pathstr), expected)",
"def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True",
"def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'",
"def path_lookup(data_obj, xj_path, create_dict_path=False):\n\n if not xj_path or xj_path == '.':\n return data_obj, True\n\n res = list(split(xj_path, '.', maxsplit=1))\n top_key = res[0]\n leftover = res[1] if len(res) > 1 else None\n if top_key == '*':\n return _full_sub_array(data_obj, leftover, create_dict_path)\n elif top_key.startswith('@'):\n return _single_array_element(data_obj, leftover, top_key,\n create_dict_path)\n else:\n val_type, top_key = _clean_key_type(top_key)\n top_key = unescape(top_key)\n if top_key in data_obj:\n value = data_obj[top_key]\n if val_type is not None and not isinstance(value, val_type):\n raise XJPathError(\n 'Key %s expects type \"%s\", but found value type is \"%s\"' %\n (top_key, val_type.__name__, type(value).__name__))\n if leftover:\n return path_lookup(value, leftover, create_dict_path)\n else:\n return value, True\n else:\n if val_type is not None:\n if not isinstance(data_obj, dict):\n raise XJPathError('Accessed object must be a dict type '\n 'for the key: \"%s\"' % top_key)\n if create_dict_path:\n data_obj[top_key] = val_type()\n else:\n return None, False\n if leftover:\n return path_lookup(data_obj[top_key], leftover,\n create_dict_path)\n else:\n return data_obj[top_key], True\n return None, False",
"def test_type_mapping_nested(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingLinkTargetElasticSearch')\n assert mapping\n assert 'properties' in mapping\n # if type is defined on this field, it should beg object, NOT nested since it is not enabled on this field\n assert mapping['properties']['reverse_es'].get('type', 'object') == 'object'",
"def get_key_recursive(key, config):\n if not isinstance(key, list):\n key = key.split(\"/\") # subdict indexing split using slash\n assert key[0] in config, f\"missing key '{key[0]}' in metadata dictionary: {config}\"\n val = config[key[0]]\n if isinstance(val, (dict, collections.OrderedDict)):\n assert len(key) > 1, \"missing keys to index metadata subdictionaries\"\n return get_key_recursive(key[1:], val)\n return int(val)",
"def key_type(self) -> global___Type:",
"def _is_generic_key(key):\n for prefix in [\n \"graph_rewriter_config\",\n \"model\",\n \"train_input_config\",\n \"train_config\",\n \"eval_config\"]:\n if key.startswith(prefix + \".\"):\n return True\n return False",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_traversal__path_type_view_name(path, resource_type, view_name):\n from pyramid.traversal import traverse\n root_resource = root_resource_factory()\n t = traverse(root_resource, path)\n assert isinstance(t['context'], resource_type)\n assert t['view_name'] == view_name",
"def test_create_node_from_key(self):\n created_node = self.test_graph.create_node_from_key(\n 'package.class$nested')\n self.assertEqual(created_node.package, 'package')\n self.assertEqual(created_node.class_name, 'class')\n self.assertEqual(created_node.name, 'package.class')",
"def get_by_dot_path(dictionary: Dict, key_path: str) -> Any:\n return get_by_list_of_keys(dictionary, key_path.split(\".\"))",
"def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)",
"def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)",
"def get_data(self,key=''):\n path = key.split('.')\n itm = self._root \n for ik,k in enumerate(path):\n child_found = False\n try: \n itm = itm[k]\n child_found = True\n except:\n try: \n itm = itm[int(k)]\n child_found = True\n except:\n longer_key = k\n for kk in path[ik+1:]:\n longer_key += '.'\n try: \n itm = itm[longer_key]\n child_found = True\n except: \n pass\n longer_key += kk\n try: \n itm = itm[longer_key]\n child_found = True\n except: \n pass\n if not child_found:\n raise KeyError(key)\n return itm",
"def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):",
"def test_split_nested_class_from_key_numeric(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class$1')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertEqual(part2, '1')",
"def test_two_keys():\n test = [{'key1': {'key2': 'val1'}}, ['key1', 'key2']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'"
] | [
"0.81315565",
"0.7790559",
"0.73348916",
"0.6510662",
"0.63362354",
"0.6123883",
"0.5945233",
"0.59033793",
"0.5871133",
"0.58505154",
"0.58181745",
"0.579614",
"0.5716585",
"0.56624275",
"0.5648696",
"0.56418836",
"0.5635143",
"0.5609441",
"0.5574678",
"0.55583984",
"0.55554205",
"0.55295515",
"0.5523099",
"0.5496569",
"0.54866785",
"0.54866785",
"0.54539424",
"0.5452325",
"0.54487824",
"0.54365486"
] | 0.78562295 | 1 |
Test get_type_for_key_path with multi-level key path | def test_get_type_for_key_path_multi_level(test_schema):
assert (
get_type_for_key_path(test_schema, "EmploymentInformation.Beneficiary.Name")
== "string"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def GetKeyByPath(self, key_path):",
"def type(path):",
"def GetSubkeyByPath(self, key_path):",
"def test_add_keys_multiple_times(self):\n path = _path.Path.from_str(\"RootOper.Foo(*)\")\n with self.assertRaisesRegex(\n ValueError, \"Path element already has key information\"):\n path(4)",
"def _generic_test(self, pathstr, expected):\n self.assertEqual(self._get_pe_key(pathstr), expected)",
"def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True",
"def _create_path(root, dict_type, path):\n for sub_path in path:\n if not isinstance(root.get(sub_path, None), dict):\n root[sub_path] = dict_type()\n\n root = root[sub_path]\n\n return root",
"def __getitem__(self, key):\n path = self.path\n if self.path_is_string:\n path = [path]\n return path[key]",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def _is_generic_key(key):\n for prefix in [\n \"graph_rewriter_config\",\n \"model\",\n \"train_input_config\",\n \"train_config\",\n \"eval_config\"]:\n if key.startswith(prefix + \".\"):\n return True\n return False",
"def test_two_keys():\n test = [{'key1': {'key2': 'val1'}}, ['key1', 'key2']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'",
"def test_type_mapping(registry, item_type):\n with mappings_use_nested(False):\n mapping = type_mapping(registry[TYPES], item_type)\n assert mapping\n assert 'properties' in mapping\n if item_type == 'TestingLinkTargetElasticSearch':\n assert mapping['properties']['reverse_es'].get('type', 'object') != 'nested' # should not occur here\n\n # check calculated properties on objects/arrays of objects are mapped correctly\n if item_type == 'TestingCalculatedProperties':\n assert mapping['properties']['nested']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested']['properties']['keyvalue']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['key']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['value']['type'] == 'text'\n assert mapping['properties']['nested2']['properties']['keyvalue']['type'] == 'text'",
"def setKeyPath(*args, **kwargs)->List[AnyStr]:\n pass",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def key_type(self) -> global___Type:",
"def test_split_nested_class_from_key(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class$nested')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertEqual(part2, 'nested')",
"def test_split_nested_class_from_key_no_nested(self):\n part1, part2 = class_dependency.split_nested_class_from_key(\n 'pkg.name.class')\n self.assertEqual(part1, 'pkg.name.class')\n self.assertIsNone(part2)",
"def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)",
"def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)",
"def test_get_transaction_types_key(self):\n pass",
"def test_type_mapping_nested(registry):\n with mappings_use_nested(True):\n mapping = type_mapping(registry[TYPES], 'TestingLinkTargetElasticSearch')\n assert mapping\n assert 'properties' in mapping\n # if type is defined on this field, it should beg object, NOT nested since it is not enabled on this field\n assert mapping['properties']['reverse_es'].get('type', 'object') == 'object'",
"def test_traversal__path_type_view_name(path, resource_type, view_name):\n from pyramid.traversal import traverse\n root_resource = root_resource_factory()\n t = traverse(root_resource, path)\n assert isinstance(t['context'], resource_type)\n assert t['view_name'] == view_name",
"def get(self, key, key_type=None):\n pass",
"def path_lookup(data_obj, xj_path, create_dict_path=False):\n\n if not xj_path or xj_path == '.':\n return data_obj, True\n\n res = list(split(xj_path, '.', maxsplit=1))\n top_key = res[0]\n leftover = res[1] if len(res) > 1 else None\n if top_key == '*':\n return _full_sub_array(data_obj, leftover, create_dict_path)\n elif top_key.startswith('@'):\n return _single_array_element(data_obj, leftover, top_key,\n create_dict_path)\n else:\n val_type, top_key = _clean_key_type(top_key)\n top_key = unescape(top_key)\n if top_key in data_obj:\n value = data_obj[top_key]\n if val_type is not None and not isinstance(value, val_type):\n raise XJPathError(\n 'Key %s expects type \"%s\", but found value type is \"%s\"' %\n (top_key, val_type.__name__, type(value).__name__))\n if leftover:\n return path_lookup(value, leftover, create_dict_path)\n else:\n return value, True\n else:\n if val_type is not None:\n if not isinstance(data_obj, dict):\n raise XJPathError('Accessed object must be a dict type '\n 'for the key: \"%s\"' % top_key)\n if create_dict_path:\n data_obj[top_key] = val_type()\n else:\n return None, False\n if leftover:\n return path_lookup(data_obj[top_key], leftover,\n create_dict_path)\n else:\n return data_obj[top_key], True\n return None, False",
"def get_data(self,key=''):\n path = key.split('.')\n itm = self._root \n for ik,k in enumerate(path):\n child_found = False\n try: \n itm = itm[k]\n child_found = True\n except:\n try: \n itm = itm[int(k)]\n child_found = True\n except:\n longer_key = k\n for kk in path[ik+1:]:\n longer_key += '.'\n try: \n itm = itm[longer_key]\n child_found = True\n except: \n pass\n longer_key += kk\n try: \n itm = itm[longer_key]\n child_found = True\n except: \n pass\n if not child_found:\n raise KeyError(key)\n return itm",
"def test_get_contact_person_types_key(self):\n pass",
"def test_get_types(self):\n pass"
] | [
"0.78702086",
"0.77100694",
"0.718522",
"0.6605864",
"0.6282174",
"0.62626797",
"0.5969291",
"0.59365714",
"0.59263676",
"0.5847719",
"0.5822397",
"0.5740006",
"0.5724306",
"0.57241476",
"0.56666523",
"0.56429887",
"0.563662",
"0.5624624",
"0.56129014",
"0.55840045",
"0.5535988",
"0.5535988",
"0.5510817",
"0.54844755",
"0.54772043",
"0.54347163",
"0.5424431",
"0.5419458",
"0.5401134",
"0.5395989"
] | 0.8355806 | 0 |
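For context on what these assertions exercise, a minimal sketch of a get_type_for_key_path walker is shown below. The schema layout (nested "properties" dicts, as in JSON Schema) is an assumption inferred from the tests, not the project's actual implementation.

def get_type_for_key_path(schema, key_path):
    # walk one dot-separated segment at a time through nested "properties"
    node = schema
    for key in key_path.split("."):
        properties = node.get("properties", {})
        if key not in properties:
            return None  # mirrors the invalid-key-path test in the next record
        node = properties[key]
    return node.get("type")

demo = {"properties": {"Age": {"type": "integer"}}}
print(get_type_for_key_path(demo, "Age"))      # -> integer
print(get_type_for_key_path(demo, "foo.bar"))  # -> None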
Test get_type_for_key_path with invalid key path | def test_get_type_for_key_path_invalid_key_path(test_schema):
assert get_type_for_key_path(test_schema, "foo.bar") == None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_type_for_key_path_simple_path(test_schema):\n assert get_type_for_key_path(test_schema, \"Age\") == \"integer\"",
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def _generic_test(self, pathstr, expected):\n self.assertEqual(self._get_pe_key(pathstr), expected)",
"def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')",
"def GetKeyByPath(self, key_path):",
"def _check_key(self, key):\n raise NotImplementedError",
"def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]",
"def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))",
"def test_validate_with_invalid_key_format_type(self):\n key_format_type = \"invalid\"\n kwargs = {'key_format_type': key_format_type}\n\n self.assertRaisesRegex(\n TypeError, \"invalid key format type\", Digest, **kwargs)",
"def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)",
"def testFromStringTypeError(self):\n for path_type in dicom_path.Type:\n if path_type != dicom_path.Type.STORE:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.STORE_PATH_STR, path_type)\n if path_type != dicom_path.Type.STUDY:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.STUDY_PATH_STR, path_type)\n if path_type != dicom_path.Type.SERIES:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.SERIES_PATH_STR, path_type)\n if path_type != dicom_path.Type.INSTANCE:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.INSTANCE_PATH_STR, path_type)",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})",
"def test_add_keys_multiple_times(self):\n path = _path.Path.from_str(\"RootOper.Foo(*)\")\n with self.assertRaisesRegex(\n ValueError, \"Path element already has key information\"):\n path(4)",
"def test_get_invalid_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n exceptionKeys = ['Hello', 'spam']\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n tempconfig.write('ham: eggs'.encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, value), value)\n\n for key in exceptionKeys:\n with self.assertRaises(easydms.config.ErrorConfigKeyNotFound):\n config.getRequiredKey(key)\n finally:\n os.remove(tempconfig.name)",
"def test_get_path_returns_none_for_bad_key(\n self, audio_store_and_expected_files, key):\n audio_store = audio_store_and_expected_files[0]\n assert audio_store.get_path(key) is None",
"def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def _check_key_type(cls, key: Any) -> K:\n if not isinstance(key, cls.keytype):\n raise KeyError(\n f\"{cls!r} accepts only keys of type {cls.keytype!r}, \"\n f\"got {type(key)!r}\"\n )\n return cast(K, key)",
"def type(path):",
"def _validate_type(self, key, type_):\n if type_ is None:\n type_ = \"\"\n \n if not isinstance(type_, (str, unicode)):\n raise TypeError(\"FileLink.type should be a str or unicode, \"\n \"not %s\" % type_.__class__.__name__)\n \n return type_",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def _is_generic_key(key):\n for prefix in [\n \"graph_rewriter_config\",\n \"model\",\n \"train_input_config\",\n \"train_config\",\n \"eval_config\"]:\n if key.startswith(prefix + \".\"):\n return True\n return False",
"def test_get_storage_invalid_suffix(self):\r\n self.assertRaises(KeyError, self.profile.get_storage, ('testing.json,'))",
"def test_getKey_tmpfile(self):\n filename = self.mktemp()\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))",
"def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None",
"def key_type(self) -> global___Type:"
] | [
"0.7941828",
"0.74922115",
"0.73346525",
"0.67035246",
"0.6578848",
"0.64337885",
"0.64240384",
"0.64203644",
"0.6280829",
"0.6259001",
"0.6171811",
"0.61548215",
"0.61433345",
"0.61050516",
"0.6074959",
"0.60654056",
"0.6059521",
"0.6050425",
"0.60475475",
"0.6045783",
"0.6044268",
"0.5995072",
"0.59939396",
"0.599052",
"0.59665674",
"0.5964877",
"0.5953391",
"0.5947495",
"0.5922025",
"0.59154105"
] | 0.8730529 | 0 |
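The test_schema fixture itself does not appear in this excerpt. A fixture consistent with all of the assertions above (Age, EmploymentInformation.OriginalHireDate, EmploymentInformation.Beneficiary.Name, and the failing foo.bar path) might look like this sketch; the structure is hypothetical and only the field names are taken from the tests.

import pytest

@pytest.fixture
def test_schema():
    # hypothetical fixture; field names come from the assertions above
    return {
        "properties": {
            "Age": {"type": "integer"},
            "EmploymentInformation": {
                "type": "object",
                "properties": {
                    "OriginalHireDate": {"type": "string"},
                    "Beneficiary": {
                        "type": "object",
                        "properties": {"Name": {"type": "string"}},
                    },
                },
            },
        }
    }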
Evaluate and apply formatting on template, apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables have priority when there are conflicting variable names. | def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str:
variables = self.__dict__
variables.update(kwargs)
template = CustomFormats().format(template, **variables)
if art:
art = art.format(nfo=template)
template = art
for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template):
# TODO: This if check is quite yucky, look into alternative options.
# Ideally a custom format spec would be great.
template = template.replace(
m.group(0),
m.group(2) if int(m.group(1)) else ""
)
template = "\n".join(map(str.rstrip, template.splitlines(keepends=False)))
return template | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render(template, variables={}):\r\n\treturn prettify( parse(template).render(dict(variables.items())) )",
"def format_template(template, *args):\n return textwrap.dedent(template % args).strip()",
"def formatEval(self, template, attrs, scale=1, noScale=None):\n # Boat width not stored, so calculated here...\n try:\n attrs.update({\n 'boatWidth': self.canvas.boat.attrs['width']\n - (self.canvas.boat.attrs['wallWidth'] * 2)\n })\n except:\n # Boat hasn't been added to self yet.\n pass\n\n # First put the values in place as normal.\n s = template.format(**attrs)\n # Then split it at the quotes into a list.\n s = s.split('\"')\n ret = s\n for i, section in enumerate(s):\n # Take the even elements from the list to\n # get the bits between the quotes.\n if (i+1)%2 == 0:\n # Try to evaluate it, if it causes an error, I will\n # assume it's not an expression, and leave it alone.\n try:\n result = eval(section)\n if not noScale == None and not noScale in s[i-1]:\n result *= scale\n except:\n result = section\n # Add the result back to the list.\n ret[i] = '\"' + str(result) + '\"'\n else:\n # The bits not in quotes are left alone.\n ret[i] = str(section)\n # Join the list back into a string.\n return ''.join(ret)",
"def render(self, template: str, **vars) -> str:",
"def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. \"\n \"\".format(template, e.args[0]))\n return template",
"def part_render(self, attr, *a, **kw):\r\n style = kw.get('style', 'html')\r\n template = self.template(style)\r\n dt = template.get_def(attr)\r\n return unsafe(dt.render(thing = self, *a, **kw))",
"def insert_evaluate_variables(text, var_dict):\n if isinstance(text, list):\n text.insert(0, '{% load quest_render_tags %}')\n rndr_string = '\\n'.join(text)\n else:\n rndr_string = r'{% load quest_render_tags %} ' + text\n\n var_dict_rendered = {}\n for key, values in var_dict.iteritems():\n var_dict_rendered[key] = values[1]\n\n tmplte = Template(rndr_string)\n cntxt = Context(var_dict_rendered)\n return tmplte.render(cntxt)",
"def _template_formatting(field, inputs, inputs_dict_st):\n from .specs import MultiInputObj, MultiOutputFile\n\n # if a template is a function it has to be run first with the inputs as the only arg\n template = field.metadata[\"output_file_template\"]\n if callable(template):\n template = template(inputs)\n\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(r\"{\\w+}\", template)\n inp_fields_fl = re.findall(r\"{\\w+:[0-9.]+f}\", template)\n inp_fields += [re.sub(\":[0-9.]+f\", \"\", el) for el in inp_fields_fl]\n if len(inp_fields) == 0:\n return template\n\n val_dict = {}\n file_template = None\n\n for fld in inp_fields:\n fld_name = fld[1:-1] # extracting the name form {field_name}\n if fld_name not in inputs_dict_st:\n raise AttributeError(f\"{fld_name} is not provided in the input\")\n fld_value = inputs_dict_st[fld_name]\n if fld_value is attr.NOTHING:\n # if value is NOTHING, nothing should be added to the command\n return attr.NOTHING\n else:\n # checking for fields that can be treated as a file:\n # have type File, or value that is path like (including str with extensions)\n if isinstance(fld_value, os.PathLike) or (\n isinstance(fld_value, str) and \".\" in fld_value\n ):\n if file_template:\n raise Exception(\n f\"can't have multiple paths in {field.name} template,\"\n f\" but {template} provided\"\n )\n else:\n file_template = (fld_name, fld_value)\n else:\n val_dict[fld_name] = fld_value\n\n # if field is MultiOutputFile and some elements from val_dict are lists,\n # each element of the list should be used separately in the template\n # and return a list with formatted values\n if field.type is MultiOutputFile and any(\n [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()]\n ):\n # all fields that are lists\n keys_list = [\n k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj))\n ]\n if any(\n [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]]\n ):\n raise Exception(\n f\"all fields used in {field.name} template have to have the same length\"\n f\" or be a single value\"\n )\n formatted_value = []\n for ii in range(len(val_dict[keys_list[0]])):\n val_dict_el = copy(val_dict)\n # updating values to a single element from the list\n for key in keys_list:\n val_dict_el[key] = val_dict[key][ii]\n\n formatted_value.append(\n _element_formatting(\n template, val_dict_el, file_template, keep_extension=keep_extension\n )\n )\n else:\n formatted_value = _element_formatting(\n template, val_dict, file_template, keep_extension=keep_extension\n )\n return formatted_value",
"def _element_formatting(template, values_template_dict, file_template, keep_extension):\n if file_template:\n fld_name_file, fld_value_file = file_template\n # splitting the filename for name and extension,\n # the final value used for formatting depends on the template and keep_extension flag\n name, *ext = Path(fld_value_file).name.split(\".\", maxsplit=1)\n filename = str(Path(fld_value_file).parent / name)\n # updating values_template_dic with the name of file\n values_template_dict[fld_name_file] = filename\n # if keep_extension is False, the extensions are removed\n if keep_extension is False:\n ext = []\n else:\n ext = []\n\n # if file_template is at the end of the template, the simplest formatting should work\n if file_template and template.endswith(f\"{{{fld_name_file}}}\"):\n # recreating fld_value with the updated extension\n values_template_dict[fld_name_file] = \".\".join([filename] + ext)\n formatted_value = template.format(**values_template_dict)\n # file_template provided, but the template doesn't have its own extension\n elif file_template and \".\" not in template:\n # if the fld_value_file has extension, it will be moved to the end\n formatted_value = \".\".join([template.format(**values_template_dict)] + ext)\n # template has its own extension or no file_template provided\n # the simplest formatting, if file_template is provided it's used without the extension\n else:\n formatted_value = template.format(**values_template_dict)\n return formatted_value",
"def apply_to(self, template):\n pass",
"def _substitute(template, fuzzer, benchmark):\n return template.format(fuzzer=fuzzer, benchmark=benchmark)",
"def render_string(self, template: str, **vars) -> str:",
"def render(self, template, *args, **kwargs):\n self._render(template, sys.stdout, *args, **kwargs)",
"def reformat(ctx):\n pass",
"def highlight(val, conditions: dict, tablefmt):\n val = round(val, ROUND)\n for color, cond in conditions.items():\n if tablefmt == 'simple':\n if cond:\n return pfont([color, 'BOLD'], format(round(val, ROUND), f\".{ROUND}f\"), PrintFont)\n elif tablefmt in ['latex', 'latex_raw']: # needs to be amended by hand\n if cond:\n return pfont([color, 'BOLD'], str(format(round(val, ROUND), f\".{ROUND}f\")), LaTeXFont)\n return format(val, f\".{ROUND}f\")",
"def render(template, context):\n if not template:\n return None\n\n text = \"\"\n filename = \"templates/\" + template\n with open(filename) as f:\n text = f.read()\n # First compile template into extended base template.\n is_child = re.search(extend_search, text.splitlines()[0])\n if is_child:\n base_filename = \"templates/\" + is_child.group(2)\n with open(base_filename) as base:\n text = extend_template(base.read(), text)\n # Run conditional checks\n has_conditions = re.search(if_search, text)\n if has_conditions:\n text = render_conditionals(text, context)\n # Replace any variables passed to the render function.\n for replace in context.replaces.keys():\n arg_search = re.compile(\"{{ \" + replace + \" }}\")\n text = re.sub(arg_search, context.replaces[replace], text)\n return text",
"def render_template(*args, **kwargs):\r\n params = {'cache_buster': cache_buster, 'user': {}, 'user_json': {}, 'PROD': PRODUCTION,\r\n 'static_route': 'http://cdn1.pythonhackers.com'}\r\n params.update(**kwargs)\r\n\r\n return template_render(*args, **params)",
"def render( *args, **kwargs ):",
"def render( context, *args, **kwargs ):",
"def persona_from_template_values(topic: str, topic_item: str, extra_details: str = ''):\n pers = f'My favorite {topic} is {topic_item}.'\n if extra_details:\n pers += f'\\n{extra_details}'\n return pers",
"def _render_thing(self, thing):\n function = \"{:}\".format\n if (type(thing) in self.fmatdict):\n function = self.fmatdict[type(thing)]\n return function(thing).strip()",
"def render(self, template, **kw):\n self.write(self.render_string(template, **kw))",
"def render(self, template, **kw):\n self.write(self.render_str(template, **kw))",
"def render(self, template, **kw):\n self.write(self.render_str(template, **kw))",
"def render(self, template, **kw):\n self.write(self.render_str(template, **kw))",
"def apply_format(self, **format_vars):\n for construction_dict in (self._actions, self._conditions):\n for construction_key, construction_objs in construction_dict.iteritems():\n for construction in construction_objs:\n construction.apply_format(**format_vars)",
"def format_html(format_string, *args, **kwargs):\n args_safe = map(conditional_escape, args)\n kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in\n six.iteritems(kwargs)])\n return mark_safe(format_string.format(*args, **kwargs))",
"def _text(self, template, **kw):\n ns = dict()\n ns['csv'] = _args_to_csv\n ns['f'] = _Namespace(kw)\n return Template(template).render(**ns)",
"def substitution_func_gen(self, variables, code):\n \n #print(self.rule.name, self.external_vars)\n ext, rest = separate(variables, lambda v: v in self.external_vars.keys())\n \n substitution_dict = dict()\n substitution_dict.update( { e : self.external_vars[e] for e in ext } )\n substitution_dict.update( { r : p(r) for r in rest } )\n \n new_format_string = code.format(**substitution_dict)\n \n return ( set(rest), lambda vd = { r : r for r in rest }: new_format_string.format(**vd) )",
"def template_file(task, template, path, jinja_filters=None, **kwargs):\n jinja_filters = jinja_filters or {} or task.nornir.config.jinja_filters\n merged = merge_two_dicts(task.host, kwargs)\n text = jinja_helper.render_from_file(\n template=template,\n path=path,\n host=task.host,\n jinja_filters=jinja_filters,\n **merged\n )\n return Result(host=task.host, result=text)"
] | [
"0.5875008",
"0.58742577",
"0.58426213",
"0.5590589",
"0.5554982",
"0.5440268",
"0.5436101",
"0.53913677",
"0.5359046",
"0.5331052",
"0.530326",
"0.5283194",
"0.5190745",
"0.51852167",
"0.5137004",
"0.51337534",
"0.5114587",
"0.50861675",
"0.5056899",
"0.5040804",
"0.5000452",
"0.49814096",
"0.49765435",
"0.49765435",
"0.49765435",
"0.4975772",
"0.49756187",
"0.49577516",
"0.49286833",
"0.49285817"
] | 0.6679136 | 0 |
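To make the <?flag?...?> handling concrete, here is the conditional-section pass from run() extracted into a self-contained sketch; the original method also does placeholder formatting and optional art wrapping before this step.

import re

def apply_conditionals(text):
    # same keep/drop rule as the loop in run(): flag 1 keeps the body, flag 0 drops it
    for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", text):
        text = text.replace(m.group(0), m.group(2) if int(m.group(1)) else "")
    return "\n".join(line.rstrip() for line in text.splitlines())

demo = "kept: <?1?yes?>\ndropped:<?0? no ?>"
print(apply_conditionals(demo))  # -> "kept: yes" then "dropped:"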
Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will bug the user for one interactively if not found. | def get_imdb_id(self, imdb_id: Any) -> str:
if not imdb_id:
general_track = self.media_info.general_tracks[0].to_data()
imdb_id = general_track.get("imdb")
if not imdb_id:
print("No IMDB ID was provided but is required...")
while not imdb_id or not isinstance(imdb_id, str):
user_id = input("IMDB ID (e.g., 'tt0487831'): ")
if not self.IMDB_ID_T.match(user_id):
print(f"The provided IMDB ID {user_id!r} is not valid...")
print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').")
else:
imdb_id = user_id
return imdb_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alternative_media_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alternative_media_id\")",
"def imdb_id(title):\n pass",
"def _get_id(mf, url=None):\n\n\tprops = mf['properties']\n\n\tif 'uid' in props:\n\t\treturn props['uid'][0]\n\telif 'url' in props:\n\t\treturn props['url'][0]\n\telse:\n\t\treturn None",
"def _get_device_id(api: Mobileclient) -> str:\n\n try:\n _get_device_id_from_environment()\n except KeyError:\n pass\n\n return _get_device_id_from_registered(api)",
"def _get_device_id_from_environment() -> str:\n\n return os.environ[\"GOOGLE_MUSIC_DEVICE_ID\"]",
"def id(self):\n return self.settings['your_botid']",
"def media_id(self):\n try:\n return Html.toId(self.content)\n except:\n Mp3Error(1)",
"def media_content_id(self) -> str | None:\n if self._device.movie.handle:\n return self._device.movie.handle\n return None",
"def get_media_id(media_url):\n split_url = media_url.split(\"/\")\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png\n if split_url[-2] == \"media\":\n return split_url[-1]\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png\n else:\n #This is required for now due to the SDK parsing out the `/`s\n return \"%2F\".join(split_url[-3:])",
"def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get(\"tmdb\")\n if not tmdb_id:\n print(\"Warning: No TMDB ID was provided...\")\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f\"The provided TMDB ID {tmdb_id!r} is not valid...\")\n print(\"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\")\n raise ValueError(\"Invalid TMDB ID\")\n return tmdb_id",
"def lookup_by_id(i_d):\n imdb_id = 0\n str_id = str(i_d)\n if str_id[0].isdigit():\n #contact the moviedb api for inmdb id\n res = requests.get(\n f\"https://api.themoviedb.org/3/movie/{i_d}/external_ids?api_key=28dda9f76d76f128b47831768bc9a103\")\n res.raise_for_status()\n mov = res.json()\n imdb_id = mov[\"imdb_id\"]\n else:\n imdb_id = i_d\n # Contact API\n try:\n response = requests.get(\n f\"http://www.omdbapi.com/?i={imdb_id}&apikey=ced7be9a\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # parse response\n try:\n movie = response.json()\n return {\n \"title\":movie[\"Title\"],\n \"id\":movie[\"imdbID\"],\n \"plot\":movie[\"Plot\"],\n \"year\":movie[\"Year\"],\n \"poster\":movie[\"Poster\"],\n \"gross\":movie[\"BoxOffice\"],\n \"rating\":movie[\"imdbRating\"],\n \"website\":movie[\"Website\"],\n \"director\":movie[\"Director\"],\n \"writer\":movie[\"Writer\"],\n \"genre\":movie[\"Genre\"],\n \"actors\":movie[\"Actors\"]\n }\n\n except (KeyError, TypeError, ValueError):\n return None",
"def the_tvdb_dot_com_id(title):\n pass",
"def media_content_id(self) -> str | None:\n # The lovelace app loops media to prevent timing out, don't show that\n if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:\n return None\n media_status = self._media_status()[0]\n return media_status.content_id if media_status else None",
"def get_id(conf_name: str=CONFIG_FILE) -> Optional[int]:\n with open(conf_name, 'r') as fobj:\n data = json.load(fobj)\n\n uid = data.get('id')\n\n assert uid is None or isinstance(uid, int), \\\n 'The user id must be an integer if it exists'\n\n return uid",
"def spotify_id_from_token(access_token: str) -> Optional[str]:\n if access_token is None:\n return None\n headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n response = requests.post(\"https://api.spotify.com/v1/me\", headers=headers)\n if response.status_code != 200:\n return None\n user = response.json()\n if \"id\" not in user:\n return None\n return user[\"id\"]",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n #logger.debug('DAILYMOTION VIDEO FOUND %s' % url)\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/embed/video/') or p.path.startswith('/swf/video/')):\n # http://www.dailymotion.com/embed/video/xmp7zw\n return re.sub('_.+', '', path_list[2])\n elif len(path_list) == 2 and (p.path.startswith('/video/') or p.path.startswith('/swf/')):\n # http://www.dailymotion.com/video/xmp7zw_whatever\n # http://www.dailymotion.com/swf/xmp7zw\n return re.sub('_.+', '', path_list[1])\n \n return ''",
"def fetch_current_user_id(s):",
"def get_self_id(self):\n configfilepath=\"./camera.ini\"\n\n config = configparser.ConfigParser()\n config.read(configfilepath)\n camid = \"\"\n if config.has_section(\"camera\"):\n camid = config.get(\"camera\",\"id\")\n print(\"Found CamID in camera.ini: \" + camid)\n else:\n config.add_section(\"camera\")\n\n if (camid == \"\"):\n h = iter(hex(getnode())[2:].zfill(12))\n camid = \":\".join(i + next(h) for i in h)\n config.set(\"camera\",\"id\",camid)\n with open(configfilepath, 'w') as configfile:\n config.write(configfile)\n print(\"Generated CamID and wrote to camera.ini: \" + camid)\n \n return camid",
"def _get_ID(self):\n raw_data = imdb.search_for_title(self.title)\n if len(raw_data) > 1:\n raw_data = raw_data[0] # Pulls the first value of the title (the closest match)\n # if there is more than one\n self.ID = raw_data['imdb_id']",
"def _get_experiment_id(experiment_name: str, config: SQAConfig) -> Optional[int]:\n exp_sqa_class = config.class_to_sqa_class[Experiment]\n with session_scope() as session:\n sqa_experiment_id = (\n session.query(exp_sqa_class.id) # pyre-ignore\n .filter_by(name=experiment_name)\n .one_or_none()\n )\n\n if sqa_experiment_id is None:\n return None\n return sqa_experiment_id[0]",
"def get_id(connection):\n if connection is None:\n return None\n return connection.id",
"def _get_device_id_from_registered(api) -> str:\n\n try:\n api.oauth_login(\"bad\")\n except InvalidDeviceId as original_exception:\n error_message = original_exception.args[0]\n\n device_ids_str = error_message.split(\"Your valid device IDs are:\")[-1]\n device_ids = device_ids_str.split(\"\\n\")\n device_ids = [device_id.replace(\"* \", \"\") for device_id in device_ids]\n return device_ids[-1]",
"def _GetIdFromInstanceDirStr(instance_dir):\n match = _RE_LOCAL_INSTANCE_ID.match(instance_dir)\n if match:\n return match.group(\"ins_id\")\n\n # To support the device which is not created by acloud.\n if os.path.expanduser(\"~\") in instance_dir:\n return \"1\"\n\n return None",
"def media_content_id(self):\n return self._table.active_track.id if self._table.active_track else None",
"def fn_GetTMDBId(self, details):\n\n # If the custom url was not actually defined and we had no cached\n # data, then there is nothing to do.\n #\n if details is None:\n return\n print \"GetTMDBId details: %s\" % details",
"def get_picture_id(path):\n\t\tif path is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from fileuploader_picture WHERE file=%s\" % (path)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None",
"def get_mediatype_id(self, description):\n result = self.conn.mediatype.get(filter={'description': description})\n\n if result:\n mediatypeid = result[0]['mediatypeid']\n else:\n mediatypeid = None\n\n return mediatypeid",
"def unique_id(self):\n if self._uuid != '':\n return \"linkplay_media_\" + self._uuid",
"def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get(\"tvdb\")\n if not tvdb_id:\n print(\"Warning: No TVDB ID was provided...\")\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f\"The provided TVDB ID {tvdb_id!r} is not valid...\")\n print(\"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\")\n raise ValueError(\"Invalid TVDB ID\")\n return int(tvdb_id)"
] | [
"0.5761104",
"0.5743748",
"0.559037",
"0.5428841",
"0.5407892",
"0.5339938",
"0.5334296",
"0.53217006",
"0.5253112",
"0.52254945",
"0.52251637",
"0.5214888",
"0.5210509",
"0.5178844",
"0.5142981",
"0.5113672",
"0.5091264",
"0.5082251",
"0.5070948",
"0.5068511",
"0.5062682",
"0.505709",
"0.50503653",
"0.5044069",
"0.5041648",
"0.50350076",
"0.50331473",
"0.50239646",
"0.5009026",
"0.5008585"
] | 0.64897996 | 0 |
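The IMDB_ID_T pattern is defined elsewhere in the class; judging by the error message above, a stand-in like the following captures its intent (the exact pattern is an assumption).

import re

IMDB_ID_T = re.compile(r"^tt\d{7,8}$")  # assumed: 'tt' prefix plus 7-8 digits

for candidate in ("tt0487831", "tt10810424", "0487831"):
    print(candidate, "valid" if IMDB_ID_T.match(candidate) else "invalid")
# '0487831' is rejected, which is what keeps the interactive prompt looping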
Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. | def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:
if not tmdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tmdb_id = general_track.get("tmdb")
if not tmdb_id:
print("Warning: No TMDB ID was provided...")
return None
if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):
print(f"The provided TMDB ID {tmdb_id!r} is not valid...")
print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').")
raise ValueError("Invalid TMDB ID")
return tmdb_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:\n if not tvdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tvdb_id = general_track.get(\"tvdb\")\n if not tvdb_id:\n print(\"Warning: No TVDB ID was provided...\")\n return None\n if isinstance(tvdb_id, int):\n tvdb_id = str(tvdb_id)\n if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):\n print(f\"The provided TVDB ID {tvdb_id!r} is not valid...\")\n print(\"Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').\")\n raise ValueError(\"Invalid TVDB ID\")\n return int(tvdb_id)",
"def tag_id(self, tag):\n assert isinstance(tag, str)\n\n df = self.dfs[\"tags\"]\n tag_records = df[df.tag == tag]\n if 1 == len(tag_records): \n return tag_records[\"id\"].values[0]\n elif 1 < len(tag_records): \n raise Exception(\"More than one record exist by tag\")\n else :\n # We should not be strict to tag name since it is a user input.\n import warnings\n warnings.warn(\"No record matched with tag\", Warning)\n return None",
"def get_imdb_id(self, imdb_id: Any) -> str:\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get(\"imdb\")\n if not imdb_id:\n print(\"No IMDB ID was provided but is required...\")\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f\"The provided IMDB ID {user_id!r} is not valid...\")\n print(\"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\")\n else:\n imdb_id = user_id\n return imdb_id",
"def media_id(self):\n try:\n return Html.toId(self.content)\n except:\n Mp3Error(1)",
"def get_id(conf_name: str=CONFIG_FILE) -> Optional[int]:\n with open(conf_name, 'r') as fobj:\n data = json.load(fobj)\n\n uid = data.get('id')\n\n assert uid is None or isinstance(uid, int), \\\n 'The user id must be an integer if it exists'\n\n return uid",
"def _get_id(mf, url=None):\n\n\tprops = mf['properties']\n\n\tif 'uid' in props:\n\t\treturn props['uid'][0]\n\telif 'url' in props:\n\t\treturn props['url'][0]\n\telse:\n\t\treturn None",
"def get_id(self) -> Optional[str]:\n return self.id_",
"def get_device_id(self) -> str:\n return Config.get('device_id')",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def _GetIdFromInstanceDirStr(instance_dir):\n match = _RE_LOCAL_INSTANCE_ID.match(instance_dir)\n if match:\n return match.group(\"ins_id\")\n\n # To support the device which is not created by acloud.\n if os.path.expanduser(\"~\") in instance_dir:\n return \"1\"\n\n return None",
"def alternative_media_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"alternative_media_id\")",
"def get_dataset_id(thing: object) -> t.DatasetId:\n if isinstance(thing, int):\n return t.DatasetId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.DatasetId(int_id)\n except ValueError:\n raise err.InvalidDatasetError(id=str(thing))",
"def getGUIDByBdcfg(configfile):\n generalDict, projectDict, solutionDict = Engine.readConfiguration(configfile)\n return projectDict['uuid']",
"def get_media_id_from_post(media_obj):\n if media_obj:\n media_id = media_obj.get('id')\n return media_id\n return",
"def _get_device_id(api: Mobileclient) -> str:\n\n try:\n _get_device_id_from_environment()\n except KeyError:\n pass\n\n return _get_device_id_from_registered(api)",
"def get_media_id(media_url):\n split_url = media_url.split(\"/\")\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png\n if split_url[-2] == \"media\":\n return split_url[-1]\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png\n else:\n #This is required for now due to the SDK parsing out the `/`s\n return \"%2F\".join(split_url[-3:])",
"def _get_device_id_from_environment() -> str:\n\n return os.environ[\"GOOGLE_MUSIC_DEVICE_ID\"]",
"def get_id(html):\n\ttry:\n\t\tsong_id = re.findall('soundcloud://sounds:(.*?)\"', html)[0]\n\t\treturn song_id\n\texcept IndexError:\n\t\tprint(\"\\033[91m✘ Could not find song ID\\033[0m\")\n\t\tsys.exit()",
"def id(self) -> typing.Optional[str]:\n return self._values.get('id')",
"def id(self) -> typing.Optional[str]:\n return self._values.get('id')",
"def config_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"config_id\")",
"def get_id(self, name=None):\n\n # Support using integer IDs directly\n if isinstance(name, int):\n return name\n\n self.ensure_loaded()\n if name is not None:\n ems_systems = self.search('name', name.upper(), searchtype=\"match\")\n if ems_systems.empty:\n sys_names = self.list_all()['name'].to_list()\n raise ValueError(\n 'No matching systems found. You have access to: {0}'.format(sys_names))\n id = ems_systems.iloc[0]['id']\n else:\n ems_systems = self.list_all()\n if ems_systems.shape[0] == 1:\n id = ems_systems.iloc[0]['id']\n else:\n raise LookupError(\n 'Multiple ems systems found. Please select one from the available:\\n{0}'\n .format(ems_systems.loc[:, ['id', 'name']])\n )\n return id",
"def get_take_audio_id(self, take_id):\n def execute_sql(cursor):\n cursor.execute(\"SELECT audioId FROM Takes WHERE id = ?\",\n (take_id,))\n results = cursor.fetchone()\n if results is None:\n return None\n else:\n return results[0]\n \n def when_ready(dbpool):\n return dbpool.runInteraction(execute_sql)\n d = self.get_dbpool()\n d.addCallback(when_ready)\n\n def on_error(error):\n log.warn(\"Failed to get audio ID for take id ({take_id}): \"+\n str(error)\n )\n return error\n d.addErrback(on_error)\n\n return d",
"def get_record_id(thing: Union[\"Record\", t.RecordId, UUID, str]) -> t.RecordId:\n if isinstance(thing, UUID):\n return t.RecordId(thing)\n elif isinstance(thing, Record):\n return thing.id\n return t.RecordId(UUID(thing))",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id"
] | [
"0.64487255",
"0.5630184",
"0.5409801",
"0.54041606",
"0.54024845",
"0.53911626",
"0.5343543",
"0.53333956",
"0.5306317",
"0.5306317",
"0.5306317",
"0.5306317",
"0.5306317",
"0.5306317",
"0.52967757",
"0.52887344",
"0.52816087",
"0.5266011",
"0.52260435",
"0.522252",
"0.520782",
"0.5163452",
"0.5162279",
"0.5128807",
"0.5128807",
"0.5128392",
"0.5121896",
"0.51204026",
"0.51113844",
"0.50650084"
] | 0.64089936 | 1 |
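The same style of gate applies to TMDB IDs; below is a stand-in for TMDB_ID_T consistent with the expected 'tv/2490' and 'movie/14836' examples (again an assumption, since the real pattern is not shown).

import re

TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$")  # assumed shape

assert TMDB_ID_T.match("tv/2490")
assert TMDB_ID_T.match("movie/14836")
assert not TMDB_ID_T.match("14836")  # bare IDs lack the required prefix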
Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. | def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:
if not tvdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tvdb_id = general_track.get("tvdb")
if not tvdb_id:
print("Warning: No TVDB ID was provided...")
return None
if isinstance(tvdb_id, int):
tvdb_id = str(tvdb_id)
if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):
print(f"The provided TVDB ID {tvdb_id!r} is not valid...")
print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').")
raise ValueError("Invalid TVDB ID")
return int(tvdb_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:\n if not tmdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n tmdb_id = general_track.get(\"tmdb\")\n if not tmdb_id:\n print(\"Warning: No TMDB ID was provided...\")\n return None\n if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):\n print(f\"The provided TMDB ID {tmdb_id!r} is not valid...\")\n print(\"Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').\")\n raise ValueError(\"Invalid TMDB ID\")\n return tmdb_id",
"def tag_id(self, tag):\n assert isinstance(tag, str)\n\n df = self.dfs[\"tags\"]\n tag_records = df[df.tag == tag]\n if 1 == len(tag_records): \n return tag_records[\"id\"].values[0]\n elif 1 < len(tag_records): \n raise Exception(\"More than one record exist by tag\")\n else :\n # We should not be strict to tag name since it is a user input.\n import warnings\n warnings.warn(\"No record matched with tag\", Warning)\n return None",
"def get_device_id(self) -> str:\n return Config.get('device_id')",
"def _GetIdFromInstanceDirStr(instance_dir):\n match = _RE_LOCAL_INSTANCE_ID.match(instance_dir)\n if match:\n return match.group(\"ins_id\")\n\n # To support the device which is not created by acloud.\n if os.path.expanduser(\"~\") in instance_dir:\n return \"1\"\n\n return None",
"def get_video_id(lookup_value, lookup_type='url'):\n if lookup_type == 'url':\n video_id = lookup_value.split('videos/')[1]\n elif lookup_type == 'content_id' or lookup_type == 'id':\n video_json = core.get_data('contents', lookup_value, return_json=True)\n video_id = video_json['id']\n else:\n errors.handlers.bad_lookup_type(lookup_type, ('url', 'content_id'))\n return video_id",
"def get_video_id(url):\n\n if not url:\n return \"\"\n\n # If URL is embedded\n if \"embed\" in url:\n return url.split(\"/\")[-1]\n\n parse_result = urlparse(url)\n query = parse_qs(parse_result.query)\n return query[\"v\"][0]",
"def the_tvdb_dot_com_id(title):\n pass",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def _get_device_id(api: Mobileclient) -> str:\n\n try:\n _get_device_id_from_environment()\n except KeyError:\n pass\n\n return _get_device_id_from_registered(api)",
"def _get_device_id_from_environment() -> str:\n\n return os.environ[\"GOOGLE_MUSIC_DEVICE_ID\"]",
"def getGUIDByBdcfg(configfile):\n generalDict, projectDict, solutionDict = Engine.readConfiguration(configfile)\n return projectDict['uuid']",
"def get_dataset_id(thing: object) -> t.DatasetId:\n if isinstance(thing, int):\n return t.DatasetId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.DatasetId(int_id)\n except ValueError:\n raise err.InvalidDatasetError(id=str(thing))",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")",
"def config_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"config_id\")",
"def get_url_param_by_id(measurement_id: str) -> str:\n if re.search('android|ios', measurement_id, re.IGNORECASE):\n return 'firebase_app_id'\n elif re.search('G-[A-Z0-9]{10}', measurement_id, re.IGNORECASE):\n return 'measurement_id'\n else:\n raise ValueError(f'Unsupported Measurement ID/Firebase App ID.')",
"def get_volume_from_id(item_id):\n return volumes[\"data\"][str(item_id)]",
"def getEpisodeId(path, conn):\n cur = conn.cursor()\n cur.execute(\"SELECT id_episode FROM episode WHERE path=?\", (path,))\n id_episode = cur.fetchone()[0]\n return id_episode",
"def get_id(self) -> Optional[str]:\n return self.id_",
"def get_imdb_id(self, imdb_id: Any) -> str:\n if not imdb_id:\n general_track = self.media_info.general_tracks[0].to_data()\n imdb_id = general_track.get(\"imdb\")\n if not imdb_id:\n print(\"No IMDB ID was provided but is required...\")\n while not imdb_id or not isinstance(imdb_id, str):\n user_id = input(\"IMDB ID (e.g., 'tt0487831'): \")\n if not self.IMDB_ID_T.match(user_id):\n print(f\"The provided IMDB ID {user_id!r} is not valid...\")\n print(\"Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').\")\n else:\n imdb_id = user_id\n return imdb_id",
"def get_id(conf_name: str=CONFIG_FILE) -> Optional[int]:\n with open(conf_name, 'r') as fobj:\n data = json.load(fobj)\n\n uid = data.get('id')\n\n assert uid is None or isinstance(uid, int), \\\n 'The user id must be an integer if it exists'\n\n return uid",
"def get_video_id(vid_folder_string):\n parts = vid_folder_string.split(\"_\")\n return parts[0] + \"_\" + parts[1]",
"def get_id(html):\n\ttry:\n\t\tsong_id = re.findall('soundcloud://sounds:(.*?)\"', html)[0]\n\t\treturn song_id\n\texcept IndexError:\n\t\tprint(\"\\033[91m✘ Could not find song ID\\033[0m\")\n\t\tsys.exit()",
"def volume_id(self):\n if self.volume:\n return self.volume.id\n else:\n return None",
"def trace_id_get() -> tuple[str, str] | None:\n return trace_id_cv.get()"
] | [
"0.60287786",
"0.5454019",
"0.5437956",
"0.5360313",
"0.5346196",
"0.5333645",
"0.5321893",
"0.52784383",
"0.5270552",
"0.5240752",
"0.5226761",
"0.5213634",
"0.5184168",
"0.5155326",
"0.5155326",
"0.5155326",
"0.5155326",
"0.5155326",
"0.5155326",
"0.51372594",
"0.5098536",
"0.5097522",
"0.5085566",
"0.5076998",
"0.5059803",
"0.5056156",
"0.5040745",
"0.5027556",
"0.5021359",
"0.5015722"
] | 0.694675 | 0 |
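get_tvdb_id accepts either int or str input and normalizes to int after the pattern check; here is a compact self-contained sketch of that flow, with TVDB_ID_T assumed to be digits-only.

import re

TVDB_ID_T = re.compile(r"^\d+$")  # assumed: numeric IDs only, no URL slugs

def normalize_tvdb(tvdb_id):
    # mirrors get_tvdb_id: coerce int to str, validate, return int
    if isinstance(tvdb_id, int):
        tvdb_id = str(tvdb_id)
    if not isinstance(tvdb_id, str) or not TVDB_ID_T.match(tvdb_id):
        raise ValueError("Invalid TVDB ID")
    return int(tvdb_id)

print(normalize_tvdb(79216), normalize_tvdb("1395"))  # -> 79216 1395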
Scrape Title Name and Year (including e.g. 2019) from IMDB | def get_title_name_year(self) -> Tuple[str, str]:
r = self.session.get(f"https://www.imdb.com/title/{self.imdb}")
if r.status_code != 200:
raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]")
imdb_page = html.unescape(r.text)
imdb_title = re.search(
# testing ground: https://regex101.com/r/bEoEDn/1
r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)"
r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>",
imdb_page
)
if not imdb_title:
raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...")
return imdb_title.group("name").strip(), imdb_title.group("year").strip() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape_movie_page(dom):\n # to save the information\n info = []\n\n # find the information block needed\n header = dom.find(\"div\", \"title_wrapper\")\n\n # find the title and strip the string\n name_dom = header.h1.get_text().encode(\"utf-8\")\n name = str(name_dom)[2:-16]\n info.append(name)\n\n # find the year and strip the year\n year_dom = header.h1.span.get_text().encode(\"utf-8\")\n year = str(year_dom)[3:-2]\n info.append(year)\n\n # find the duration and strip the string\n duration_dom = dom.find(\"time\", itemprop=\"duration\").get_text().encode(\"utf-8\")\n duration = str(duration_dom)[28:-23]\n info.append(duration)\n\n # find all the genres and strip the string\n genre_dom = dom.find(\"div\", itemprop=\"genre\").a.get_text().encode(\"utf-8\")\n genre = find_genres(genre_dom, dom)\n info.append(genre)\n\n # find all the directors and strip the string\n director_dom = dom.find(\"span\", itemprop=\"director\").get_text().encode(\"utf-8\")\n director = find_directors(director_dom, dom)\n info.append(director)\n\n # find all the writers and strip the string\n writer_dom = dom.find(\"span\", itemprop=\"creator\").a.get_text().encode(\"utf-8\")\n writer = find_writers(writer_dom, dom)\n info.append(writer)\n\n # find all the actors and strip the string\n actor_dom = dom.find(\"span\", itemprop=\"actors\").a.get_text().encode(\"utf-8\")\n actor = find_actors(actor_dom, dom)\n info.append(actor)\n\n # find the rating and strip the string\n rating_dom = dom.find(\"span\", itemprop=\"ratingValue\").get_text().encode(\"utf-8\")\n rating = str(rating_dom)[2:-1]\n info.append(rating)\n\n # find the number of ratings and strip the string\n number_ratings_dom = dom.find(\"span\", itemprop=\"ratingCount\").get_text().encode(\"utf-8\")\n number_ratings = str(number_ratings_dom)[2:-1]\n info.append(number_ratings)\n\n return info",
"def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE",
"def extract_movie_header(soup: BeautifulSoup) -> Tuple[str, str]:\n\n header = soup.find(\"h3\", class_=\"lister-item-header\")\n\n title = header.a.get_text()\n\n year = header.find(\"span\", class_=\"lister-item-year\").get_text()[-5:-1]\n year = int(year)\n\n return title, year",
"def getMovieInfo(endpoint, title, year):\n\n params = {'t': title, 'y': year, 'plot':'short', 'r':'json', 'tomatoes':'true'}\n response = requests.get(endpoint, params=params)\n\n try:\n response.raise_for_status()\n response = response.json()\n\n if 'Error' in response.keys():\n raise LookupError\n\n results = {}\n strkeys = ['Actors', 'Director', 'Genre', 'Plot', 'Rated', 'Released', 'imdbID', 'tomatoConsensus']\n intkeys = ['Runtime', 'Metascore', 'imdbVotes', 'tomatoMeter', 'tomatoReviews']\n fltkeys = ['imdbRating']\n\n for key in strkeys:\n results[key] = response[key] if response[key] != 'N/A' else None\n for key in intkeys:\n results[key] = int(re.sub(r'[^\\d]', '', response[key])) if response[key] != 'N/A' else None\n for key in fltkeys:\n results[key] = float(re.sub(r'[^\\d]', '', response[key])) if response[key] != 'N/A' else None\n return results\n\n except requests.exceptions.HTTPError:\n print(\"There was a problem with the HTTP request: {0}\".format(response.status_code))\n except requests.exceptions.Timeout:\n print(\"The HTTP request timed out\")\n except LookupError:\n pass\n return None",
"def list_titles(genre):\n text = genre_html(genre)\n num_titles = text.count('title=')\n\n titles = []\n for i in range(num_titles):\n start = text.find('title=')\n end = text[start+7:].find('\">')\n title = text[start+7:start+end]\n titles.append(title)\n text = text[start+7:]\n\n return titles",
"def get_year_from_movielist_title(title):\n match = re.match(r'.*\\s+\\((\\d+)\\)', title)\n year = int(match.groups()[0])\n return year",
"def get_movie_info(page: str, verbose:bool = True):\n\n def add_scoreInfo(pattern, raw_text, keyName):\n \"\"\"inner helper function to help add score information\n :param pattern: pattern to match\n :param raw_text: html text\n :param keyName: key name to be append to the dict\n \"\"\"\n match_pat = re.search(pattern, raw_text)\n if match_pat is None:\n info[keyName] = None\n else:\n info[keyName] = match_pat.group(1)\n\n info = dict() \n \n # verbose option\n if verbose:\n print('scraping main page')\n print('scraping url: ' + page)\n \n # make soup\n soup = _make_soup(page)\n \n if soup == '':\n return None\n \n else:\n ### extraction ###\n # movie id\n movieId = soup.find('a', href=re.compile('movieId=[0-9]+'))\n if movieId is None:\n info['movie_link'] = None\n else:\n movieId = re.search('movieId=([0-9]+)$', movieId[\"href\"])\n info['movie_link'] = '/m/'+ movieId.group(1)\n \n movieInfo= soup.find('script', type=\"application/ld+json\")\n if movieInfo is None:\n print('No movie information for this movie.')\n else:\n # movie name\n movieName = re.search('\"name\":\"?(.+?)\"?,\"', movieInfo.get_text())\n if movieName is None:\n info['movie_name'] = None\n else:\n info['movie_name'] = movieName.group(1)\n \n # rating\n rating = re.search('\"contentRating\":\"?(.+?)\"?,\"',movieInfo.get_text())\n if rating is None:\n info['rating'] = None\n else:\n info['rating'] = rating.group(1)\n \n # genre \n genre = re.search('\"genre\":\\[\"(.+?)\"\\]', movieInfo.get_text())\n if genre is None:\n info['genre'] = None\n else:\n info['genre'] = genre.group(1).replace('\"','')\n \n # directors\n directors = re.search('\"director\":(.+?),\"author\"', movieInfo.get_text())\n if directors is None:\n info['directors'] = None\n else:\n info['directors'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', directors.group(1)))\n \n # writers\n writers = re.search('\"director\":.+?\"author\":(.+?),\"genre\"', movieInfo.get_text())\n if writers is None:\n info['writers'] = None\n else:\n info['writers'] = ','.join(re.findall('\"name\":\"(.+?)\",\"', writers.group(1)))\n \n # movie synopsis\n movieSyno = soup.find('div', id=re.compile('movieSynopsis'))\n if movieSyno is None:\n info['movie_info'] = None\n else:\n info['movie_info'] = movieSyno.get_text().strip()\n \n # poster_image\n poster_img = soup.find('meta', property = re.compile('image$'))\n if poster_img is None:\n info['poster_image'] = None\n else:\n info['poster_image'] = poster_img[\"content\"]\n \n # cast\n casts = soup.find_all('div', class_=re.compile('^cast-item'))\n if casts is None:\n info['casts'] = None\n else:\n info['casts'] = ','.join([cast.find('span').get_text().strip() for cast in casts])\n \n # in_theaters_date\n in_theaters_date = soup.find('div', text=re.compile(\"In Theaters\"))\n if in_theaters_date is None:\n info['in_theaters_date'] = None\n else:\n info['in_theaters_date'] = in_theaters_date.find_next_sibling('div').find('time').get_text().strip()\n \n # on_streaming_date\n on_streaming_date = soup.find('div', text=re.compile(\"On Disc/Streaming:\"))\n if on_streaming_date is None:\n info['on_streaming_date'] = None\n else:\n info['on_streaming_date'] = on_streaming_date.find_next_sibling('div').find('time').get_text().strip()\n \n # runtime_in_minutes\n runtime_in_minutes = soup.find('div', text=re.compile(\"Runtime:\"))\n if runtime_in_minutes is None:\n info['runtime_in_minutes'] = None\n else:\n info['runtime_in_minutes'] = 
re.search('[0-9]+',runtime_in_minutes.find_next_sibling('div').find('time').get_text().strip()).group(0)\n # studio_name\n studio_name = soup.find('div', text=re.compile(\"Studio:\"))\n if studio_name is None:\n info['studio_name'] = None\n else:\n info['studio_name'] = studio_name.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n # Extra: box office\n box_office = soup.find('div', text=re.compile(\"Box Office:\"))\n if box_office is None:\n info['box_office'] = None\n else:\n info['box_office'] = box_office.find_next_sibling('div', class_=\"meta-value\").get_text().strip()\n \n scoreInfo = soup.find('script', type=\"text/javascript\")\n if scoreInfo is None:\n print('No score information for this movie.')\n else:\n pat_head1 = 'root.RottenTomatoes.context.scoreInfo.+?'\n pat_keywrd = '\"consensus\":'\n pat_tail1 = '\"?(.+?)\"?,\"'\n pat_tail2 = '\"?([0-9]+?)\"?,\"'\n pat_tail3 = '\"?([0-9\\.]+?)\"?,\"'\n # critics_consensus\n criticsCns_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(criticsCns_pat, scoreInfo.get_text(), 'critics_consensus')\n \n # tomatometer_status\n pat_keywrd ='\"tomatometerState\":'\n tmtStatus_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(tmtStatus_pat, scoreInfo.get_text(), 'tomatometer_status')\n\n # tomatometer_rating\n pat_keywrd = '\"score\":'\n tmtRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtRating_pat, scoreInfo.get_text(), 'tomatometer_rating')\n\n # tomatometer_count\n pat_keywrd ='\"numberOfReviews\":'\n tmtCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(tmtCnt_pat, scoreInfo.get_text(), 'tomatometer_count')\n \n # audience_status\n audStatus_pat = 'root.RottenTomatoes.context.popcornMeterState.+?\"(.+?)\";'\n add_scoreInfo(audStatus_pat, scoreInfo.get_text(), 'audience_status')\n\n # Extra: audience_want_to_see\n audWantToSee_pat = 'root.RottenTomatoes.context.wantToSeeData.+?\"wantToSeeCount\":' + pat_tail2\n add_scoreInfo(audWantToSee_pat, scoreInfo.get_text(), 'audience_want_to_see_count')\n \n # audience_rating\n pat_keywrd = '\"audienceAll\".+?\"score\":'\n audRating_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRating_pat, scoreInfo.get_text(), 'audience_rating')\n\n # audience_count\n pat_keywrd = '\"audienceAll\".+?\"ratingCount\":'\n audCnt_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audCnt_pat, scoreInfo.get_text(), 'audience_count')\n\n # audience_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"numberOfReviews\":'\n audTopCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audTopCritics_pat, scoreInfo.get_text(), 'audience_top_critics_count')\n \n # audience_fresh_critics_count\n pat_keywrd = '\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_critics_count')\n \n # audience_rotten_critics_count\n pat_keywrd = '\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_critics_count')\n\n # Extra: audience_fresh_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"freshCount\":'\n audFreshCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n add_scoreInfo(audFreshCritics_pat, scoreInfo.get_text(), 'audience_fresh_top_critics_count')\n\n # Extra: audience_rotten_top_critics_count\n pat_keywrd = '\"tomatometerTopCritics\".+?\"rottenCount\":'\n audRottenCritics_pat = pat_head1 + pat_keywrd + pat_tail2\n 
add_scoreInfo(audRottenCritics_pat, scoreInfo.get_text(), 'audience_rotten_rotten_critics_count')\n \n # Extra: tomatometer_avg_rating\n pat_keywrd = '\"avgScore\":'\n tmtAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(tmtAvgRating_pat, scoreInfo.get_text(), 'tomatometer_avg_rating')\n\n # Extra: audience_top_critics_avg_rating\n pat_keywrd = '\"tomatometerTopCritics\".+?\"avgScore\":'\n audTopCriticsAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audTopCriticsAvgRating_pat, scoreInfo.get_text(), 'audience_top_critics_avg_rating')\n\n # Extra: Score Sentiment\n pat_keywrd = '\"scoreSentiment\":'\n scoreSentiment_pat = pat_head1 + pat_keywrd + pat_tail1\n add_scoreInfo(scoreSentiment_pat, scoreInfo.get_text(), 'score_sentiment')\n\n # Extra: audience_avg_rating\n pat_keywrd = '\"averageRating\":'\n audienceAvgRating_pat = pat_head1 + pat_keywrd + pat_tail3\n add_scoreInfo(audienceAvgRating_pat, scoreInfo.get_text(), 'audience_avg_rating')\n print('done scraping movie info')\n return info",
"def extract_names(filename):\n raw_text = read_html(filename) \n \n #searching for the year\n year = re.search('(<h3 align=\"center\">Popularity in )(\\d\\d\\d\\d)',raw_text).group(2)\n \n #searching for the list of names\n list_of_names = re.findall('<td>(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>',raw_text)\n \n #pair each name with it's rank\n name_and_rank = [] \n for line in list_of_names:\n name_and_rank.append((line[1], line[0]))\n name_and_rank.append((line[2], line[0]))\n \n # sort the list alphabetically\n name_and_rank = sorted(name_and_rank, key = lambda x:x[0])\n name_and_rank = dict(name_and_rank)\n\n return year, name_and_rank[:20]",
"def parse_title_and_year(self, refstr):\n\n refstr = self.re_cleanup_unstructured.sub(', ', refstr, 1)\n match = self.rec_field_unstructured.match(refstr)\n if match:\n year = match.group('year')\n title = match.group('title')\n return title,year\n return None,None",
"def parse_movie_page(movie_url: str) -> Dict[str, str]:\n movie_page = get_soup_for_page(movie_url)\n\n # title and id\n movie_id = movie_url.split(\"/\")[-2]\n title = movie_page.find(\"div\", class_=\"title_wrapper\").find(\"h1\").get_text(\";\", strip=True).split(\";\")[0]\n\n # director and stars\n credit_summary_elements = movie_page.find_all(\"div\", class_=\"credit_summary_item\")\n director = credit_summary_elements[0].find(\"a\").text if len(credit_summary_elements) > 0 else \"\"\n if len(credit_summary_elements) > 2:\n stars_links = credit_summary_elements[2].find_all(\"a\")\n stars = [str(elem.text) for elem in stars_links[:-1]]\n else:\n stars = []\n movie_data = {\n \"id\": movie_id,\n \"title\": title,\n \"director\": director,\n \"stars\": stars,\n }\n print(movie_data)\n return movie_data",
"def imdb_crawl_by_year(year, verbose):\n _crawl_by_year_helper(year, verbose, True, False)",
"def movie(response):\n\n response = response.json()\n\n if response.get(\"Error\"):\n raise NotFoundError(response[\"Error\"])\n\n if response[\"Type\"] != \"movie\":\n raise NotFoundError(\"Type is {}, should be movie\".format(response[\"Type\"]))\n\n return [OrderedDict([(\"Title\", response[\"Title\"]),\n (\"ID\", response[\"imdbID\"]),\n (\"Rating\", response[\"imdbRating\"]),\n (\"Year\", response[\"Year\"].split(u\"\\u2013\")[0])])]",
"def get_info(url):\r\n soup = make_request(url)\r\n\r\n #get press release title\r\n title_text = soup.find(\"h2\", \"con-title\").text.strip()\r\n title = title_text.partition('\\n')[0]\r\n\r\n #get press release content and date\r\n div = soup.find_all(\"div\") #find div tags\r\n for ele in div:\r\n for div2 in ele(\"div\",\"text-right\"):\r\n if \"發佈日期\" in div2.text:\r\n text = ele.text\r\n date = re.findall(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", div2.text)[0]\r\n break #prevents reiterating upwards to all div parents\r\n return date, title, text",
"def get_movies(iurl):\n movies = []\n \n if iurl[-3:] == '?s=':\n search_text = GetSearchQuery('WatchOnlineMovies')\n search_text = urllib.quote_plus(search_text)\n iurl += search_text\n\n html = requests.get(iurl, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('postbox')})\n items = BeautifulSoup(html, parseOnlyThese=mlink)\n plink = SoupStrainer('div', {'class':'wp-pagenavi'})\n Paginator = BeautifulSoup(html, parseOnlyThese=plink)\n\n for item in items:\n title1 = item.h2.text\n try:\n title2 = title1.replace(\"Full Movie\", \"\")\n except:\n title2 = title1.replace(\"Watch Online\", \"\")\n try:\n title3 = title2.replace(\"Watch Online Placeholdernt\", \"\")\n except:\n title3 = title2.replace(\".\", \"\")\n try:\n title4 = title3.replace(\".\", \"\")\n except:\n title4 = title3.replace(\"Watch Online Placeholder\",\"\")\n try:\n title5 = title4.replace(\"Watch Online\", \"\")\n except:\n title5 = title4.replace(\"Download\",\"\")\n try:\n title6 = title5.replace(\"Watch Onlin\", \"\")\n except:\n title6 = title5.replace(\"Placeholder\",\"\")\n try:\n title7 = title6.replace(\"HD Pri\", \"\")\n except:\n title7 = title6.replace(\"Placeholder\",\"\")\n try:\n title8 = title7.replace(\" Watch On\", \"\")\n except:\n title8 = title7.replace(\"Placeholder\",\"\")\n try:\n title9 = title8.replace(\" Watch\", \"\")\n except:\n title9 = title8.replace(\"Placeholder\",\"\")\n try:\n title10 = title9.replace(\"Free Down\", \"\")\n except:\n title10 = title9.replace(\"Placeholder\",\"\")\n try:\n title11 = title10.replace(\"Free D\", \"\")\n except:\n title11 = title10.replace(\"Placeholder\",\"\")\n try:\n title12 = title11.replace(\"Free\", \"\")\n except:\n title12 = title11.replace(\"Placeholder\",\"\")\n try:\n title13 = title12.replace(\" F\", \"\")\n except:\n title13 = title12.replace(\"Placeholder\",\"\")\n try:\n title14 = title13.replace(\" Fr\", \"\")\n except:\n title14 = title13.replace(\"Placeholder\",\"\")\n try:\n title15 = title14.replace(\" Fre\", \"\")\n except:\n title15 = title14.replace(\"Placeholder\",\"\")\n try:\n title16 = title15.replace(\" HD\", \"\")\n except:\n title16 = title15.replace(\"Placeholder\",\"\")\n try:\n title17 = title16.replace(\" H\", \"\")\n except:\n title17 = title16.replace(\"Placeholder\",\"\")\n try:\n title18 = title17.replace(\" HD P\", \"\")\n except:\n title18 = title17.replace(\"Placeholder\",\"\")\n try:\n title19 = title18.replace(\" re\", \"\")\n except:\n title19 = title18.replace(\"Placeholder\",\"\")\n try:\n title120 = title19.replace(\" r\", \"\")\n except:\n title120 = title19.replace(\"Placeholder\",\"\")\n # Coloring Years\n try:\n title21 = title120.replace(\"(2018)\", \"[COLOR yellow](2018)[/COLOR]\")\n except:\n title21 = title120.replace(\"Placeholder\",\"\")\n try:\n title22 = title21.replace(\"(2016)\", \"[COLOR lightsalmon](2016)[/COLOR]\")\n except:\n title22 = title21.replace(\"Placeholder\",\"\")\n try:\n title23 = title22.replace(\"(2015)\", \"[COLOR lime](2016)[/COLOR]\")\n except:\n title23 = title22.replace(\"Placeholder\",\"\")\n # Language\n try:\n title24 = title23.replace(\"Hindi\", \"[COLOR green]Hindi[/COLOR]\")\n except:\n title24 = title23.replace(\"Placeholder\",\"\")\n try:\n title25 = title24.replace(\"Dubbed\", \"[COLOR cyan]Dubbed[/COLOR]\")\n except:\n title25 = title24.replace(\"Placeholder\",\"\")\n\n # Continued\n try:\n title26 = title25.replace(\" nt o\", \"\")\n except:\n title26 = title25.replace(\"Placeholder\",\"\")\n try:\n title27 = title26.replace(\" nt F\", 
\"\")\n except:\n title27 = title26.replace(\"Placeholder\",\"\")\n try:\n title28 = title27.replace(\" nt\", \"\")\n except:\n title28 = title27.replace(\"Placeholder\",\"\")\n try:\n title = title28.replace(\" Pr\", \"\")\n except:\n title = title28.replace(\"Placeholder\",\"\")\n\n url = item.h2.find('a')['href']\n try:\n thumb = item.find('img')['src'].strip()\n except:\n thumb = _icon\n movies.append((title, thumb, url))\n \n if 'next' in str(Paginator):\n\n nextli = Paginator.find('a', {'class':re.compile('page larger')})\n\n purl = nextli.get('href')\n pages = Paginator.findAll('span', {'class':re.compile('pages')})\n lastpg = pages[len(pages)-1].text\n title = 'Next Page.. (Currently in %s)' % (lastpg)\n movies.append((title, _icon, purl))\n \n return movies",
"def __get_movies(title):\n params = {\n 's': title,\n 'type': 'movie'\n }\n\n response = requests.get(API_URL + API_KEY, params=params).json()\n return response",
"def crawl_movie_profile(movie_name, year=None):\n\n # Search\n query = _TITLE_QUERY.format(title=_convert_title(movie_name))\n search_res = bs(request.urlopen(query), \"html.parser\")\n tables = search_res.find_all(\"table\", {\"class\": \"findList\"})\n if len(tables) < 1:\n return {}\n res_table = tables[0]\n if year is None:\n movie_row = res_table.find_all(\"tr\")[0]\n else:\n for row in res_table.find_all(\"tr\"):\n if (str(year) in str(row)) or (str(year-1) in str(row)):\n movie_row = row\n movie_code = re.findall(_MOVIE_CODE_REGEX, str(movie_row))[0]\n\n # Movie Profile\n cur_profile_url = _PROFILE_URL.format(code=movie_code)\n prof_page = bs(request.urlopen(cur_profile_url), \"html.parser\")\n\n # Extracting properties\n props = {}\n props['name'] = movie_name\n props['rating'] = _get_rating(prof_page)\n props['rating_count'] = _get_rating_count(prof_page)\n props['genres'] = _get_geners(prof_page)\n props['user_review_count'], props['critic_review_count'] = \\\n _get_review_counts(prof_page)\n props['metascore'] = _get_metascore(prof_page)\n props['year'] = _get_year(prof_page)\n props['duration'] = _get_duration(prof_page)\n props.update(_get_box_office_props(prof_page))\n props.update(_get_rating_props(movie_code))\n props.update(_get_business_props(movie_code))\n props.update(_get_release_props(movie_code))\n props.update(_get_reviews_props(movie_code))\n return props",
"def find_year(title):\n # find all patterns that match the year pattern\n matches = year_pattern.findall(title)\n # if any matches\n if matches:\n # record for convienence\n year = matches[-1]\n too_short = len(title) < 8\n # If the year is the title then return None\n if year == title:\n return None\n # If we have enough room for 1 block of 4 digits and its at the start\n elif too_short and title.startswith(year):\n return None\n else:\n return year",
"def parse_top_movies(html: str) -> ResultSet:\n\n soup = BeautifulSoup(html, \"html.parser\")\n return soup.find_all(\"div\", class_=\"lister-item-content\")",
"def genre_html(genre):\n genre = genre\n link = 'https://www.imsdb.com/genre/%s' % genre\n html = str(BeautifulSoup(requests.get(link).text, 'lxml'))\n\n start = html.find('<h1>Romance Movie Scripts</h1>')\n end = html[start:].find('</td>')\n return html[start:start+end]",
"def imdb_id(title):\n pass",
"def get_info_game(soup):\n info = []\n\n content = soup.select(\"div.fftit.s20.b\").pop()\n info.append(content.span.text)\n info.append(re.search(r'\\((.*?)\\)', content.text).group(1))\n\n for dt, dd in zip(soup.findAll(\"dt\"), soup.findAll(\"dd\")):\n if dt.text == \"Desarrollador:\":\n info.append(dd.text)\n elif dt.text == \"Editor:\":\n info.append(dd.text)\n elif dt.text == \"Género:\":\n info.append(dd.text)\n\n info.append(soup.find(\"span\", {\"itemprop\": \"releaseDate\"}).attrs['content'])\n\n info.extend([div.span.text for div in soup.select(\"div.dtc.wi36\")])\n\n return zip([\"name\", \"platform\", \"study\", \"publisher\", \"genre\", \"releaseDate\", \"3DJuegosScore\", \"userScore\"], info)",
"def meta_extract(doc):\n title_search = re.compile(r'(title:\\s*)(?P<title>.*(\\n *\\w.*)*)(\\nauthor:)', re.IGNORECASE)\n author_search = re.compile(r'(author:)(?P<author>.*)', re.IGNORECASE)\n translator_search = re.compile(r'(translator:)(?P<translator>.*)', re.IGNORECASE)\n illustrator_search = re.compile(r'(illustrator:)(?P<illustrator>.*)', re.IGNORECASE)\n title = re.search(title_search, doc).group('title')\n author = re.search(author_search, doc)\n translator = re.search(translator_search, doc)\n illustrator = re.search(illustrator_search, doc)\n if author: \n author = author.group('author')\n if translator:\n translator = translator.group('translator')\n if illustrator:\n illustrator = illustrator.group('illustrator')\n print \"Title: {}\".format(title)\n print \"Author(s): {}\".format(author)\n print \"Translator(s): {}\".format(translator)\n print \"Illustrator(s): {}\\n\".format(illustrator)\n # return title, author, illustrator, translator",
"def search_mApe_title (title,format):\n\n mape_main_url = 'https://www.mightyape.co.nz/'\n # Defining the url paths for search types\n mape_mv_category_url = 'movies-tv/movies/all?q='+parse.quote_plus(title)+\"+\"\n mape_mv_format_search_url = 'movieformat~'+format\n\n # This is the final url string\n\n searchUrl = mape_main_url+mape_mv_category_url+mape_mv_format_search_url\n #'https://www.mightyape.co.nz/movies-tv/movies/all?sort=2&q=movieformat~blu-ray'\n\n # Using a dictionary to store data, as contains list with objects\n mape_list = {}\n\n page = requests.get(searchUrl)\n tree = html.fromstring(page.content)\n\n data = tree.xpath(\n '//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]/div[@class=\"title\"]/a') # <--- WORKS\n\n data_alt = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]')\n\n print('Getting results from url:', searchUrl)\n print('Number of objects=', len(data_alt))\n count = 1\n\n for item in data_alt:\n simple_item = item.xpath('div[@class=\"title\"]/a')\n title = simple_item[0].text\n link = simple_item[0].get('href')\n format = item.xpath('div[@class=\"format\"]/text()')\n rating = item.xpath('div[@class=\"customer-rating\"]/span/span[@class=\"average\"]/text()')\n base_price = item.xpath('div[@class=\"price\"]/s/text()')\n hot_price = item.xpath('div[@class=\"price\"]/span[@class=\"price hot\"]/text()')\n normal_price = item.xpath('div[@class=\"price\"]/span[@class=\"price\"]/text()')\n if len(rating) > 0:\n # temp_mv = Movie_object(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n print(title, format[0], rating[0].strip(), mape_main_url + link, normal_price, base_price, hot_price)\n # mape_list[title] = temp_mv\n else:\n print(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n # temp_mv = Movie_object(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n # mape_list[title] = temp_mv\n\n count += 1\n\n return mape_list",
"def tmdb_info(title):\n result = False\n search_result = tmdb.Movies(title, limit=True)\n for movie in search_result.iter_results():\n result = movie\n break\n return result",
"def extract_movie_titles(dictionary):\n results = dictionary['Similar']['Results']\n lstmt = [d['Name'] for d in results]\n return lstmt",
"def check_ratings(self):\n\n self.browser.get('https://www.imdb.com/')\n\n for title in self.titles:\n input_bar = self.browser.find_element_by_id('navbar-query')\n input_bar.clear()\n\n input_bar.send_keys(title)\n input_bar.send_keys(Keys.RETURN)\n\n time.sleep(3)\n\n # Click on the first suggestion\n css_selector = \"div.findSection:nth-child(3) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(2) > a:nth-child(1)\"\n self.browser.find_element_by_css_selector(css_selector).click()\n time.sleep(3)\n\n # Pull details that will always be available\n score = str(self.browser.find_element_by_class_name('ratingValue').text)\n score = score.split('/10')[0].replace(',', '.')\n\n time.sleep(3)\n\n summary = str(self.browser.find_element_by_class_name('summary_text').text)\n subtext = str(self.browser.find_element_by_class_name('subtext').text)\n\n # Pull details that differ between movies and series\n try:\n duration = str(self.browser.find_element_by_class_name('bp_sub_heading').text) # Only for series\n if 'episodes' not in duration:\n duration = 'Some episodes'\n except Exception:\n # bp_sub_heading won't be found on a movie page\n duration = 'movie'\n\n if subtext[0].isdigit():\n # Split up the details from the subtext\n subtext_list = subtext.split(' | ')\n else:\n # Some movies' subtext starts with 'R' / 'PG-13'\n subtext_list = subtext.split(' | ')\n del subtext_list[0]\n\n # Duration\n if duration == 'movie':\n show_type = 'Movie'\n duration = subtext_list[0]\n try:\n year = datetime.datetime.strptime(subtext_list[2].split(' (')[0], '%d %B %Y').strftime('%Y')\n except ValueError:\n year = str(subtext_list[2].split(' (')[0][-4:])\n\n else: # series\n show_type = 'Serie'\n # Retrieve last season and its release date\n season_tab = str(self.browser.find_element_by_class_name('seasons-and-year-nav').text).strip()\n\n numbers = re.findall('[0-9]+', season_tab)\n latest_season = int(numbers[0])\n latest_year = int(max(numbers, key=lambda x: int(x)))\n\n duration += ' (%d Seasons in %d), %s per episode' % (latest_season, latest_year, subtext_list[0])\n\n year = re.findall('[0-9]+', subtext_list[2])[0]\n\n # Pull some more data out from the subtext\n genres = subtext_list[1].split(', ')\n\n # Pull details that are not always available\n creds_list = []\n creds = self.browser.find_elements_by_class_name('credit_summary_item')\n for c in creds:\n temp = str(c.text)\n if '|' in temp:\n temp = temp.split('|')[0]\n\n creds_list.append(temp)\n\n self.data_dict[title] = {\n 'score': score,\n 'summary': summary,\n 'duration': duration,\n 'credits': creds_list,\n 'genres': genres,\n 'released': year,\n 'type': show_type,\n }",
"def get_movie_info(movie_url):\n # 指定電影資訊的 CSS 選擇器\n rating_css = \"strong span\"\n genre_css = \".subtext a\"\n poster_css = \".poster img\"\n cast_css = \".primary_photo+ td a\"\n \n movie_doc = pq(movie_url)\n # 擷取資訊\n rating_elem = movie_doc(rating_css)\n movie_rating = float(rating_elem.text())\n genre_elem = movie_doc(genre_css)\n movie_genre = [x.text.replace(\"\\n\", \"\").strip() for x in genre_elem]\n movie_genre.pop()\n movie_poster_elem = movie_doc(poster_css)\n movie_poster = movie_poster_elem.attr('src')\n movie_cast_elem = movie_doc(cast_css)\n movie_cast = [x.text.replace(\"\\n\", \"\").strip() for x in movie_cast_elem]\n \n # 回傳資訊\n movie_info = {\n \"rating\": movie_rating,\n \"genre\": movie_genre,\n \"poster\": movie_poster,\n \"cast\": movie_cast\n }\n return movie_info",
"def get_title_artist(title_element): \n \n \n title_token = title_element.text.split(\" \")\n\n word = title_token.pop(0)\n artist = ''\n title = ''\n first = True\n while(title_token != [] and word != '-' and word[-1] != '-'):\n if first:\n first = False\n artist += (word)\n else:\n artist += ' '\n artist += word\n\n word = title_token.pop(0)\n \n if word[-1] == '-':\n word = word[:-1]\n artist += word\n \n if title_token == []:\n print(\"ERROR HERE: \", title_element.text)\n return None, None\n \n word = title_token.pop(0)\n first = True\n\n while(True):\n if first:\n first = False\n title += word\n else:\n title += ' '\n title += word\n if title_token != []:\n word = title_token.pop(0)\n if word == \"ALBUM\" or (word == \"EP\" and title_token[0] == \"REVIEW\"):\n break\n else:\n break\n return title, artist",
"def movie_spider(self, movieTag):\n index = 0\n logging.info(\"Start crawling tag: %s\" % movieTag)\n while index < self.MAX_NUM:\n root = \"https://movie.douban.com/tag/%s?start=%d&type=T\" % (movieTag, index)\n result = {}\n try:\n html = requests.get(root, headers=random.choice(self.headers)).content\n tree = etree.HTML(html.decode('utf-8'))\n items = tree.xpath(\"//table/tr[@class='item']\")\n if len(items) == 0:\n break\n index += len(items)\n for item in items:\n itemURL = item.xpath(\"td/a[@class='nbg']/@href\")[0].strip()\n itemHTML = requests.get(itemURL, headers=random.choice(self.headers)).content\n itemTree = etree.HTML(itemHTML.decode('utf-8'))\n title = itemTree.xpath(\"//h1/span[@property='v:itemreviewed']/text()\")[0].strip()\n info = itemTree.xpath(\"//div[@class='subject clearfix']/div[@id='info']\")[0]\n director = info.xpath(\".//a[@rel='v:directedBy']/text()\")\n scriptor = info.xpath(\"span\")[1].xpath(\"span/a/text()\") # scriptor is not well formatted\n actors = info.xpath(\".//a[@rel='v:starring']/text()\")\n genre = info.xpath(\".//span[@property='v:genre']/text()\")\n initDate = info.xpath(\".//span[@property='v:initialReleaseDate']/text()\")\n runtime = info.xpath(\".//span[@property='v:runtime']/text()\")\n rating = itemTree.xpath(\"//strong[@property='v:average']/text()\")[0].strip()\n \n result['title'] = title\n result['director'] = '/'.join(director[:])\n result['scriptor'] = '/'.join(scriptor[:])\n result['actors'] = '/'.join(actors[:])\n result['genre'] = '/'.join(genre[:])\n result['initDate'] = '/'.join(initDate[:])\n result['runtime'] = '/'.join(runtime[:])\n result['rating'] = rating\n\n self._movie_list.append(result)\n result = {}\n\n except Exception as e:\n logging.exception(\"Error while crawling tag: %s\" % movieTag)",
"def fetch_title(url):\n # validate url.\n if \"http\" not in url or len(url) <= 11:\n return \"\"\n r = requests.get(url)\n if r:\n soup = BeautifulSoup(r.text, 'html.parser')\n try:\n title = soup.select(\"title\")[0].string\n except:\n title=\"\"\n else:\n title=\"\"\n return title"
] | [
"0.6832312",
"0.6682347",
"0.66435677",
"0.6387017",
"0.637413",
"0.63639027",
"0.633812",
"0.6296725",
"0.61997265",
"0.6172732",
"0.6140767",
"0.61179507",
"0.60745186",
"0.60595584",
"0.59533656",
"0.59527224",
"0.5951921",
"0.5943582",
"0.5937595",
"0.59366393",
"0.5929587",
"0.59238297",
"0.59185255",
"0.59182614",
"0.5907752",
"0.5899259",
"0.58820254",
"0.58790547",
"0.58673507",
"0.5861203"
] | 0.7373919 | 0 |
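The negatives in the row above all reduce to the same scrape-and-strip pattern: locate a header element, read its text, and clean the year out of a parenthesised span. A minimal, runnable sketch of that pattern, assuming BeautifulSoup is installed; the class names and sample HTML are hypothetical, not taken from any one snippet:

from bs4 import BeautifulSoup

HTML = ('<h3 class="lister-item-header"><a>The Matrix</a>'
        '<span class="lister-item-year">(1999)</span></h3>')

def extract_title_and_year(html: str):
    soup = BeautifulSoup(html, "html.parser")
    header = soup.find("h3", class_="lister-item-header")
    title = header.a.get_text()
    # The year span holds text like "(1999)"; strip the parentheses and cast
    year = int(header.find("span", class_="lister-item-year").get_text().strip("()"))
    return title, year

print(extract_title_and_year(HTML))  # ('The Matrix', 1999)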
Calculate total episode count based on neighbouring sameextension files. | def get_tv_episodes(self) -> int:
return len(glob.glob(os.path.join(
os.path.dirname(self.file),
f"*{os.path.splitext(self.file)[-1]}"
))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_episodes(self):\n raise NotImplementedError",
"def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order",
"def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games",
"def get_num_episodes(self) -> int:\n return len(self.episodes)",
"def get_amount_episodes(episodes: str) -> int:\n a = 0\n for ep in episodes.split(', '):\n if '-' in ep:\n start,end = ep.split('-')\n a += int(end)-int(start)\n else:\n a += int(ep)\n return a",
"def episodes_done_inc(self):\n with _MonitorEnv._lock:\n self._episodes_done += 1\n return self._episodes_done",
"def get_num_instances_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n file_frames = float(shape[0])\n if self.mode_last_patch == 'discard':\n # the last patch that is always incomplete is discarded\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n elif self.mode_last_patch == 'fill':\n # the last patch that is always incomplete will be filled with zeros or signal, to avoid discarding signal\n # hence we count one more patch\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, 1 + int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n return num_instances_per_file",
"def get_num_instances_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n file_frames = float(shape[0])\n if self.mode_last_patch == 'discard':\n # the last patch that is always incomplete is discarded\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n elif self.mode_last_patch == 'fill':\n # the last patch that is always incomplete will be filled with zeros or signal, to avoid discarding signal\n # hence we count one more patch\n if self.patch_len == 25 and self.patch_hop == 13 and file_frames == 51:\n num_instances_per_file = 3\n else:\n num_instances_per_file = np.maximum(1, 1 + int(np.ceil((file_frames - self.patch_len - 1) / self.patch_hop)))\n\n return num_instances_per_file",
"def fileCounter(directory):",
"def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri",
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def parse_episode_part(filename):\n print_info('Extracting part num from {0}'.format(filename))\n baseline = ord('a')\n\n for regex in EPISODE_PART_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_part = m.group('Part').lower()\n print_info('Extracted Part: {0}'.format(extracted_part))\n\n # Convert into int\n part_num = ord(extracted_part) - baseline + 1\n return part_num\n\n return 0",
"def fileCount(self):\n pass",
"def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations",
"def find_n(self):\n metadata_files = [\n file for file in self.cfg[\"input_files\"]\n if \"tas/metadata.yml\" in file\n ]\n self.cfg[\"N\"] = {}\n for meta_file in metadata_files:\n n_identifyer = meta_file.split(\"/tas/\")[0].split(\"/tas_\")[-1]\n metadata = group_metadata(get_cfg(meta_file).values(), \"dataset\")\n self.cfg[\"N\"][n_identifyer] = len(metadata.keys()) - 1",
"def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)",
"def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)",
"def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n",
"def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()",
"def get_faces_nr(self):\r\n\r\n logger.debug('Getting number of faces in each frame')\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n\r\n if os.path.exists(self.track_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n\r\n return\r\n\r\n self.faces_nr = {}\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n for frame_dict in frame_list:\r\n\r\n frame_name = frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n if frame_name in self.faces_nr:\r\n\r\n self.faces_nr[frame_name] += 1\r\n\r\n else:\r\n\r\n self.faces_nr[frame_name] = 1\r\n\r\n # Save YAML file\r\n\r\n utils.save_YAML_file(self.faces_nr_path, self.faces_nr)",
"def count_exsiting_data(target_dir, check_all_number=True):\n num_color = len(glob.glob(osp.join(target_dir, '*', 'color', '*png')))\n\n if not check_all_number:\n num_color\n\n num_depth = len(glob.glob(osp.join(target_dir, '*', 'depth', '*npy')))\n num_camera_info = len(glob.glob(osp.join(\n target_dir, '*', 'camera_info', '*yaml')))\n\n assert num_color == num_depth == num_camera_info,\\\n '{} num_color:{} num_depth:{}, num_camera_info:{}'.format(\n target_dir, num_color, num_depth, num_camera_info)\n\n return num_color",
"def getFileCount(self) -> int:\n ...",
"def day_03_a() -> int:\n return len(get_visited_houses(read_instructions('aoc/aoc2015/input/03A.txt')))",
"def compute_way(episode):\n episode_classes, _ = tf.unique(episode.train_labels)\n way = tf.size(episode_classes)\n return way",
"def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)",
"def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total",
"def numberFiles(self):\n return self.n",
"def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number",
"def _find_epochs(self, history):\n \n epoch_count = len(history.history['val_loss'])\n\n return epoch_count"
] | [
"0.58617145",
"0.5788146",
"0.5688501",
"0.5621688",
"0.551865",
"0.5483126",
"0.54120064",
"0.54120064",
"0.5410899",
"0.5262325",
"0.52515316",
"0.5230109",
"0.52180594",
"0.52164894",
"0.5211398",
"0.52089113",
"0.52078176",
"0.5205271",
"0.5205262",
"0.5201948",
"0.5186348",
"0.51619774",
"0.514821",
"0.51342314",
"0.5114938",
"0.50808835",
"0.5070604",
"0.5068788",
"0.5057877",
"0.50557125"
] | 0.6543224 | 0 |
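The positive document above boils down to a single glob over sibling files that share the input file's extension. The same logic as a standalone sketch; the helper name and example path are assumptions for illustration:

import glob
import os

def count_same_extension_siblings(file: str) -> int:
    # e.g. ".mkv" (splitext keeps the leading dot)
    extension = os.path.splitext(file)[-1]
    # Match every file in the same directory with that extension
    pattern = os.path.join(os.path.dirname(file), f"*{extension}")
    return len(glob.glob(pattern))

# count_same_extension_siblings("/media/Show.S01/Show.S01E01.mkv") -> number of .mkv files in /media/Show.S01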
Retrieve the release name based on the file used during MediaInfo. If a season was specified but an episode number was not, the release is presumed to be a season pack, so the parent folder's name is used as the release name. | def get_release_name(self) -> str:
if self.season is not None and self.episode is None:
return os.path.basename(os.path.dirname(self.file))
return os.path.splitext(os.path.basename(self.file))[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getApplicationReleaseName(self) -> unicode:\n ...",
"def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname",
"def __get_parsed_video_file_path(season: int, episode: int) -> str:\n return rootpath.detect() + \"/\" + SAVE_FOLDER + \"s\" + str(season) + \"e\" + str(episode) + \".data\"",
"def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"",
"def getReleaseVersion(self, workingTowerName, infixStream):\n towerInfix = iccs_apex.whatInfixIsStream(workingTowerName)\n prefixStream, postfixStream = string.split(workingTowerName, towerInfix)\n releaseVersion, postVersion = string.split(postfixStream, \"wrk\")\n releaseTowerName = infixStream + releaseVersion + \"rel\"\n \n return releaseTowerName",
"def season_folder(cls, season):\r\n\r\n\t\t'''# Google Drive downloads replace these characters automatically\r\n\t\t# I'm implementing this in the code as well for convenience\r\n\t\tseason = season.replace(\"&\", \"_\")\r\n\t\tseason = season.replace(\"'\", \"_\")'''\r\n\r\n\t\t# Folder names are ANSI versions of the season name\r\n\t\t# This is important in names like \"Lé Unicorn\" which get\r\n\t\t# converted incorrectly as folder names\r\n\t\tseason = season.encode(encoding=\"utf-8\")\r\n\t\tseason = season.decode(encoding=\"cp1252\", errors=\"ignore\")\r\n\r\n\t\treturn season",
"def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release",
"def path(self):\n if self.filename:\n return os.path.join(self.season.path, self.filename)",
"def parse_season(filename):\n print_info('Attempting to parse {0}'.format(filename))\n print_info('Extracting season from {0}'.format(filename))\n for regex in SEASON_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_season = m.group('Season').lower()\n print_info('Extracted season: {0}'.format(extracted_season))\n\n season_num = int(extracted_season)\n if season_num is not None and season_num > 0:\n print_info('Season might be: {0}'.format(season_num))\n return 'S' + format_num(season_num)\n return 'S01'",
"def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)",
"def media_series_title(self):\n if lgtv[\"pairingKey\"] == 0:\n return \"Pin not set\"\n if self._currentSourceNumber == \"0\":\n return (\"{0} - CH{1:d} - {2}\").format(self._currentSourceName, self._currentChannelNumber, self._currentChannelName)\n else:\n return \"\"",
"def get_filename(self):\n return self.get_package_name() + '.' + ARCH + \".rpm\"",
"def makeReleaseFileName(cls, version: str) -> str:\n\n from peek_platform import PeekPlatformConfig\n\n return os.path.join(\n PeekPlatformConfig.config.platformSoftwarePath,\n 'peek-release-%s.tar.gz' % version)",
"def get_package_name(self, default=-1):\n specs = glob.glob(f'{self.distgit_repo().distgit_dir}/*.spec')\n if len(specs) != 1:\n if default != -1:\n return default\n raise IOError('Unable to find .spec file in RPM distgit: ' + self.qualified_name)\n\n spec_path = specs[0]\n with open(spec_path, mode='r', encoding='utf-8') as f:\n for line in f.readlines():\n if line.lower().startswith('name:'):\n return line[5:].strip() # Exclude \"Name:\" and then remove whitespace\n\n if default != -1:\n return default\n\n raise IOError(f'Unable to find Name: field in rpm spec: {spec_path}')",
"def media_title(self):\n if lgtv[\"pairingKey\"] == 0:\n return \"Pin not set\"\n if self._currentSourceNumber == \"0\":\n return self._currentProgram\n else:\n return self._currentSourceName",
"def _parse_fname_season(cmd_args):\n f_name_base = 'sfav2_CONUS_{}_to_{}'\n season_start = '093012' # Start month, day, & hour for seasonal accum\n\n date_in = adjust_date(cmd_args)\n\n if (not check_ftype(cmd_args)):\n print('{} not valid for seasonal accumulation period. Downloading as NetCDF'.format(cmd_args.f_type))\n f_type = 'nc'\n else:\n f_type = cmd_args.f_type\n\n # If we are in the new year of the winter season (i.e., Jan 2020 of the\n # 2019-2020 winter season), adjust the start year defining the winter season\n if (date_in.month < 9):\n start_yr = date_in.year - 1\n else:\n start_yr = date_in.year\n\n date_start = '{}{}'.format(start_yr, season_start)\n date_end = datetime.strftime(date_in, '%Y%m%d%H')\n\n f_name = f_name_base.format(date_start, date_end)\n\n f_name = '{}.{}'.format(f_name, f_type)\n\n return f_name",
"def derive_mod_name(self):\n\n # a) if we're lucky, this is a Fomod install w/ a modname attr\n # TODO: some non-Fomod mods still include an \"info.xml\" file\n if self.has_fomod:\n fname = self.fomod.modname.name\n # fix: the fomod name often includes a version number on the end (like \"Soul Gem Things v1.4.5\")\n vmatch = _version_format.search(fname)\n if vmatch:\n fname = fname[:vmatch.start()].strip()\n\n print(\"fomod found:\")\n print(\" orig:\", self.fomod.modname.name)\n print(\" name:\", fname)\n\n # return self.fomod.modname.name\n return fname\n\n # if not, we'll have to get clever\n\n # b) if the mod includes esp/bsa/etc. files, they're often\n # labeled with the mod's \"real\" name\n bname = os.path.basename\n split = os.path.splitext\n\n # check top 2 levels\n # accumulate names\n _names = []\n ext_re = re.compile(r\".*\\.(es[pm]|bsa)$\")\n for f in filter(lambda s: ext_re.search(s.lower()),\n self.archive_files):\n # if re.search(r\".*\\.(es[pm]|bsa)$\", f.lower()):\n _names.append(split(bname(f))[0])\n\n print(f\"names from esp/bsa ({len(_names)}):\")\n for n in _names:\n print(f\" {n}\")\n\n # c) see if we can figure it out from the archive name;\n # try to ignore the version numbers\n archive_name = self.arc_path.stem\n\n # archives downloaded from the nexus generally have\n # the mod name, then a hyphen followed by the modid, then\n # (optionally) another hyphen and version info\n m = _nexus_archive_name_format.match(archive_name)\n\n if m:\n name = m['name']\n\n # TODO: if we can get the modid, we should be able to look up the mod info on the nexus...though that would of course require writing an async web-request module...\n modid = m['modid']\n ver = m['version']\n\n if name:\n # ==> eventually, this should pull the name from the nexus\n\n # sometimes there's some extra stuff like (redundant)\n # version info on the end of the name\n exm = _extra_stuff.search(name)\n if exm:\n name = name[:exm.start()]\n\n if ver:\n ver = ver.replace(\"-\", \".\")\n\n print(\"Derived from archive name:\")\n print(\" name:\", name)\n print(\" modid:\", modid)\n print(\" version:\", ver)\n return name\n\n return \"\"",
"def bsw_getCurrentAssetMainFileName():\n projectShortName = ProjectNamingInputs().projectShortName\n # get asset UID from the kns_getAssetDetails function (second last return is assetUID).\n assetUID = bsw_getAssetDetails()[-2]\n if os.environ['BSW_PROJECT_TYPE'] == 'series':\n return projectShortName + '_' + assetUID.split('_')[1] + '_' + assetUID.split('_')[2] + '_' + \\\n assetUID.split('_')[-1] + '_' + assetUID.split('_')[-2] + '.ma'\n else:\n return projectShortName + '_' + assetUID.split('_')[1] + '_' + assetUID.split('_')[2] + '_' + \\\n assetUID.split('_')[-1] + '.ma'",
"def GetOutputFilename(self, directory=None):\n if self.forced_filename:\n logging.debug('Forced filename or pre-computed file name = %s', self.filename)\n return self.filename\n\n tags = dict()\n\n # Base tag\n tags['base'] = f\"{self['ARTIST']} - {self['DATE_RECORDED']} - {self['TITLE']}\"\n\n # Setup version subinfo\n tags['version'] = f\" ({self['VERSION']})\" if self[\"VERSION\"] else \"\"\n\n # Setup label / release subinfo\n channels = self.channels if self.channels != '2.0' else ''\n if self[\"ORIGINAL_MEDIUM\"] == \"CD\":\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {channels}\"\n else:\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {self['ORIGINAL_MEDIUM']} {channels}\"\n labeltag = labeltag.strip()\n tags['label'] = labeltag and f\" ({labeltag})\"\n\n # Setup disc tag\n if self[\"PART_NUMBER\"]:\n disctag = f\" (Disc {self['PART_NUMBER']}) {self['DISC_NAME']}\"\n else:\n disctag = f\" {self['DISC_NAME']}\"\n tags['disc'] = disctag.rstrip()\n\n # Merge into filename\n filename = f\"{tags['base']}{tags['version']}{tags['disc']}{tags['label']}{ext.WAV}\"\n # Replace invalid characters with either a dash or remove them\n filename = re.compile(\"[<>:/\\\\\\\\]\").sub(\"-\", filename)\n filename = re.compile(\"[|?*]\").sub(\"\", filename)\n # Replace invalid double quotes with valid single quotes\n filename = filename.replace('\"', \"'\")\n\n if directory:\n return os.path.join(directory, filename)\n return filename",
"def _prettyfilename(self):\n return f'{self.grandparentTitle} - {self.seasonEpisode} - {self.title}'",
"def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])",
"def media_season(self):\n media_status = self._media_status()[0]\n return media_status.season if media_status else None",
"def episode_title_for_tvdb(self):\n \n # strip out the year from the episode title:\n return re.sub('(Part )(?P<part>\\d+)','(\\g<part>)',self.episode_title)",
"def build_album_name(album):\n try:\n name, year = album.name, album.release_year\n except AttributeError:\n name, year = album.text(0), album.text(1)\n if year:\n name += f' ({year})'\n return name",
"def _get_name_constellation_specific(self) -> str:\n\n try:\n if self.is_archived:\n footprint_path = files.get_archived_path(self.path, r\".*\\.shp\")\n else:\n footprint_path = next(self.path.glob(\"*.shp\"))\n except (FileNotFoundError, StopIteration):\n raise InvalidProductError(\n \"Footprint shapefile cannot be found in the product!\"\n )\n\n # Open identifier\n name = files.get_filename(footprint_path)\n\n return name",
"def _get_track_name(self, filename):\n return os.path.basename(filename)",
"def get_package_repo_name(package_info):\n\n # should check that there is EXACTLY one line\n repo_lines = \\\n [line for line in package_info if line.startswith(\"From repo \")]\n\n # \"From repo : <repo name>\"\n # Get the value and remove white space.\n if len(repo_lines) > 0:\n repo_name = repo_lines[0].split(':')[1].strip()\n else:\n repo_name = None\n\n return repo_name",
"def get_release_date ():\n fname = os.path.join(\"doc\", \"changelog.txt\")\n release_date = \"unknown\"\n with open(fname) as fd:\n # the release date is on the first line\n line = fd.readline()\n mo = release_ro.search(line)\n if mo:\n release_date = mo.groups(1)\n return release_date",
"def get_competition_season_type(season):\n default_type = games.models.CompetitionSeason.winter\n try:\n season_name = season.name\n if season_name.find(\"/\") == -1:\n return games.models.CompetitionSeason.summer\n return default_type\n except Exception as e:\n return default_type",
"def seasonEpisode(self):\n return f's{str(self.seasonNumber).zfill(2)}e{str(self.episodeNumber).zfill(2)}'"
] | [
"0.59712",
"0.59365255",
"0.58621913",
"0.57642496",
"0.5731158",
"0.5726277",
"0.57143974",
"0.5686276",
"0.5618006",
"0.557107",
"0.5563853",
"0.5559816",
"0.5543877",
"0.55142844",
"0.55000263",
"0.5495673",
"0.5444121",
"0.5430501",
"0.5416234",
"0.54096",
"0.5329431",
"0.5307731",
"0.5286652",
"0.52817094",
"0.527324",
"0.5264124",
"0.5260512",
"0.5250874",
"0.5247147",
"0.524125"
] | 0.81937546 | 0 |
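The pack-vs-episode rule from the document above, reworked as a free function; the signature and example paths are assumptions, not the original API:

import os
from typing import Optional

def release_name(file: str, season: Optional[int] = None, episode: Optional[int] = None) -> str:
    if season is not None and episode is None:
        # Season pack: the parent folder carries the release name
        return os.path.basename(os.path.dirname(file))
    # Single episode or movie: file name without its extension
    return os.path.splitext(os.path.basename(file))[0]

# release_name("/x/Show.S01.1080p.WEB/Show.S01E01.mkv", season=1)             -> "Show.S01.1080p.WEB"
# release_name("/x/Show.S01.1080p.WEB/Show.S01E01.mkv", season=1, episode=1)  -> "Show.S01E01"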
Get a wide banner image from fanart.tv. Currently restricts banners to English-only. | def get_banner_image(self, tvdb_id: int) -> Optional[str]:
if not tvdb_id:
return None
if not self.fanart_api_key:
raise ValueError("Need Fanart.tv api key for TV titles!")
r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}")
if r.status_code == 404:
return None
res = r.json()
error = res.get("error message")
if error:
if error == "Not found":
return None
raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}")
banner = next((
x["url"] for x in (res.get("tvbanner") or [])
if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language
), None)
return banner | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_banner(self, width=300, height=85):\n img_path = IMG_PATH + os.sep + CARD_BANNER\n banner_img = Image.open(img_path)\n banner_img = banner_img.resize((width, height))\n return banner_img",
"def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )",
"def get_banner(self,context,request):\n ba = queryMultiAdapter((context,request), interfaces.IBanner)\n if not ba:\n return ''\n return ba()",
"def getBanner(outputScan):\n try:\n return str(outputScan.split(\", Banner: \", 1)[1][:12])\n #banner = re.search(r\"[0-9A-F]{12}\",outputScan, re.MULTILINE).group()\n #return str(banner)\n except Exception as e:\n print '\\033[91m'+\"ERROR_BANNER\"\n return \"BANNER_ERROR\"",
"def choose_banner(banners):\n # simple random\n n = random.randint(0, len(banners)-1)\n return banners[n]",
"def __get_high_res_url(country) -> str:\n wiki_stem = \"https://en.wikipedia.org\"\n country_page = requests.get(f\"{wiki_stem}/wiki/{country}\")\n country_doc = HTML(country_page.content)\n [v_card] = country_doc.xpath('.//table[@class=\"infobox geography vcard\"]')\n [flag_elem] = v_card.xpath('.//a[@class=\"image\" and contains(@title, \"Flag\")]')\n flag_page_url = f\"{wiki_stem}{flag_elem.attrib['href']}\"\n flag_page = requests.get(flag_page_url)\n doc = HTML(flag_page.content)\n [flag_url_elem] = doc.xpath('.//div[@id=\"file\"]/a/img')\n return f\"https:{flag_url_elem.attrib['src']}\"",
"def get_banner(conn) -> str:\n banner_data = conn.recv(1024)\n banner = banner_data.decode().strip()\n print('Banner: {}'.format(banner))\n return banner",
"def banner(self):\n return self._banner",
"def getImage(cardTitle, size=\"normal\"):\n page = requests.get(\"https://api.scryfall.com/cards/named?exact=\"+name)\n page_json = json.loads(page.content)\n image_link = page_json[\"image_uris\"][size]\n image_response = requests.get(image_link)\n img = Image.open(BytesIO(image_response.content))\n return img.resize((384, 535)).convert(\"1\")",
"def bbs_show_banner(tn, short = True):\n lines = cmd.lban(tn, short_banner = short)\n for line in lines:\n print(filter_tags(line))",
"def get_url_for_min_resolution(self, min_height, min_width, image):",
"def download_banner(self, banner_path):\n serie = self._root.find('Series')\n banner = unicode(serie.find('banner').text)\n if banner != '' and not os.path.isfile(banner_path):\n urllib.urlretrieve(self.URL_BANNER + banner, banner_path)",
"def page_title(id):\r\n\tswitcher = {\r\n\t\t\"404\": \"Error 404: Not Found - WWW2PNG\",\r\n\t\t\"api_activate\": \"API Key Activated - WWW2PNG\",\r\n\t\t\"api_help\": \"API Help - WWW2PNG\",\r\n\t\t\"api_request\": \"API Key Requested - WWW2PNG\",\r\n\t\t\"buried\": \"Manage Buried - WWW2PNG\",\r\n\t\t\"contact\": \"Contact Us - WWW2PNG\",\r\n\t\t\"default\": \"Free Webpage Screenshot Service API with Blockchain Anchoring - WWW2PNG\",\r\n\t\t\"error\": \"Error - WWW2PNG\",\r\n\t\t\"pp\": \"Privacy Policy - WWW2PNG\",\r\n\t\t\"tos\": \"Terms of Service - WWW2PNG\",\r\n\t}\r\n\treturn switcher.get(id, \"WWW2PNG\")",
"def get_image(result):\n article_id = result['id']\n id_ = article_id[14:]\n href = article_id[:14]\n\n #FIXME: not working\n image_url = \"http://www.jpress.nli.org.il/Olive/APA/NLI_heb/get/GetImage.ashx?kind=block&href=%s&id=%s&ext=.png\" %(href, id_)\n \n return image_url",
"def getThumb(url,tvdbId=None):\n\tret = None\n\tif (tvdbId is not None and Prefs['fanart'] is True):\n\t\tthumb = fanartScrapper.getRandImageOfTypes(tvdbId,['tvthumbs'])\n\t\tif thumb is None: thumb = url\n\t\turl=thumb\n\t\n\tif url==R(CRUNCHYROLL_ICON):\n\t\tret = url\n\telse:\n\t\tif url is not None:\n\t\t\ttry:\n\t\t\t\tdata = HTTP.Request(url, cacheTime=CACHE_1WEEK).content\n\t\t\t\tif url.endswith(\".jpg\"):\n\t\t\t\t\tret = DataObject(data, 'image/jpeg')\n\t\t\t\telif url.endswith(\".png\"):\n\t\t\t\t\tret = DataObject(data, 'image/png')\n\t\t\texcept Exception, arg:\n\t\t\t\tLog.Error(\"#####Thumbnail couldn't be retrieved:\")\n\t\t\t\tLog.Error(\"#####\" + repr(Exception) + repr(arg) + url)\n\t\t\t\tret = None\n\n\tif ret is None:\n\t\treturn R(CRUNCHYROLL_ICON)\n\telse:\n\t\treturn ret",
"def getNewsIconURL(newsBrain):",
"def banner(name):\n print \"#\"\n print \"# {0}\".format(name.encode('utf-8'))\n print \"#\"\n return name",
"def Banner():\n main_banner = pyfiglet.figlet_format(\" UTM NAT\", font = \"slant\")\n sub_banner1 = pyfiglet.figlet_format(\"tool\", font = \"isometric1\")\n sub_banner2 = \" -Generate a CSV file of Sophos UTM NAT statements-\"\n sub_banner3 = \" via REST API using the power of Python\"\n\n print()\n print('=' * 62)\n print(main_banner)\n print(sub_banner1)\n print()\n print(sub_banner2)\n print(sub_banner3)\n print()\n print('=' * 62)\n print()",
"def get_thumbnail(format):",
"def p_banner():\n return random.choice([banner, banner_two, banner_three, banner_four, banner_five])",
"def get_banner_layout(app):\n banner_layout = html.Div(className='row', id=\"banner\",\n children=[html.Div(\n html.Img(src=app.get_asset_url(\"252px-Rheem_logo.svg.png\"), style={\"width\": \"30%\",\n \"vertical-align\": \"middle\"}),\n className='two columns'),\n html.Div(html.H3(\"Odin Project: Heat Pump Water Heater Gen V Field Test\",\n className='header', id=\"title\", style={\"letter-spacing\": \"-1.6px\"}),\n className=\"ten columns\")],\n )\n return banner_layout",
"def get_berlin_picture(conv: V2DialogflowConversation) \\\n -> V2DialogflowConversation:\n conv.google.tell('Hier ist ein Bild aus Berlin!')\n\n # Use the user_storage field to ensure that the user is shown a different\n # image each time they invoke this intent. User_storage works like a\n # dictionary and is serialized with json.dumps.\n if not conv.google.user.user_storage.get('fernsehturm_shown'):\n conv.google.show_basic_card(FERNSEHTURM_CARD)\n conv.google.user.user_storage['fernsehturm_shown'] = True\n else:\n conv.google.show_basic_card(BRANDENBURGER_TOR_CARD)\n conv.google.user.user_storage['fernsehturm_shown'] = False\n\n return conv",
"async def olá(self):\r\n\t\tawait self.client.say('© Maddie 2017')\r\n\t\te = Embed()\r\n\t\te.set_image(url='https://cdn.discovery.pgsitecore.com/en-us/-/media/Olay_PathFinder/Images/a/OLAY%20TE%207IN1%20DEEP%20PENETRATING%20MOISTURE%20BODY%20WASH_Front.png?w=460&v=1-201705260605')\r\n\t\tawait self.client.say(embed=e)",
"def get_country_image_name(country):\n\n country = country.replace(\" \", \"-\").replace(\".\", \"\").lower()\n return \"%s.png\" % (country)",
"def getThumbUrl(url, tvdbId=None):\n\tif (tvdbId is not None and Prefs['fanart'] is True):\n\t\tthumb = fanartScrapper.getRandImageOfTypes(tvdbId,['tvthumbs'])\n\t\tif thumb is not None: return thumb\n\n\n\tif url==R(CRUNCHYROLL_ICON):\n\t\treturn url\n\t\n\treturn url",
"def l10n_img(ctx, url):\n return static(l10n_img_file_name(ctx, url))",
"def banner_url(self) -> typing.Optional[files.URL]:\n return self.make_banner_url()",
"def show_banner():\n print(\"\"\"\n _ _ _ _ _____ _______\n| | | | / \\ | | |_ _\\ \\ / / ____|\n| |_| | / _ \\ | | | | \\ \\ / /| _|\n| _ |/ ___ \\| |___ | | \\ V / | |___\n|_| |_/_/ \\_\\_____|___| \\_/ |_____|\n\n\nA super fast asynchronous http and https prober, to check who is (h)alive.\nDeveloped by gnc\n \"\"\")",
"def get_kegg_image(self):\n return 'http://rest.kegg.jp/get/%s/img' % self.kegg_id",
"def present_banner():\n writer(BANNER, FORMAT[\"BANNER\"])\n writer(\" \" * 30 + f\"version {VERSION}\")"
] | [
"0.63562655",
"0.5907053",
"0.58398026",
"0.56723255",
"0.55253977",
"0.55232245",
"0.55212253",
"0.5516398",
"0.532808",
"0.5248593",
"0.52476937",
"0.52444804",
"0.5240686",
"0.51916087",
"0.51215434",
"0.5034326",
"0.5020774",
"0.5014858",
"0.49858487",
"0.4975781",
"0.4969387",
"0.49642497",
"0.4920303",
"0.48985156",
"0.48932984",
"0.48927313",
"0.48841828",
"0.4860148",
"0.4858901",
"0.48506686"
] | 0.6869307 | 0 |
Return a list of brief subtitle overviews, one per subtitle track, e.g. "English, Forced, SubRip (SRT)"; "English, SubRip (SRT)"; "English, SDH, SubRip (SRT)"; "Spanish, Latin American (SDH), SubRip (SRT)". The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. The result is returned as a list of strings with the "- " marker already prepended to each entry. | def get_subtitle_print(subs: List[Track]) -> List[str]:
data = []
if not subs:
data.append("--")
for sub in subs:
line_items = []
# following sub.title tree checks and supports three different language and title scenarios
# The second scenario is the recommended option to choose if you are open to choosing any
# The third scenario should be used if you have nothing unique to state about the track
# | Language | Track Title | Output |
# | ------------ | ----------------------------- | --------------------------------------------- |
# | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |
# | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |
# | es / Spanish | None | - Spanish, SubRip (SRT) |
language = pycountry.languages.get(alpha_2=sub.language).name
if sub.title:
if language.lower() in sub.title.lower():
line_items.append(sub.title)
else:
line_items.append(f"{language}, {sub.title}")
else:
line_items.append(language)
line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)"))
line = "- " + ", ".join(line_items)
data += [
(" " + x if i > 0 else x)
for i, x in enumerate(textwrap.wrap(line, 64))
]
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_title(self) -> list:\n scanning = False # start of a title is found, this may be the second of later part of that.\n ret = [] # to return\n temp = [] # deal with mutiple line titles.\n for page in self.pdf.pages:\n text = page.extract_text()\n # it's possible that a blank page exists which will let text be None.\n if text is None:\n continue\n lines = text.split('\\n')\n\n for line in lines:\n if self.__is_part_of_title(line):\n # middle part of a title\n if scanning:\n temp.append(line)\n # find a new title\n else:\n scanning = True\n temp = [line]\n else:\n # just find an entire title\n if scanning:\n scanning = False\n ret.append(\"\".join(temp))\n # remove wrong titles ( maybe trigger words occur at other part of the document )\n for title in ret:\n if self.title_keyword not in title:\n ret.remove(title)\n return ret",
"def get_subtitles(self, title):\n return library.subtitles.get_subtitle_url(title)",
"def get_title(self):\n return [i['title'] for i in self]",
"def getatitle(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(r'\\s+', allcontent[i])\n if words[0] == \"Title\":\n for j in range(2, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))",
"def getTitle(movieInfo):\n if \"title\" in movieInfo:\n #We remove the punctuation\n title = \"\".join(c for c in movieInfo[\"title\"] if c not in punctuation)\n #We return the title as a list of words in the right format\n return [ _format(w) for w in title.split() ]\n else:\n raise AttributeError(\"%s instance has no attribute title\" % movieInfo)",
"def mush_title(title):\n words = title.split(\" \")\n mushed_title = \"\"\n for word in words:\n mushed_title += word\n return [mushed_title]",
"def book_title(title):\n # this will capitalize the first letter of every word\n title = title.title()\n pre_title = []\n pre_title = title.split(\" \")\n new_title = \"\"\n for word in pre_title:\n # If the word is the first word of the title it has to be capitalize\n if word != pre_title[0]:\n # If the word is in the small word list make it lower case\n if word.lower() in small_words:\n word = word.lower()\n new_title = new_title + word + ' '\n# Remove the lagging space \n return new_title.strip()",
"def title_words(self):\n\n if self._title_words == []:\n for s in self.title():\n for w in s.split():\n self._title_words.append(w)\n\n return self._title_words",
"def list_titles(genre):\n text = genre_html(genre)\n num_titles = text.count('title=')\n\n titles = []\n for i in range(num_titles):\n start = text.find('title=')\n end = text[start+7:].find('\">')\n title = text[start+7:start+end]\n titles.append(title)\n text = text[start+7:]\n\n return titles",
"def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)",
"def getlistofpossibletitles(fileitem,shows):\n title = []\n title.append(fileitem)\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n return title",
"def extract_subtitle_track(path_to_mkv):\n handler = SubtitleHandler()\n with open(path_to_mkv, \"rb\") as fp:\n mkvparse.mkvparse(fp, handler)\n\n return handler.subs",
"def getFullCourseTitle(self, brain):\n full_title = ''\n\n id = brain.getCourseId\n if id:\n full_title = '%s - ' %id\n full_title += brain.Title\n term = brain.getTerm\n if term:\n full_title += ', %s' %term\n\n return full_title",
"def Titles(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('titles', default)\n return [HEP.TitleObject(i) for i in tmp]",
"def job_subtitle(self, job):\n return str(job)[:max(8, self._project_min_len_unique_id())]",
"def check_title(title_list):\n for w_index in range(len(title_list)):\n title_list[w_index] = title_list[w_index].replace('_', ' ')\n return [word for word in title_list if word.istitle()]",
"def TitleInfo(currentFile):\n Title=[]\n with open(currentFile) as fileIn:\n print(':\\033[1;31mI\\033[1;m')\n print('PDB File:\\033[1;31m %s\\033[1;m'%currentFile)\n line = fileIn.readline()\n while line:\n if line.startswith('TITLE'):\n Title.append(line)\n\n line = fileIn.readline()\n if len(Title) == 1:\n Str = \"\".join(Title)\n x = Str.replace('TITLE', '')\n Str1 = x.lstrip()\n print('Title: %s'%Str1)\n if len(Title) > 1:\n #Str = \"\".join(l)\n t =(Title[0])\n z = (Title[1])\n t1 = t.replace('TITLE', '')\n z1 = z.replace('TITLE', '')\n z2 = z1.replace('2', '')\n t2 = t1.strip()\n z3 = z2.strip()\n print('Title:%s'%t2+z3)\n #return Title",
"def all_title() -> list:\n return [i[\"title\"] for i in Blogs_Manager.TablePost.all_query()]",
"def title_string(self):\n return ' '.join(self.title).replace(' - ', '')",
"def get_subtitles(self, index: int):\n\n match = self.re_subs[index - 1]\n start = convert_subs_time(match[1])\n end = convert_subs_time(match[2])\n subtitles = match[3]\n subtitles = clean_text(subtitles)\n\n return (subtitles, start, end)",
"def title(self):\n return asarray(title(self))",
"def title(self):\n return ' '.join(self._title)",
"def get_sub_title(self, article: BeautifulSoup):\n return self.get_text(article, self.parsing_template.sub_title)",
"def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title",
"def fix_subtitles(title):\n\n new_title = LINE_BREAK_PATTERN.sub(': ', title, 1)\n return LINE_BREAK_PATTERN.sub('; ', new_title)",
"def all_titles(our_data):\n return [album['album'] for album in our_data]",
"def print_title(title, subtitle=None):\n print(title)\n if subtitle:\n print(subtitle)\n bar_len = max(len(title), len(subtitle))\n else:\n bar_len = len(title)\n bar = '=' * bar_len\n print(bar)",
"def show_list(site, titles): # type: (str, List[str]) -> None\n print(u\"The latest tutorials from {}\".format(site))\n for article_id, title in enumerate(titles):\n print(u\"{:>3} {}\".format(article_id, title))",
"def get_book_titles(self, lib_db):\n titles = []\n conn = sqlite3.connect(lib_db)\n c = conn.cursor()\n for row in c.execute(\"SELECT ZTITLE FROM ZBKLIBRARYASSET WHERE ZTITLE <> '' AND ZTITLE <> 'none'\"):\n titles.append(row[0])\n conn.close()\n return titles",
"def getSubtitles(self):\n\n self.createSoupObject()\n self.getcustomerID()\n self.getToken()\n self.getTitle()\n\n if self.debug:\n print(self.title)\n\n self.getVideoType()\n if self.debug:\n print(self.videoType)\n\n if self.videoType == \"movie\":\n\n self.getAsinID1() # Method-1\n if self.debug:\n print(self.parametersDict['asin'])\n\n returnValue = self.standardFunctionCalls()\n if returnValue != 1:\n self.videoType = \"tv\"\n\n if self.videoType != \"movie\":\n\n self.getAsinID2()\n if self.debug:\n print(self.asinList)\n\n self.parametersDict['asin'] = self.asinList\n currentTitle = self.title\n\n try:\n returnValue = self.standardFunctionCalls()\n except:\n pass\n self.title = currentTitle\n\n return returnValue"
] | [
"0.6559989",
"0.626816",
"0.61477166",
"0.59452933",
"0.5862041",
"0.58259624",
"0.57575667",
"0.5716959",
"0.57007366",
"0.56698",
"0.566014",
"0.5631774",
"0.56170785",
"0.5601238",
"0.55479985",
"0.55277705",
"0.55247784",
"0.55187654",
"0.5466009",
"0.5463526",
"0.54340214",
"0.54149383",
"0.5401396",
"0.54010487",
"0.5399305",
"0.53931403",
"0.53860164",
"0.5380978",
"0.53784657",
"0.5361902"
] | 0.73892254 | 0 |
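A minimal usage sketch for the get_subtitle_print entry above. The Track objects are stood in by SimpleNamespace instances (only the language/title/format attributes the function reads are modelled), and pycountry must be installed for the language lookup; both stand-ins are assumptions, not part of the original module.

```python
from types import SimpleNamespace

# Hypothetical stand-ins for the Track objects the function expects.
subs = [
    SimpleNamespace(language="en", title=None, format="UTF-8"),
    SimpleNamespace(language="es", title="Latin American (SDH)", format="UTF-8"),
]

for line in get_subtitle_print(subs):
    print(line)
# - English, SubRip (SRT)
# - Spanish, Latin American (SDH), SubRip (SRT)
```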
The mins method returns the lower bounds of the action spaces' parameters. | def mins(self) -> Tensor:
return self._ranges[:, 0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mins(self):\n return self._mins",
"def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)",
"def mins(self):\n return self.intervals[:, 0]",
"def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX",
"def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv",
"def calculate_min_max_tiles(self):",
"def action_space(self):\n lower_bounds = np.array([])\n upper_bounds = np.array([])\n for joint in self._used_joints:\n joint_idx = self._joint_limits.joint_names.index(joint)\n if self._control_mode == 'position':\n lower_bounds = np.concatenate(\n (lower_bounds,\n np.array(self._joint_limits.position_lower[\n joint_idx:joint_idx + 1])))\n upper_bounds = np.concatenate(\n (upper_bounds,\n np.array(self._joint_limits.position_upper[\n joint_idx:joint_idx + 1])))\n elif self._control_mode == 'velocity':\n velocity_limit = np.array(\n self._joint_limits.velocity[joint_idx:joint_idx + 1]) * 0.1\n lower_bounds = np.concatenate((lower_bounds, -velocity_limit))\n upper_bounds = np.concatenate((upper_bounds, velocity_limit))\n elif self._control_mode == 'effort':\n effort_limit = np.array(\n self._joint_limits.effort[joint_idx:joint_idx + 1])\n lower_bounds = np.concatenate((lower_bounds, -effort_limit))\n upper_bounds = np.concatenate((upper_bounds, effort_limit))\n else:\n raise ValueError(\n 'Control mode %s is not known!' % self._control_mode)\n return gym.spaces.Box(\n np.concatenate((lower_bounds, np.array([0]))),\n np.concatenate((upper_bounds, np.array([100]))),\n dtype=np.float32)",
"def get_parameters_min(self):\n minValues = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n minValues[i] = p.get_min_value()\n i += 1\n return minValues",
"def _get_observation_lower_bound(self):\n lower_bound = -self._get_observation_upper_bound()\n lower_bound[-7] = 0.0\n lower_bound[-2:] = [self.min_speed, self.min_side_speed]\n return lower_bound",
"def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def potential_min(self):\n\n return self._args.min",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)",
"def return_extents(self):\n\n return [qm.tree.mins, qm.tree.maxs]",
"def min_range(self):\n return self._min_range",
"def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))",
"def cmin(self):\n return self[\"cmin\"]",
"def input_bounds(self):\n return self._min_input, self._max_input",
"def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))",
"def getCurrentAnimRange():\n return int(oma.MAnimControl.minTime().value), int(oma.MAnimControl.maxTime().value)",
"def min_mireds(self):\n return 175",
"def get_minimum():\n return [\n convert_variables([0.78547, 0.78547, 0.78547]),\n ]",
"def min_(*args, **kwargs):\n ...",
"def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row",
"def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params",
"def _get_minth(self):\n return self.__minth",
"def _get_minth(self):\n return self.__minth"
] | [
"0.65231216",
"0.63490486",
"0.6329805",
"0.6140933",
"0.604615",
"0.5979384",
"0.59507585",
"0.5943575",
"0.5881969",
"0.5867249",
"0.58545846",
"0.5735611",
"0.5735611",
"0.5695982",
"0.568224",
"0.5630734",
"0.56240106",
"0.55890894",
"0.556751",
"0.55503213",
"0.55368114",
"0.55276316",
"0.5522249",
"0.5487285",
"0.5485602",
"0.5477757",
"0.5475168",
"0.5452155",
"0.54489595",
"0.54489595"
] | 0.6706375 | 0 |
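A small sketch of the mins property's behaviour, assuming _ranges is an (N, 2) tensor of [lower, upper] rows, which is what the [:, 0] indexing implies:

```python
import torch

# Hypothetical parameter ranges: one [lower, upper] row per parameter.
ranges = torch.tensor([[0.0, 3.14], [-1.0, 1.0]])

lower_bounds = ranges[:, 0]  # what the mins property returns
print(lower_bounds)          # lower bounds: 0.0 and -1.0
```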
The maxs method returns the upper bounds of the action spaces' parameters. | def maxs(self) -> Tensor:
return self._ranges[:, 1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maxs(self):\n return self._maxs",
"def get_parameters_max(self):\n maxValues = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n maxValues[i] = p.get_max_value()\n i += 1\n return maxValues",
"def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)",
"def maxQ(self,state):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n \r\n for a in self.actions:\r\n q = self.Q(state,a)\r\n #print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)",
"def maxs(self):\n return self.intervals[:, 1]",
"def max_grains(self):\n index = self._ordered_input_names.index('max_grains')\n return self._inputs[index]",
"def max_positions(self):\n return self.args.max_positions",
"def maxQ(self,state):\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in self.actions:\r\n qCurr = self.Q[(state,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr \r\n return(maxQ,maxA)",
"def maxpoints(self):\n return self[\"maxpoints\"]",
"def maxQ(self,feat):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n for a in self.actions:\r\n q = self.Q(feat,a)\r\n print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)",
"def max_values(self, lower, upper):\n if not self.upper_bounds is None:\n return self.upper_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.upper_bounds = plus.dot(upper) + minus.dot(lower) + self.const\n \n return self.upper_bounds",
"def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")",
"def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")",
"def return_parameter_bounds(maximum_luminosity=20):\n return [(maximum_luminosity, maximum_luminosity + 3),\n (3 * 10 ** -4, 8 * 10 ** -3), (2., 350), (-8., -0.2),\n (-400, 400)]",
"def max_positions(self):\r\n return (self.args.max_source_positions, self.args.max_target_positions)",
"def potential_max(self):\n\n return self._args.max",
"def max_positions(self):\n return self.student.max_positions() # also needed in validation runs.",
"def max_range(self):\n return self._max_range",
"def input_bounds(self):\n return self._min_input, self._max_input",
"def maxTargets(self):\n return self._getAttribute(Attribute.maxTargets)",
"def get_max_q(self, actions, q2_state):\n\n action_values = [ qtron.forward_pass(q2_state) for qtron in actions.values() ]\n\n maxQ = max(action_values)\n\n return maxQ",
"def _max_in_bounds(self, max):\n if max >= self.valmax:\n if not self.closedmax:\n return self.val[1]\n max = self.valmax\n\n if max <= self.val[0]:\n max = self.val[0]\n return self._stepped_value(max)",
"def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n maxScore = {}\n bestVal = -(float(\"inf\"))\n maxAction = \"\"\n for action in gameState.getLegalPacmanActions():\n maxScore[action] = self.expectiminimax(gameState.generatePacmanSuccessor(action), self.depth)\n for x,y in maxScore.iteritems():\n if(bestVal< y):\n bestVal = y\n maxAction = x\n return maxAction",
"def max_positions(self):\n return (self.args.max_source_positions, self.args.max_target_positions)",
"def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]",
"def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]",
"def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row",
"def max_value(board): # the X player wants to maximize the score\n if terminal(board):\n return utility(board), None\n else:\n v = -math.inf\n move = None\n for action in actions(board):\n val, _ = min_value(result(board, action))\n # Check if returned Value is less than v if not return v and current action\n if val > v:\n # Assign v the maximum value for future evaluation\n v = max(v,val)\n # Keep track of action\n move = action\n # If best move then return it\n if v == 1:\n return v, move\n return v, move",
"def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))"
] | [
"0.6918863",
"0.66844064",
"0.6673597",
"0.6575035",
"0.6552709",
"0.6513679",
"0.6467321",
"0.6414459",
"0.62708175",
"0.62123346",
"0.620865",
"0.62085176",
"0.62085176",
"0.6191484",
"0.6156986",
"0.6142866",
"0.6112747",
"0.6100505",
"0.6096278",
"0.60705197",
"0.60639876",
"0.60580313",
"0.60451984",
"0.6045136",
"0.60360354",
"0.6034074",
"0.6019618",
"0.6010511",
"0.601019",
"0.5999817"
] | 0.6770987 | 1 |
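The companion sketch for maxs; taken together, the two columns of _ranges recover Box-style action bounds like those constructed in several of the negatives above (same assumed (N, 2) layout):

```python
import torch

ranges = torch.tensor([[0.0, 3.14], [-1.0, 1.0]])

lower = ranges[:, 0]  # mins
upper = ranges[:, 1]  # maxs
assert torch.all(upper >= lower)  # a well-formed range has upper >= lower
```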
The _generate_iterator method creates an iterator that runs over all possible parameter combinations. | def _generate_iterator(self) -> Iterable:
params: List[Tensor] = []
for angle_range in self._ranges:
lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps)
params.append(lin_space)
power: int
dims: int
for i in range(0, self._num_params):
power = len(self._ranges) - 1 - i
dims = i
params[i] = params[i].repeat_interleave(self._num_steps ** power)
params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten()
return zip(*params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n items = sorted(p.items())\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params",
"def __iter__(self):\n leaf_paths, leaf_vals = self._find_combinatorial_leaves()\n return self._combinations_generator(leaf_paths, leaf_vals)",
"def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n modstr = '%s__' % self.modality\n items = sorted([(k.replace('clf__'+modstr, ''), v) for k, v in p.items() if modstr in k])\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params",
"def __iter__(self):\n return self._product_generator()",
"def __http_requests_generator(request_template, parameters):\n for payload in itertools.product(*parameters):\n yield request_template.format(*payload), payload",
"def __iter__(self):\n yield from self.gen",
"def generator(self) -> Iterator[Tuple[int, int, complex]]:\n for inda in range(self._core.lena()):\n alpha_str = self._core.string_alpha(inda)\n for indb in range(self._core.lenb()):\n beta_str = self._core.string_beta(indb)\n yield alpha_str, beta_str, self.coeff[inda, indb]",
"def parameters(self):\n for parameters in self:\n for parameter in parameters:\n yield parameter",
"def generate_assignment(parameters):\n if len(parameters) == 0:\n yield []\n raise StopIteration()\n cp_pars = copy.deepcopy(parameters)\n par, values = cp_pars.popitem()\n for val in values:\n for r in generate_assignment(cp_pars):\n yield r + [(par,val)]",
"def map(_, params):\n import numpy as np\n from itertools import product\n from random import shuffle\n\n if 'param_set' in params:\n parameter_sets = params['param_set']\n else:\n alphas = params['alphas']\n Vs = params['Vs']\n gammas = params['gammas']\n parameter_sets = [item for item in product(alphas, gammas, Vs)]\n shuffle(parameter_sets)\n\n ## discretize the parameter configurations and equitably distribute\n ## them for the next map instance to deal with.\n chunk_length = len(parameter_sets)/params['nprocs']\n leftover = len(parameter_sets) % params['nprocs']\n for n in xrange(params['nprocs']):\n if n < leftover:\n left = n*(1+chunk_length)\n to_yield = parameter_sets[left:left+1+chunk_length]\n else:\n left = leftover*(1+chunk_length) + (n-leftover)*chunk_length\n to_yield = parameter_sets[left:left+chunk_length]\n #print n, to_yield, len(to_yield)\n yield (n, to_yield)",
"def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def __iter__():",
"def __iter__():",
"def __iter__():",
"def __iter__():",
"def __iter__(self):\n return iproduct(*self.sets)",
"def iterator(self):\n yield",
"def __iter__(self):\n for o in self._iter:\n yield o",
"def _get_param_iterator(self, modality):\n return ParameterGridSeparate(self.param_grid, modality)",
"def iterparams(params: Dict[str, List[Any]]) -> Dict[str, Any]:\n for set in product(*params.values()):\n yield dotdict(zip(params.keys(), set))",
"def _generate_combinations(self, param_idx, params):\n\n if param_idx == len(self.grid) - 1:\n # last parameter, just return list of values for this parameter\n return [[value] for value in self.grid[params[param_idx]]]\n else:\n subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations\n result = []\n\n # iterate over all values of current parameter\n for value in self.grid[params[param_idx]]:\n for subcombination in subcombinations:\n result.append([value] + subcombination)\n\n return result",
"def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)",
"def __iter__(self):\n return self.new_generator()",
"def iter_jobs(self):\n for param in self._parameters:\n yield param",
"def iter_params(self):\n for var, val in self._params.iteritems():\n yield var, val",
"def args_generator(args, num_exps, grid_search):\n od = OrderedDict(args)\n\n if grid_search:\n keys = [param for param in od]\n\n for i, vals in enumerate(itertools.product(*[value for param, value in od.items()])):\n yield zip(keys + ['-exp-id'], [str(val) for val in vals] + [str(i)])\n else:\n for _ in range(num_exps):\n args_setting = [(pname, str(next(pvalue))) for pname, pvalue in od.items()]\n\n yield args_setting",
"def _build_iterable(self):",
"def __iter__(self):\n from sage.misc.mrange import cartesian_product_iterator\n\n if self._cd._length == 1:\n if self._cd._degree == 1:\n yield self([[0]])\n return\n\n S = self._cd._sym\n profile = list(self._profile)[:-1]\n for p in cartesian_product_iterator([S.conjugacy_class(pi)\n for pi in profile]):\n if self._cd._connected and not perms_are_connected(p, self._cd._degree):\n continue\n c = self._cd(list(p) + [None], check=False)\n if c.profile() == self._profile:\n yield c",
"def _iterator_codegen(resty):\n\n def codegen(context, builder, sig, args):\n [d] = args\n [td] = sig.args\n iterhelper = context.make_helper(builder, resty)\n iterhelper.parent = d\n iterhelper.state = iterhelper.state.type(None)\n return impl_ret_borrowed(\n context,\n builder,\n resty,\n iterhelper._getvalue(),\n )\n\n return codegen"
] | [
"0.7333558",
"0.6785712",
"0.67566705",
"0.67345154",
"0.66711015",
"0.66702646",
"0.66659456",
"0.64822334",
"0.6465031",
"0.64270854",
"0.64160955",
"0.6409563",
"0.638886",
"0.638886",
"0.638886",
"0.638886",
"0.6387711",
"0.6350889",
"0.6341186",
"0.6333276",
"0.6317058",
"0.63053447",
"0.6263301",
"0.62535733",
"0.62523204",
"0.6237615",
"0.621541",
"0.6198058",
"0.6197711",
"0.6158161"
] | 0.73833257 | 0 |
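The repeat_interleave/broadcast construction in _generate_iterator enumerates the Cartesian product of the per-parameter linspaces, with the first parameter varying slowest. A sketch of the equivalent grid via itertools.product, assuming two parameters and num_steps=3:

```python
import itertools
import torch

ranges = [(0.0, 1.0), (10.0, 20.0)]  # hypothetical parameter ranges
num_steps = 3

axes = [torch.linspace(lo, hi, steps=num_steps) for lo, hi in ranges]
for combo in itertools.product(*axes):
    print(tuple(float(v) for v in combo))
# 9 tuples in the same order the tensor version yields:
# (0.0, 10.0), (0.0, 15.0), (0.0, 20.0), (0.5, 10.0), ...
```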
Function to rotate one vector to another, inspired by vrrotvec.m in MATLAB | def vrrotvec(a,b):
a = normalize(a)
b = normalize(b)
ax = normalize(np.cross(a,b))
angle = np.arccos(np.minimum(np.dot(a,b),[1]))
if not np.any(ax):
absa = np.abs(a)
mind = np.argmin(absa)
c = np.zeros(3)
c[mind] = 1  # basis vector least aligned with a, so the cross product is non-zero
ax = normalize(np.cross(a,c))
r = np.concatenate((ax,angle))
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def svecRotate(v, T):\n \n return svec(Rotate(smat(v), T))",
"def _rot(theta, vec):\n\n rmat = scipy.array([[scipy.cos(theta), -1*scipy.sin(theta)],\n [scipy.sin(theta), scipy.cos(theta)]]) \n return scipy.dot(rmat,vec)",
"def rotate_vectors(q, vec):\n rot_vec = []\n for i, v in enumerate(vec):\n rot_vec.append(q.rotate(v))\n return rot_vec",
"def vec_rotate_right(x):\n return jnp.roll(x, 1)",
"def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]",
"def rotate(self, other: (float, \"Vector\")) -> \"Vector\":\n if isinstance(other, (int, float)):\n return self._rotate2D(other)\n else:\n return self._matrix_mul(other)",
"def rotate(initial_vector, rotated_vector, other_vectors):\n\n init_vec_norm = normalize(initial_vector)\n rot_vec_norm = normalize(np.asarray(rotated_vector))\n middle_vec_norm = normalize(init_vec_norm + rot_vec_norm)\n first_reflector = init_vec_norm - middle_vec_norm\n second_reflector = middle_vec_norm - rot_vec_norm\n Q1 = householder(first_reflector)\n Q2 = householder(second_reflector)\n reflection_matrix = np.matmul(Q2, Q1)\n rotated_vectors = np.matmul(other_vectors, np.transpose(reflection_matrix))\n return rotated_vectors",
"def rotate (vect, angle, axis):\n\n cosine = np.cos (angle)\n sine = np.sin (angle)\n\n return (vect * cosine + \\\n sine * np.cross (axis, vect) + \\\n np.dot (axis, vect) * (1 - cosine) * axis)",
"def RotateVector(rotation, vector):\n return Vector(\n rotation.rot[0][0]*vector.x + rotation.rot[1][0]*vector.y + rotation.rot[2][0]*vector.z,\n rotation.rot[0][1]*vector.x + rotation.rot[1][1]*vector.y + rotation.rot[2][1]*vector.z,\n rotation.rot[0][2]*vector.x + rotation.rot[1][2]*vector.y + rotation.rot[2][2]*vector.z,\n vector.t\n )",
"def rotvec2r(theta, v):\n v = arg2array(v);\n ct = cos(theta)\n st = sin(theta)\n vt = 1 - ct\n r = mat([[ct, -v[2] * st, v[1] * st], \\\n [v[2] * st, ct, -v[0] * st], \\\n [-v[1] * st, v[0] * st, ct]])\n return v * v.T * vt + r",
"def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)",
"def rotate(v: vect2d, angle: float) -> vect2d:\n vector = ((v.x * math.cos(angle) - v.y * math.sin(angle)),\n (v.x * math.sin(angle) + v.x * math.cos(angle)))\n return vector",
"def build_rotation_matrix(vec_new, vec_orig):\n theta = np.arccos(np.dot(vec_new, vec_orig) / (np.linalg.norm(vec_new) * np.linalg.norm(vec_orig)))\n\n R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n\n return R",
"def rotvec2tr(theta, v):\n return r2t(rotvec2r(theta, v))",
"def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R",
"def xy_rotation(vector,theta):\r\n R = np.array([[np.cos(theta), -np.sin(theta),0],\r\n [np.sin(theta), np.cos(theta),0],\r\n [0,0,1]\r\n ])\r\n return np.dot(R,vector)",
"def rotate(q, v):\n if v.ndim == 1:\n qv = np.append(v,0)\n else:\n qv = np.hstack([v,np.zeros((len(v),1))])\n out = mult(q,qv)\n out = mult(out, inv(q))\n return out[:,:3]",
"def vecRot(data, seq, euler_angles, **kwargs):\n from scipy.spatial.transform import Rotation as R\n r = R.from_euler(seq, euler_angles, **kwargs)\n return r.apply(data)",
"def test_rotate_vec(self):\n\n quat = Quat.from_axis_angle_deg(Vec3(-1, -1, -1), 180.)\n vec = Vec3(1, 0, 0)\n\n rotated_vec = quat.rotate_vec(vec)\n\n self.assertAlmostEqual(-1/3.0, rotated_vec.x)\n self.assertAlmostEqual(2/3.0, rotated_vec.y)\n self.assertAlmostEqual(2/3.0, rotated_vec.z)",
"def rotate_a(X,vector):\r\n\taxis_vector = (math.radians(-X)) * np.array([1,0,0])\r\n\tr = R.from_rotvec(axis_vector)\r\n\treturn list(r.apply(vector))",
"def compute_rot(v):\n if v[0] >= 0:\n M = nd.eye(len(v))\n else:\n M = - nd.eye(len(v))\n for i in range(1, len(v)):\n if v[i] == 0:\n continue\n rot_minus_theta = nd.eye(len(v))\n temp = nd.dot(M, v)\n\n theta = nd.arctan(temp[i]/temp[0])\n c = nd.cos(theta)\n s = nd.sin(theta)\n\n rot_minus_theta[0,0] = c\n rot_minus_theta[i,i] = c\n rot_minus_theta[0,i] = s\n rot_minus_theta[i,0] = -s\n\n M = nd.dot(rot_minus_theta, M)\n return M",
"def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))",
"def rotate_vector(vector, angle, anchor=(0, 0)):\n x = vector.x - anchor[0]\n y = vector.y - anchor[1]\n\n cos_theta = cos(angle)\n sin_theta = sin(angle)\n\n nx = x*cos_theta - y*sin_theta\n ny = x*sin_theta + y*cos_theta\n\n nx = nx + anchor[0]\n ny = ny + anchor[1]\n\n return Vector2D(nx, ny)",
"def mirror_vector_vector(v1, v2):\n return subtract_vectors(v1, scale_vector(v2, 2 * dot_vectors(v1, v2)))",
"def vec_rotate_left(x):\n return jnp.roll(x, -1)",
"def apply_rotation_only(self, vector):\n return np.dot(self.rotation_matrix, vector)",
"def rot(vec, angle, degrees=True):\r\n if degrees:\r\n angle = np.radians(angle)\r\n r = np.array([[np.cos(-angle), -np.sin(-angle)],\r\n [np.sin(-angle), np.cos(-angle)]])\r\n return r.dot(vec)",
"def get_rotation_vector(R):\n v = np.array([R[1,2] - R[2,1],\n R[2,0] - R[0,1],\n R[0,1] - R[1,0]]) # eq. 3.12 in [1], pp.66\n return v",
"def vec_rotate_r2g(al, be, ga, lon, lat, urot, vrot, flag):\n\n # first get another coordinate\n if flag == 1:\n (rlon, rlat) = scalar_g2r(al, be, ga, lon, lat)\n else:\n rlon = lon\n rlat = lat\n (lon, lat) = scalar_r2g(al, be, ga, rlon, rlat)\n\n # then proceed...\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n\n rotate_matrix = np.linalg.pinv(rotate_matrix) \n \n rlat = rlat * rad\n rlon = rlon * rad\n lat = lat * rad\n lon = lon * rad\n\n # vector in rotated Cartesian\n txg = -vrot * np.sin(rlat) * np.cos(rlon) - urot * np.sin(rlon)\n tyg = -vrot * np.sin(rlat) * np.sin(rlon) + urot * np.cos(rlon)\n tzg = vrot * np.cos(rlat)\n\n # vector in geo Cartesian\n txr = (\n rotate_matrix[0, 0] * txg\n + rotate_matrix[0, 1] * tyg\n + rotate_matrix[0, 2] * tzg\n )\n tyr = (\n rotate_matrix[1, 0] * txg\n + rotate_matrix[1, 1] * tyg\n + rotate_matrix[1, 2] * tzg\n )\n tzr = (\n rotate_matrix[2, 0] * txg\n + rotate_matrix[2, 1] * tyg\n + rotate_matrix[2, 2] * tzg\n )\n\n # vector in geo coordinate\n v = (\n -np.sin(lat) * np.cos(lon) * txr\n - np.sin(lat) * np.sin(lon) * tyr\n + np.cos(lat) * tzr\n )\n u = -np.sin(lon) * txr + np.cos(lon) * tyr\n\n u = np.array(u)\n v = np.array(v)\n\n return (u, v)",
"def cpvrotate(self, other):\n return Vec2d(self.x*other.x - self.y*other.y, self.x*other.y + self.y*other.x)"
] | [
"0.73906195",
"0.7337561",
"0.72559744",
"0.70578516",
"0.702117",
"0.6936926",
"0.69332725",
"0.6840454",
"0.68362135",
"0.6819142",
"0.6817099",
"0.67670673",
"0.6754099",
"0.6747304",
"0.67367476",
"0.6716166",
"0.6703668",
"0.6696891",
"0.66868734",
"0.6665173",
"0.66038626",
"0.6592167",
"0.65872097",
"0.6586955",
"0.65862936",
"0.6567359",
"0.656555",
"0.6548028",
"0.6525666",
"0.6516882"
] | 0.7442148 | 0 |
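A quick numerical check of vrrotvec's axis-angle output, assuming normalize divides by the Euclidean norm (it is not shown in this excerpt). scipy's Rotation.from_rotvec expects an axis scaled by the angle, so the [axis, angle] result is repacked:

```python
import numpy as np
from scipy.spatial.transform import Rotation

def normalize(v):
    # assumed helper: unit vector along v
    return np.asarray(v, dtype=float) / np.linalg.norm(v)

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])

r = vrrotvec(a, b)                        # [ax, ay, az, angle]
rotation = Rotation.from_rotvec(r[:3] * r[3])
print(rotation.apply(a))                  # ~[0., 1., 0.] -- a rotated onto b
```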
Sort the buses in descending order of period, tagging each bus with its position in the sequence, which is its c value. >>> list(prep_input(EXAMPLE_BUSES)) [(59, 4), (31, 6), (19, 7), (13, 1), (7, 0)] | def prep_input(buses):
return sorted([(bus, offset)
for offset, bus
in enumerate(buses)
if bus], reverse=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_bc_freqs(bc_freqs):\r\n\r\n bcs_list = []\r\n for curr_key in bc_freqs.keys():\r\n bcs_list.append((curr_key, int(bc_freqs[curr_key])))\r\n\r\n bcs_list = sorted(bcs_list, key=itemgetter(1), reverse=True)\r\n\r\n sorted_bcs = []\r\n for curr_bc in bcs_list:\r\n sorted_bcs.append(\"%s\\t%d\" % (curr_bc[0], curr_bc[1]))\r\n\r\n return sorted_bcs",
"def bsort(seq, cmp):\n sorted = False # assume the seq is not sorted to start with\n while not sorted:\n sorted = True # assume it's already sorted correctly\n for index, value in enumerate(seq): # for every element in seq\n if index > 0: # past the first..\n if not cmp(seq[index-1], value): # if this element is out of order\n sorted = False # then the list is not sorted yet\n seq[index-1], seq[index] = seq[index], seq[index-1] # and swap it",
"def calculate_finishing_order(x):\n\t# Creates a list of keys which are sorted by their values\n\n\treturn [sailor_names for sailor_names,sailorValues in sorted(x.items(), key=lambda y: y[1], reverse=True)]",
"def sort_auto(self):\n key = lambda buz1, buz2: buz1 if buz1.trip_duration <= buz2.trip_duration else buz2\n self.autobuze.sort(key=key)",
"def _sort_manber_myers(self, suffix_pos: List) -> List:\n bucket = self._create_bucket(suffix_pos)\n for _, v in sorted(bucket.items()):\n if self.debug: print(f\"_sort_manber_myers function: bucket value: {v}\") \n if len(v) > 1:\n # recursive call for next stage\n self.stage *= 2\n self._sort_manber_myers(v)\n else:\n # otherwise add starting position of suffix to result\n self.suffixes.append(v[0]) \n if self.debug: print(f\"_sort_manber_myers function: suffixes: {self.suffixes}\\n\") \n return self.suffixes",
"def _reversed_insts_to_header(self, bb):\n rinsts = []\n for bb in self._bbs_to_header(bb):\n rinsts.extend(reversed(bb.insts))\n return rinsts",
"def sort_by_bfile(self, bfiles, events_by_b):\n if len(bfiles) > 1:\n print \"Warning: Several .b files detected. Using the first in list:\"\n print bfiles[0]\n with open(bfiles[0], 'r') as bf:\n for line in bf:\n data = line.split()\n try:\n jobid = data[0]\n impb = float(data[1])\n if impb >= self._bmin and impb <= self._bmax:\n events_by_b.append(self.outputname(jobid))\n except ValueError:\n continue",
"def bordasOf(self, bundle):\n\t\treturn sorted([self.borda[item] for item in bundle], reverse=True)",
"def comb_sort(data):\n shrink_factor = 1.3\n gap = len(data)\n swapped = True\n i = 0\n\n while gap > 1 or swapped:\n # Update the gap value for a next comb\n gap = int(float(gap) / shrink_factor)\n\n swapped = False\n i = 0\n\n while gap + i < len(data):\n if data[i] > data[i + gap]:\n # Swap values\n data[i], data[i + gap] = data[i + gap], data[i]\n swapped = True\n i += 1\n\n return data",
"def getRevCodonSeqs(self):\r\n compDict = {'A': 't', 'T': 'a', 'G': 'c', 'C': 'g'} # nuc compliments for reverse strand\r\n revPep = [] # list to hold the temporary reverse peptides before incorporation into the complete list\r\n for seq in self.allPepSeqs:\r\n revSeq = seq[::-1] # reverses the strand to be prepped for nt compliments\r\n for nuc in compDict:\r\n revSeq = revSeq.replace(nuc, compDict[nuc]) # replaces nt's with their compliments\r\n revSeq = revSeq.upper()\r\n revPep.append(revSeq)\r\n for i in revPep:\r\n self.allPepSeqs.append(i) # adds the reverse strand peptide to the list of possible peptide seqs\r\n return",
"def sort_currency_list_if_changed(self):\r\n currency_list = self.gox.wallet.keys()\r\n if len(currency_list) == len(self.sorted_currency_list):\r\n return\r\n\r\n # now we will bring base and quote currency to the front and sort the\r\n # the rest of the list of names by acount balance in descending order\r\n if self.gox.curr_base in currency_list:\r\n currency_list.remove(self.gox.curr_base)\r\n if self.gox.curr_quote in currency_list:\r\n currency_list.remove(self.gox.curr_quote)\r\n currency_list.sort(key=lambda name: -self.gox.wallet[name])\r\n currency_list.insert(0, self.gox.curr_quote)\r\n currency_list.insert(0, self.gox.curr_base)\r\n self.sorted_currency_list = currency_list",
"def buble_sort(l):\r\n for i in range(len(l)):\r\n for j in range(i+1, len(l)):\r\n if (l[j-1]>l[j]):\r\n l[j-1], l[j] = l[j], l[j-1]",
"def bubbleSort(list):",
"def bubbleSort(sequence):\n n = len(sequence)\n # Perform n-1 bubble operations on the sequence\n for i in range(n - 1):\n # Bubble the largest item to the end.\n for j in range(n - i - 1):\n if sequence[j] > sequence[j+1]:\n sequence[j], sequence[j+1] = sequence[j+1], sequence[j]",
"def sorter(sequence):\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]",
"def reversesort(self):\n ...",
"def sorter(sequence):\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[0]",
"def BubbleSort(ulist):\n done = 0 #This variable is used to break the loop when sorting is done\n while not done:\n done = 1\n for i in range(len(ulist) - 1):\n if ulist[i] > ulist[i+1]:\n ulist[i], ulist[i+1] = ulist[i+1], ulist[i]\n done = 0",
"def sort_fasta_by_abundance(fasta_lines, fasta_out_f):\r\n seq_index = {}\r\n count = 0\r\n for seq_id, seq in parse_fasta(fasta_lines):\r\n count += 1\r\n try:\r\n seq_index[seq].append(seq_id)\r\n except KeyError:\r\n seq_index[seq] = [seq_id]\r\n\r\n seqs = []\r\n for k, v in seq_index.items():\r\n seqs.append((len(v), k, v))\r\n del seq_index[k]\r\n seqs.sort()\r\n for count, seq, seq_ids in seqs[::-1]:\r\n for seq_id in seq_ids:\r\n fasta_out_f.write('>%s\\n%s\\n' % (seq_id, seq))",
"def burbuja(lista:list):\n vector = lista\n for i in range(0, len(vector)-1):\n for j in range(0, len(vector)-1):\n if vector[j] > vector[j+1]:\n tmp = vector[j+1]\n vector[j+1] = vector[j]\n vector[j] = tmp\n return vector",
"def order_ideal(self, gens):",
"def order(inputspectra):\n \n if np.all(np.diff(inputspectra.x_values) <= 0):\n inputspectra.x_values = inputspectra.x_values[::-1]\n inputspectra.spectra = inputspectra.spectra[:,::-1]\n return inputspectra",
"def test_sort_reversed():\n reverse_sorted_data = [3, 2, 1]\n sorted_data = bubble_sort(reverse_sorted_data)\n assert sorted_data == [1, 2, 3]",
"def test_bd_cycles_ascending(fprime_test_api):\n length = 60\n count_pred = predicates.greater_than(length - 1)\n results = fprime_test_api.await_telemetry_count(\n count_pred, \"blockDrv.BD_Cycles\", timeout=length\n )\n last = None\n reordered = False\n ascending = True\n for result in results:\n if last is not None:\n last_time = last.get_time()\n result_time = result.get_time()\n if result_time - last_time > 1.5:\n msg = \"FSW didn't send an update between {} and {}\".format(\n last_time.to_readable(), result_time.to_readable()\n )\n fprime_test_api.log(msg)\n elif result_time < last_time:\n msg = \"There is potential reorder error between {} and {}\".format(\n last_time, result_time\n )\n fprime_test_api.log(msg)\n reordered = True\n\n if not result.get_val() > last.get_val():\n msg = \"Not all updates ascended: First ({}) Second ({})\".format(\n last.get_val(), result.get_val()\n )\n fprime_test_api.log(msg)\n ascending = False\n\n last = result\n\n case = True\n case &= fprime_test_api.test_assert(\n ascending, \"Expected all updates to ascend.\", True\n )\n case &= fprime_test_api.test_assert(\n not reordered, \"Expected no updates to be dropped.\", True\n )\n fprime_test_api.predicate_assert(\n count_pred,\n len(results) - 1,\n \"Expected >= {} updates\".format(length - 1),\n True,\n )\n fprime_test_api.assert_telemetry_count(0, \"rateGroup1Comp.RgCycleSlips\")\n assert case, \"Expected all checks to pass (ascending, reordering). See log.\"",
"def sort_barcodes(barcode_list):\n return sorted(barcode_list, key=alphanum_key)",
"def bubble_sort_modificado(a):\n N = len(a)\n n = 0\n b = False\n while (n != N) and (not b):\n k = N - 1\n b = True\n while k != n:\n if a[k-1] > a[k]:\n b = False\n a[k-1], a[k] = a[k], a[k-1]\n k -= 1\n n += 1",
"def bubble_sort(dataset):\n\t# start with array length and decrement each time \n\tarrayLen = len(dataset)\n\tbubbleIndex = len(dataset) - 1\n\twhile bubbleIndex != 0:\n\t\tarrayIndex = 0\n\t\twhile arrayIndex < arrayLen - 1:\n\t\t\tthisVal = dataset[arrayIndex]\n\t\t\tnextVal = dataset[arrayIndex + 1]\n\t\t\tif thisVal > nextVal:\n\t\t\t\tdataset[arrayIndex + 1] = thisVal\n\t\t\t\tdataset[arrayIndex] = nextVal\n\t\t\tarrayIndex += 1\n\t\tprint \"Current State:\", dataset\n\t\tbubbleIndex -= 1",
"def bubble_sort(input_list):\n \n length = len(input_list)\n \n for i in range(length):\n for j in range(length-i-1):\n if input_list[j] > input_list[j+1]:\n input_list[j], input_list[j+1] = input_list[j+1], input_list[j]\n \n return input_list",
"def order_vep_by_csq(annotation_list):\n for ann in annotation_list:\n ann['major_consequence'] = worst_csq_from_csq(ann['Consequence'])\n return sorted(annotation_list, key=(lambda ann:csq_order_dict[ann['major_consequence']]))",
"def sort_descending(list_in):\n return list(map(str, list_in)).sort(key=lambda f: int(''.join(filter(str.isdigit, f))))"
] | [
"0.5366377",
"0.5326399",
"0.532053",
"0.5228897",
"0.5217887",
"0.5176579",
"0.5169902",
"0.51473904",
"0.513209",
"0.5101623",
"0.50995326",
"0.5060153",
"0.50267196",
"0.5010616",
"0.5007883",
"0.4982695",
"0.49244604",
"0.49026328",
"0.49025616",
"0.4894752",
"0.48676687",
"0.48411286",
"0.48391026",
"0.48261857",
"0.48255587",
"0.47989455",
"0.47885942",
"0.475366",
"0.47492263",
"0.47466934"
] | 0.6246531 | 0 |
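The prep_input doctest can be reproduced directly; the schedule below is a hypothetical reconstruction (it matches the doctest and resembles the Advent of Code 2020 day 13 example, with None standing in for the 'x' slots). The combine_signals reducer used by solve_buses in the next entry is not shown in this excerpt; the sieve-style CRT merge sketched here is one assumption consistent with its 1068781 doctest:

```python
import functools
from math import gcd

EXAMPLE_BUSES = [7, 13, None, None, 59, None, 31, 19]  # hypothetical

print(prep_input(EXAMPLE_BUSES))
# [(59, 4), (31, 6), (19, 7), (13, 1), (7, 0)]

def combine_signals(sig_a, sig_b):
    # Merge two constraints (x + c) % T == 0 into one with period lcm(T_a, T_b).
    T_a, c_a = sig_a
    T_b, c_b = sig_b
    period = T_a * T_b // gcd(T_a, T_b)
    x = -c_a % T_a              # smallest x satisfying the first constraint
    while (x + c_b) % T_b:      # step by T_a until the second holds too
        x += T_a
    return period, -x % period

T, c = functools.reduce(combine_signals, prep_input(EXAMPLE_BUSES))
print(T - c)  # 1068781
```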
Reduce a bunch of periodic signals to a single signal. The value of x that answers the puzzle is the first place (c + x) % T == 0, that is to say, c + x = T, or x = T - c. >>> solve_buses(prep_input(EXAMPLE_BUSES)) 1068781 | def solve_buses(prepared_buses):
T, c = functools.reduce(combine_signals, prepared_buses)
return T - c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solution2(inp):\n inp = get_lines(inp)\n notes = inp[1].split(\",\")\n\n offsets = {}\n for i, bus in enumerate(notes):\n if bus == 'x':\n continue\n bus = int(bus)\n offsets[bus] = i\n buses = set(offsets)\n old_buses = buses.copy()\n\n def search(bus, offset, t):\n if (t + offset) % bus == 0:\n buses.remove(bus)\n if len(buses) == 0:\n return True\n new_bus = max(buses)\n return search(new_bus, offsets[new_bus], t)\n return False\n\n cbus = max(buses)\n max_bus = cbus\n s = 100_000_000_000_000\n s = 0\n s = s - s % cbus - offsets[cbus]\n delta = cbus\n stack = buses.copy()\n stack.remove(cbus)\n sec_max = max(stack)\n while not search(max_bus, offsets[max_bus], offsets[max_bus]):\n buses = old_buses.copy()\n s += delta\n if (s + offsets[sec_max]) % sec_max == 0:\n if len(stack) != 0:\n cbus = max(stack)\n stack.remove(cbus)\n if len(stack) != 0:\n sec_max = max(stack)\n else:\n return s\n delta *= cbus\n\n return s - offsets[max(offsets)]",
"def reduce_B(self, B_on_standard_basis_handles):\n # TODO: Check this description, then move to docstring\n #To see this dt effect, consider:\n #\n #dx/dt = Ax+Bu, approximate as (x^(k+1)-x^k)/dt = Ax^k + Bu^k.\n #Rearranging terms, x^(k+1) = (I+dt*A)x^k + dt*Bu^k.\n #The impulse response is: x^0=0, u^0=1, and u^k=0 for k>=1.\n #Thus x^1 = dt*B, x^2 = dt*(I+dt*A)*B, ...\n #and y^1 = dt*C*B, y^2 = dt*C*(I+dt*A)*B, ...\n #However, the impulse response to the true discrete-time system is\n #x^1 = B, x^2 = A_d*B, ...\n #and y^1 = CB, y^2 = CA_d*B, ...\n #(where I+dt*A ~ A_d)\n #The important thing to see is the factor of dt difference.\n \n self.B_reduced = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, B_on_standard_basis_handles)\n if not self.is_basis_orthonormal:\n self.B_reduced = self._get_proj_mat() * self.B_reduced\n return self.B_reduced",
"def get_quickest_bus(departure_time: int, buses: List[int]) -> int:\n quickest_bus = sorted(buses,\n key=lambda x: get_wait_time(departure_time, x),\n reverse=False)[0]\n\n return get_wait_time(departure_time, quickest_bus) * quickest_bus",
"def solution1(inp):\n inp = get_lines(inp)\n earliest = int(inp[0])\n notes = inp[1].split(',')\n min_bus = None\n for bus in notes:\n if bus == 'x':\n continue\n bus = int(bus)\n wait_time = bus - earliest % bus\n if min_bus == None or wait_time < (min_bus - earliest % min_bus):\n min_bus = bus\n return min_bus * (min_bus - earliest % min_bus)",
"def filter_buses(list_of_buses):\n for bus in list_of_buses:\n return bus",
"def soustraction(a,b):\n bina = [int(x) for x in bin(a)[2:]]\n binb = [int(x) for x in bin(b)[2:]]\n while len(bina) >= len(binb):\n binb = [0]+binb\n while len(bina) < len(binb)-1:\n bina = [0]+bina\n bina.reverse()\n binb.reverse()\n n = len(bina)+len(binb)\n na = len(bina)\n q = QuantumRegister(n+1, 'q')\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binb)):\n if binb[i]:\n circ.x(q[na+i])\n sub(circ, q, [q[i] for i in range(len(bina))], [q[i+na] for i in range(len(binb)-1)], q[n], q[na+len(binb)-1])\n circ_m = measure(circ, q, [i for i in range(na, n)])\n return circ_m",
"def prep_input(buses):\n return sorted([(bus, offset) \n for offset, bus \n in enumerate(buses) \n if bus], reverse=True)",
"def reverse_map(coarse_grained, mapping_moieties, target=None, solvent_name=None, sol_per_bead=4, sol_cutoff=2, scaling_factor=5, parallel=True):\n\n aa_system = Compound()\n\n not_solvent = [mol for mol in coarse_grained.children if mol.name != solvent_name]\n is_solvent = [mol for mol in coarse_grained.children if mol.name == solvent_name]\n\n print(\"There are {} non-solvent molecules and {} solvent molecules.\".format(len(not_solvent), len(is_solvent)))\n\n # For each bead, replace it with the appropriate mb compound\n # Iterate through each molecule (set of particles that are bonded together)\n if parallel:\n pool = mp.Pool(processes=mp.cpu_count())\n\n # get the solvent molecules mapped in parallel\n inp = zip(is_solvent,\n [target[solvent_name]]*len(is_solvent),\n [sol_per_bead]*len(is_solvent),\n [sol_cutoff]*len(is_solvent))\n chunksize = int(len(is_solvent) / mp.cpu_count()) + 1\n solvent_list = pool.starmap(reverse_map_solvent, inp, chunksize)\n # name the solvents\n\n # get the non_solvent molecules mapped in parallel\n inp = zip(not_solvent,\n [target]*len(not_solvent),\n [mapping_moieties]*len(not_solvent))\n chunksize = int(len(not_solvent) / mp.cpu_count()) + 1\n molecule_list = pool.starmap(reverse_map_molecule, inp, chunksize)\n\n\n # put put solvents in one list\n solvent_molecule_list = []\n for i in solvent_list:\n solvent_molecule_list += i\n\n # put lipids in a box and get the box size\n for molecule in molecule_list:\n aa_system.add(molecule)\n\n print(aa_system.boundingbox)\n\n # put everything in a box\n for molecule in solvent_molecule_list:\n aa_system.add(molecule)\n\n else:\n [aa_system.add(reverse_map_molecule(molecule, target, mapping_moieties)) for molecule in not_solvent]\n solvent_compound = reverse_map_solvent(is_solvent, target[solvent_name], sol_per_bead, sol_cutoff)\n [aa_system.add(molecule) for molecule in solvent_compound.children]\n\n\n return aa_system",
"def compute_bias(ics, vbc):\n import os, time\n from seren3.array import SimArray\n \n # Compute size of grid and boxsize (for this patch)\n N = vbc.shape[0]\n boxsize = ics.boxsize.in_units(\"Mpc a h**-1\") * (float(N) / float(ics.header.N))\n\n # Compute vbc @ z=1000\n z = ics.z\n rms = vbc_rms(vbc)\n rms_recom = rms * (1001./z)\n\n # Check for PS and run CICsASS if needed\n fname_vbc0 = vbc_ps_fname(0., z, boxsize)\n if not os.path.isfile(fname_vbc0):\n exit_code = run_cicsass(boxsize, z, 0., fname_vbc0)\n\n fname_vbcrecom = vbc_ps_fname(rms_recom, z, boxsize)\n if not os.path.isfile(fname_vbcrecom):\n exit_code = run_cicsass(boxsize, z, rms_recom, fname_vbcrecom)\n\n # Load power spectra and compute bias\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n # Should have same lenghts if finished writing\n count = 0\n while len(ps_vbcrecom[1]) != len(ps_vbc0[1]):\n count += 1\n if count > 10:\n raise Exception(\"Reached sleep limit. Filesizes still differ\")\n time.sleep(5)\n ps_vbc0 = np.loadtxt(fname_vbc0, unpack=True)\n ps_vbcrecom = np.loadtxt(fname_vbcrecom, unpack=True)\n\n #CDM bias\n b_cdm = ps_vbcrecom[1] / ps_vbc0[1]\n # Baryon bias\n b_b = ps_vbcrecom[2] / ps_vbc0[2]\n # Wavenumber\n k_bias = SimArray(ps_vbcrecom[0] / ics.cosmo[\"h\"], \"h Mpc**-1\")\n\n return k_bias, b_cdm, b_b",
"def fast_fdsb(self, signals: np.ndarray, delays: np.ndarray) -> np.ndarray:\n\t\tif (len(signals.shape) == 2): signals = signals[None,:,:]\n\n\t\tfconv = np.einsum(\"kij,ijlm->ilmk\", signals, delays)\n\t\tconv = np.fft.ifft(fconv, axis=0).real\n\t\tsquared_conv = np.einsum(\"ijkm,ijkm->jkm\", conv, conv)\n\t\treturn squared_conv",
"def drag_schedules(beta_list, qubits, pulse_amp, pulse_width,\n pulse_sigma=None,\n width_sigma_ratio=4, drives=None, cmd_def=None,\n inst_map=None, meas_map=None):\n\n xdata = beta_list\n\n # copy the instruction to schedule mapping\n inst_map = copy.deepcopy(inst_map)\n if not inst_map:\n inst_map = copy.deepcopy(cmd_def)\n\n if pulse_sigma is None:\n pulse_sigma = pulse_width / width_sigma_ratio\n\n # Construct the circuits\n qr = qiskit.QuantumRegister(max(qubits) + 1)\n cr = qiskit.ClassicalRegister(len(qubits))\n\n circuits = []\n\n for circ_index, b_amp in enumerate(beta_list):\n\n circ = qiskit.QuantumCircuit(qr, cr)\n circ.name = 'dragcircuit_%d_0' % circ_index\n\n for qind, qubit in enumerate(qubits):\n\n # positive drag pulse\n drag_pulse = pulse_lib.drag(duration=pulse_width,\n amp=pulse_amp[qind],\n beta=b_amp,\n sigma=pulse_sigma,\n name='drag_pulse_%d_%d' % (circ_index,\n qubit))\n\n drag_gate = Gate(name='drag_%d_%d' % (circ_index, qubit),\n num_qubits=1, params=[])\n\n # add commands to schedule\n schedule = pulse.Schedule(name='drag_pulse_%f_%d' % (b_amp,\n qubit))\n\n schedule += drag_pulse(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('drag_%d_%d' % (circ_index, qubit), qubits=[qubit],\n schedule=schedule)\n\n # negative pulse\n drag_pulse2 = pulse_lib.drag(duration=pulse_width,\n amp=-1*pulse_amp[qind],\n beta=b_amp,\n sigma=pulse_sigma,\n name='drag_pulse_%d_%d' % (circ_index,\n qubit))\n\n drag_gate2 = Gate(name='drag2_%d_%d' % (circ_index, qubit),\n num_qubits=1, params=[])\n\n # add commands to schedule\n schedule2 = pulse.Schedule(name='drag_pulse2_%f_%d' % (b_amp,\n qubit))\n\n schedule2 += drag_pulse2(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('drag2_%d_%d' % (circ_index, qubit), qubits=[qubit],\n schedule=schedule2)\n\n circ.append(drag_gate, [qr[qubit]])\n # circ.u1(np.pi, [qr[qubit]])\n circ.append(drag_gate2, [qr[qubit]])\n\n for qind, qubit in enumerate(qubits):\n circ.measure(qr[qubit], cr[qind])\n\n circuits.append(circ)\n\n # schedule\n schedule_config = ScheduleConfig(inst_map, meas_map)\n drag_sched = [schedule_circuit(qcirc,\n schedule_config)\n for qcirc in circuits]\n\n return drag_sched, xdata",
"def build_bridge(blocks):\n bridges = []\n for start in [ b for b in blocks if 0 in b ]:\n tmp = blocks[:]\n tmp.remove(start)\n bridges.append(build(tmp, start[1], [start], sum(start)))\n return find_max()",
"def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True",
"def reduce_B(self, B_on_standard_basis_array):\n # TODO: Check this description, then move to docstring\n #To see this dt effect, consider:\n #\n #dx/dt = Ax+Bu, approximate as (x^(k+1)-x^k)/dt = Ax^k + Bu^k.\n #Rearranging terms, x^(k+1) = (I+dt*A)x^k + dt*Bu^k.\n #The impulse response is: x^0=0, u^0=1, and u^k=0 for k>=1.\n #Thus x^1 = dt*B, x^2 = dt*(I+dt*A)*B, ...\n #and y^1 = dt*C*B, y^2 = dt*C*(I+dt*A)*B, ...\n #However, the impulse response to the true discrete-time system is\n #x^1 = B, x^2 = A_d*B, ...\n #and y^1 = CB, y^2 = CA_d*B, ...\n #(where I+dt*A ~ A_d)\n #The important thing to see is the factor of dt difference.\n\n self.B_reduced = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, B_on_standard_basis_array)\n if not self.is_basis_orthonormal:\n self.B_reduced = self._get_proj_mat() * self.B_reduced\n return self.B_reduced",
"def simulate_strategy_loop_known(\n num_buses,\n states,\n decisions,\n utilities,\n costs,\n ev,\n increments,\n num_periods,\n beta,\n unobs,\n):\n for period in range(num_periods):\n for bus in range(num_buses):\n\n old_state = states[bus, period]\n if (-costs[old_state, 0] + unobs[bus, period, 0] + beta * ev[old_state]) > (\n -costs[0, 0] - costs[0, 1] + unobs[bus, period, 1] + beta * ev[0]\n ):\n decision = 0\n utility = -costs[old_state, 0] + unobs[bus, period, 0]\n new_state = old_state + increments[bus, period]\n else:\n decision = 1\n utility = -costs[0, 0] - costs[0, 1] + unobs[bus, period, 1]\n new_state = increments[bus, period]\n\n decisions[bus, period] = decision\n utilities[bus, period] = utility\n states[bus, period + 1] = new_state\n return states, decisions, utilities",
"def mult_mod(a, b, nbr, control):\n bina = [int(x) for x in bin(a)[2:]]\n # binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n while len(binn) < len(bina):\n binn = [0]+binn\n # print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*3+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n q = QuantumRegister(n+2+1, 'q') # +lost+lost2+control\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n if control:\n circ.x(q[n+2])\n cmultmod(circ, q, # control, X, a, A, Y, n, N, binn, lost, lost2\n q[n+2],\n [q[i] for i in range(len(bina))],\n b,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n return circ_m",
"def rabi_schedules(amp_list, qubits, pulse_width, pulse_sigma=None,\n width_sigma_ratio=4, drives=None, cmd_def=None,\n inst_map=None, meas_map=None):\n\n xdata = amp_list\n\n # copy the instruction to schedule mapping\n inst_map = copy.deepcopy(inst_map)\n if not inst_map:\n inst_map = copy.deepcopy(cmd_def)\n\n if pulse_sigma is None:\n pulse_sigma = pulse_width / width_sigma_ratio\n\n # Construct the circuits\n qr = qiskit.QuantumRegister(max(qubits) + 1)\n cr = qiskit.ClassicalRegister(len(qubits))\n\n circuits = []\n\n for circ_index, g_amp in enumerate(amp_list):\n\n circ = qiskit.QuantumCircuit(qr, cr)\n circ.name = 'rabicircuit_%d_0' % circ_index\n\n rabi_pulse = pulse_lib.gaussian(duration=pulse_width,\n amp=g_amp,\n sigma=pulse_sigma,\n name='rabi_pulse_%d' % circ_index)\n\n rabi_gate = Gate(name='rabi_%d' % circ_index, num_qubits=1, params=[])\n\n for _, qubit in enumerate(qubits):\n\n # add commands to schedule\n schedule = pulse.Schedule(name='rabi_pulse_%f_%d' % (g_amp,\n qubit))\n\n schedule += rabi_pulse(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('rabi_%d' % circ_index, qubits=[qubit],\n schedule=schedule)\n\n circ.append(rabi_gate, [qr[qubit]])\n\n for qind, qubit in enumerate(qubits):\n circ.measure(qr[qubit], cr[qind])\n\n circuits.append(circ)\n\n # schedule\n schedule_config = ScheduleConfig(inst_map, meas_map)\n rabi_sched = [schedule_circuit(qcirc,\n schedule_config)\n for qcirc in circuits]\n\n return rabi_sched, xdata",
"def reduce_set(cells: int, blocks: [int], uvars: [int], nbase: int):\n combos = []\n\n if sum(blocks) + (len(blocks) - 1) > cells:\n raise Exception(\"The passed block values exceeded the number of cells\")\n\n ogcombo = []\n acc = 0\n for block in blocks:\n ogcombo.append(acc)\n acc += block + 1\n\n combos.append(ogcombo)\n\n ccombo = ogcombo.copy()\n\n lookat = len(blocks) - 1\n while lookat >= 0:\n if blocks[-1] + ccombo[-1] < cells:\n ccombo[lookat] = ccombo[lookat] + 1\n s = ccombo[lookat] + blocks[lookat] + 1\n for i in range(lookat + 1, len(blocks)):\n ccombo[i] = s\n s += blocks[i] + 1\n lookat = len(blocks) - 1\n combos.append(ccombo.copy())\n else:\n lookat -= 1\n s = ccombo[lookat] + blocks[lookat] + 1\n for i in range(lookat + 1, len(blocks)):\n ccombo[i] = s\n s += blocks[i] + 1\n\n cnf = CNF()\n for combo in combos:\n clause = [-v if in_combo(i, combo, blocks) else v for i, v in zip(range(cells), uvars)]\n cnf.append(clause)\n\n return cnf.negate(nbase)",
"def solve(self, state, times):",
"def biz(cps):\n # Head of unit\n mask = cps['tc5_p'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(10.4 + 1. * rand)\n new_vals = np.where(new_vals < 50000., 50000., new_vals)\n cps.loc[mask, 'bilp'] = new_vals\n # spouse of unit\n mask = cps['tc5_s'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(10.4 + 1. * rand)\n new_vals = np.where(new_vals < 50000., 50000., new_vals)\n cps.loc[mask, 'bils'] = new_vals",
"def bruno_mes(self):\n MI = -125\n MA = 125\n INCR = 19\n####################################################################\n ### CHANNEL 1\n self.write('CHN 1')\n self.write('CHN?')\n print 'Acting on channel:',self.read()\n self.write('WAVE ARB')\n self.write('ARBLOAD ARB1')\n self.write('FREQ 100')\n self.write('DCOFFS 0.05')\n self.write('AMPL 0.1')\n \n l =(125,-125,125)#arange(MI,MA,INCR) # the ramp\n# lll = copy(l)[::-1][1:-1]\n# l = concatenate((l,lll))\n self.write_array_to_byte(l,1)",
"def bifurcation_diagram(args, Bpbmin, Bpbmax, ylim=(-1, 0.6)):\n\n xs = []\n Bpb_list = np.linspace(Bpbmin, Bpbmax, 100)\n Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl = args\n\n sol, t = calcODE(args, -1.5, -1.5, 0.5, 0.5, 0.5, 0.5, ts=4000, nt=2 ** 25)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n x0 = sol[0, :]\n n = np.array(ode(x0, t[0], *args))\n q, _ = np.linalg.qr(n[:, None], mode='complete')\n\n periods = []\n for Bpb in Bpb_list:\n args = (Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl)\n sol, t = calcODE(args, *sol[-1, :], ts=1000, nt=2 ** 15)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n for i in range(len(sol) - 1):\n x1 = sol[i]\n x2 = sol[i + 1]\n if np.sign(n @ (x2 - x0)) != np.sign(n @ (x1 - x0)):\n c1 = dist(x1, x0, n)\n c2 = dist(x2, x0, n)\n alpha = c2 / (c1 + c2)\n x_new = x1 + alpha * (x2 - x1)\n x = (x_new - x0).dot(q)\n xs.append((Bpb, x[0], x[1], x[2], x[3], x[4], x[5]))\n # if np.linalg.norm(x_new - x0) < 1e-2 and period is None:\n period = t[i] - periods[-1][-1] if len(periods) else 0\n periods.append((Bpb, period, np.linalg.norm(x_new - x0), t[i]))\n\n plt.figure(figsize=(15, 10))\n plt.scatter([i[0] for i in xs], [i[2] for i in xs], s=10)\n plt.xlabel('$B_{pb}$')\n\n # plt.ylim(ylim)\n plt.show()\n\n periods = [i for i in periods if i[1] > 0]\n\n return periods, xs",
"def preCondConjugateGradientSolver(b, x, linsys_setup, eps, i_max, plotInterval, mapDir):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz=True\n \n \n # Calculate residual r = b - (A^-1) x\n r = b - applyMat(x, linsys_setup)\n d = r\n\n\n delta_new = numpy.inner(r,r)\n \n\n\n\n delta_o = delta_new\n delta_array = numpy.zeros(shape=(i_max))\n \n # Iterate CG solver until converged\n i = 0\n #i_max = 300\n while (i < i_max) and (delta_new > delta_o*eps**2.):\n if i==0: t = time.time()\n \n if i%plotInterval == 0 and i != 0:\n print \"\\tNumber of iterations in the CG:\", i\n x0 = x[:nx*ny] # CMB\n x1 = x[nx*ny:nx*ny+1] # Monopole\n x2 = x[nx*ny+1:nx*ny+1+nCluster] # TSZ\n if ksz: x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n print \"\\tMonopole:\", x1\n print \"\\tTSZ:\", x2\n if ksz: print \"\\tKSZ:\", x3\n \n x0.shape = (ny,nx)\n a_l = numpy.fft.fft2(x0)\n a_l *= precond_2d\n x_test = numpy.real(numpy.fft.ifft2(a_l))\n plot(x_test,mapDir+'/CMB_%d.png'%i,'Reconstructed CMB', range=(-250., 250.))\n print delta_new, delta_o*eps**2.\n\n q = applyMat(d, linsys_setup)\n alpha = delta_new / (numpy.inner(d,q))\n x += alpha * d\n\n # What does this do? It's always false.\n if i/50. < numpy.int(i/50):\n r = b - applyMat(x, linsys_setup)\n else:\n r = r - alpha*q\n \n delta_old = delta_new\n delta_new = numpy.inner(r,r)\n beta = delta_new/delta_old\n d = r + beta * d\n #if i==0: print \"\\tEach iteration takes:\", time.time()-t\n i += 1\n\n x0 = x[:nx*ny].reshape((ny, nx))\n x1 = x[nx*ny:nx*ny+1]\n x2 = x[nx*ny+1:nx*ny+1+nCluster]\n if ksz:\n x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n else:\n x3 = None\n \n a_l = numpy.fft.fft2(x0) * precond_2d\n x0 = numpy.real(numpy.fft.ifft2(a_l))\n\n \n # CMB, monopole, TSZ, KSZ\n return x0, x1, x2, x3",
"def bake_multiplier(multiplier, sr, duration):\n signal = 1\n for mult in multiplier:\n if mult[\"type\"] == \"fixed\":\n sig = mult[\"value\"]\n elif mult[\"type\"] == \"dynamic\":\n sig = bake_signal(mult[\"value\"], sr, duration)\n \n #if there is no signal yet, assign the first one\n if signal is 1:\n signal = sig\n #if there already is one, combine them with a multiplication\n else:\n signal *= sig\n return signal",
"def reduce_bis(equation:sp.Eq):\n\n assert isinstance(equation,sp.Eq)\n symbols = equation.lhs.free_symbols | equation.rhs.free_symbols\n subs = []\n for symbol in symbols:\n if isinstance(symbol,Bis):\n subs.append((symbol,sp.solve(symbol.bis_eq,symbol)[0]))\n\n reduced = equation.subs(subs)\n return reduced",
"def _decomposition_with_many_workers(control_wires, target_wire, work_wires):\n num_work_wires_needed = len(control_wires) - 2\n work_wires = work_wires[:num_work_wires_needed]\n\n work_wires_reversed = list(reversed(work_wires))\n control_wires_reversed = list(reversed(control_wires))\n\n gates = []\n\n for i in range(len(work_wires)):\n ctrl1 = control_wires_reversed[i]\n ctrl2 = work_wires_reversed[i]\n t = target_wire if i == 0 else work_wires_reversed[i - 1]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n gates.append(qml.Toffoli(wires=[*control_wires[:2], work_wires[0]]))\n\n for i in reversed(range(len(work_wires))):\n ctrl1 = control_wires_reversed[i]\n ctrl2 = work_wires_reversed[i]\n t = target_wire if i == 0 else work_wires_reversed[i - 1]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n for i in range(len(work_wires) - 1):\n ctrl1 = control_wires_reversed[i + 1]\n ctrl2 = work_wires_reversed[i + 1]\n t = work_wires_reversed[i]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n gates.append(qml.Toffoli(wires=[*control_wires[:2], work_wires[0]]))\n\n for i in reversed(range(len(work_wires) - 1)):\n ctrl1 = control_wires_reversed[i + 1]\n ctrl2 = work_wires_reversed[i + 1]\n t = work_wires_reversed[i]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n return gates",
"def solve_canonical_impl(basis, c, A, b):\n (m, n) = A.shape\n Q = np.row_stack(\n (\n np.hstack(([0], -c)),\n np.column_stack((b, A)),\n )\n )\n gauss_elimination(Q, basis)\n\n while True:\n # choose 's' and 'r' according to the Bland's rule\n ss = (j for j in range(1, n + 1) if Q[0][j] < 0)\n s = min(ss, default=None)\n if s is None:\n return basis, Q\n\n rs = [i for i in range(1, m + 1) if Q[i][s] > 0] # and Q[0][s] / Q[i][s] > 0\n r = min(rs, key=lambda i: (abs(Q[0][s] / Q[i][s]), basis[i - 1]), default=None)\n if r is None:\n raise UnboundFunction\n\n Q[r] /= Q[r][s]\n for i in range(m + 1):\n if i != r:\n Q[i] -= Q[r] * Q[i][s]\n\n basis[r - 1] = s",
"def SecondaryComplex_to_Bid():\n Parameter('RIP3_0' , 2.0e4) # molecules per cell\n Parameter('BidK_0' , 5.0e3) # molecules per cell\n \n alias_model_components()\n Initial(RIP3(bRHIM = None, state = 'unmod'), RIP3_0) # RIP3\n Initial(BidK(bf = None), BidK_0) \n # ==============================================================\n # Assembly of Complex II, Riptosome and Necrosome\n # --------------------------------------------------------------\n # FADD + TRADD[active] <-> FADD:TRADD[active]\n # FADD + RIP1 <-> FADD:RIP1\n # TRADD + RIP1 <-> TRADD:RIP1\n\n # CD95_to_secondary complex contains the rules for recruitment of proC8 to FADD.\n # (RIP1 or TRADD):FADD + proC8 <-> (RIP1 or TRADD):FADD:proC8\n # (RIP1 or TRADD):FADD:proC8 + proC8 <-> (RIP1 or TRADD):FADD:proC8:proC8\n # (RIP1 or TRADD):FADD:proC8 + flip_L <-> (RIP1 or TRADD):FADD:proC8:flip_L\n # (RIP1 or TRADD):FADD:proC8 + flip_S <-> (RIP1 or TRADD):proC8:flip_S\n \n # RIP1%ProC8%ProC8(in a complex) >> RIP1[trunc] + C8 + (remains of the complex)\n # RIP1%ProC8%cFlip[L](in a complex) >> RIP1[trunc] + remains of the complex)\n # RIP1%cFlip[S](in a complex) + RIP3 >> RIP1:RIP3(in a complex, i.e. necrosome)\n\n # RIP1 + C8 <-> RIP1:C8 >> RIP1[trunc] + C8\n # RIP3 + C8 <-> RIP3:C8 >> RIP3[trunc] + C8\n # Bid + C8 <-> Bid:C8 >> Bid[trunc] + C8\n \n # -------------Assembling Complex II-----------------\n Parameter('Ka_RIP1_FADD', 1e-7) # Biochemica et Biophysica Acta 1834(2013) 292-300\n Parameter('Kd_RIP1_FADD', 1e-8) # Biochemica et Biophysica Acta 1834(2013) 292-300\n alias_model_components()\n \n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', TRADD(bDD1=None, state = 'active'), 'bDD1', [1e-6, 1e-3])\n bind(FADD(bDD = None), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bDD', [1e-6, 1e-3])\n # For simplicity, I am neglecting the binary intereaction that occurs between proC8 and RIP1.\n # Binding of proC8 and c-flip to FADD is accomplished in CD95_to_Secondary complex. 
\n\n #--------------RIP1 Truncation reactions-------------\n #---Truncation by C8---------------------------------\n RIP_CIIA_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIB_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n CIIA = TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=None, bDED2=None)\n \n Rule('RIP1_truncation_CIIA', RIP_CIIA_proC8 >> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11',1e-1))\n Rule('RIP1_truncation_CIIB', RIP_CIIB_proC8 >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12', 1e-1))\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n\n #---Truncation by proC8:cFlip_L---------------------\n Riptosome_FADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%FADD(bDD=1, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_TRADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n\n Rule('RIP1_truncation_FADD', Riptosome_FADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13', 1e-1))\n Rule('RIP1_truncation_TRADD', Riptosome_TRADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14', 1e-1))\n \n # -------------RIP3 Binding Interactions----------------\n Ripto1_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n\n Rule('RIP3_binding1', Ripto1_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1, Parameter('k15', 1e-6), Parameter('k16', 1e-3))\n Rule('RIP3_binding2', Ripto2_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2, Parameter('k17', 1e-6), Parameter('k18', 1e-3))\n \n #RIP3 Truncation\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP3(), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n\n #-------------Bid Interactions--------------------------\n # Bid Phosphorylation and Truncation\n catalyze_state(BidK(), 'bf', Bid(), 'bf', 'state', 'U', 'po4', [1e-6, 1e-3, 1e-1])\n catalyze_state(C8(bf = None, state = 'A'), 'bf', Bid(), 'bf', 'state', 'U', 'T', [1.04e-5, 0.005, 0.1])\n\n # Bid-PO4 competing with RIP1 for binding to Complex II\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])\n # Bid-PO4 sequestering RIP1\n bind(RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])",
"def inst_bp(instrument,array=\"2\"):\n\n if instrument == \"MUSTANG2\" or instrument == \"MUSTANG\":\n srms = (300*u.um).to(\"m\") # surface RMS (microns)\n ### Reference: https://science.nrao.edu/facilities/gbt/proposing/GBTpg.pdf\n EA90 = 0.36 # Aperture efficiency at 90 GHz\n ### The beam efficiencies should be taken as 1.37* Aperture Efficiency\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n if instrument == \"MUSTANG2\":\n flow = 75.0 # GHz\n fhig = 105.0 # GHz\n else:\n flow = 82.5 # GHz\n fhig = 97.5 # GHz\n \n farr = np.arange(flow,fhig,1.0) # frequency array.\n tran = farr*0.0 + 1.0 # Let the transmission be unity everywhere.\n Larr = const.c.value/(farr*1.0e9) # Keep calm and carry on.\n ### Old formula:\n #Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n ### Correct formula: (10 April 2018)\n Ruze = Gnot * np.exp(-(4.0*np.pi*srms.value/Larr)**2)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n \n if instrument == \"NIKA2\" or instrument == \"NIKA\":\n caldir='/home/romero/NIKA2/NIKA_SVN/Processing/Pipeline/Calibration/BP/'\n bpfile=caldir+'Transmission_2017_Jan_NIKA2_v1.fits'\n hdulist = fits.open(bpfile)\n\n if array == \"1H\": # 1mm (260 GHz) array, Horizontal Polarization\n tbdata = hdulist[1].data # 1H\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1h = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"1V\": # 1mm (260 GHz) array, Vertical Polarization\n tbdata = hdulist[2].data # 1V\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1v = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"2\": # 2mm (150 GHz) array\n tbdata = hdulist[3].data # 2\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq2 = np.sum(freq*tran)/np.sum(tran)\n\n ### Trim the zero-frequency listing, if any.\n gi=np.where(freq > 0)\n freq = freq[gi]\n tran = tran[gi]\n erro = erro[gi]\n atmt = atmt[gi]\n \n### Calculate Aperture efficiencies from information found at:\n### http://www.iram.es/IRAMES/mainwiki/Iram30mEfficiencies\n Beff = 0.630 # at 210 GHz\n Aeff = Beff/1.27 # See text on webpage\n srms = (66.0*u.um).to(\"m\") # surface RMS (microns)\n R210 = np.exp(-4.0*np.pi*(srms/(const.c/(2.1e11*u.s**-1))).value) #\n Gnot = Aeff/R210 # Unphysical, but see documentation...\n\n Larr = const.c.value/(freq*1.0e9) # Keep calm and carry on. 
\n Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n farr = freq\n \n#########################################################################\n\n if instrument == 'ACT90':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 65.0 # GHz\n fhig = 125.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n if instrument == 'ACT150':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 120.0 # GHz\n fhig = 180.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n\n return band, farr",
"def circuitSat(C):"
] | [
"0.5621422",
"0.5476572",
"0.5280086",
"0.5161614",
"0.5074294",
"0.49908358",
"0.49579656",
"0.48839134",
"0.48619267",
"0.48579392",
"0.48389107",
"0.48320952",
"0.4826791",
"0.48213837",
"0.48160282",
"0.4811936",
"0.48002857",
"0.4795177",
"0.47753403",
"0.4768853",
"0.4751825",
"0.4745213",
"0.4737876",
"0.47342184",
"0.4717476",
"0.47160357",
"0.46291357",
"0.46277666",
"0.4620215",
"0.46052235"
] | 0.76751333 | 0 |
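As an editorial aside for readers skimming the negatives above: the circuit `solve` entry builds a nodal-analysis system Ax = b (one row per wire or two-sided element) and hands it to `np.linalg.solve`. A minimal self-contained sketch of that pattern, using a hypothetical two-resistor voltage divider rather than the snippet's own classes:

```python
import numpy as np

# Voltage divider: Vs -- R1 -- node1 -- R2 -- ground.
# One unknown (the node1 potential) and one KCL equation at node1:
# (Vs - v1)/R1 - v1/R2 = 0  ->  v1*(1/R1 + 1/R2) = Vs/R1
Vs, R1, R2 = 10.0, 1000.0, 2000.0
A = np.array([[1.0 / R1 + 1.0 / R2]])
b = np.array([Vs / R1])
v1 = np.linalg.solve(A, b)[0]
print(v1)  # 6.666..., i.e. Vs * R2 / (R1 + R2)
```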
Method opening all images to test their validity. | def verify_images(root_dir, root_listdir):
counter = 0
for index, image_dir in enumerate(root_listdir):
images_listdir = os.listdir(root_dir + "/" + image_dir)
list_of_images_indices = [
image_index
for image_index in range(3, len(images_listdir) - 1)
if image_index % 2 == 0
]
for image_ind in list_of_images_indices:
filename = root_dir + "/" + image_dir + "/" + images_listdir[image_ind]
try:
im = Image.open(filename)
im.verify()
im.close()
except (OSError, ValueError):
counter += 1
print("%d files caused error due to OSError and ValueError." % counter) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_images(self):\n self.roses.save_image()\n all_images = Images.get_all_images()\n self.assertTrue(len(all_images)<1)",
"def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True",
"def images_exist(self):\n pass",
"def test_read(self):\n for line in TESTIMAGES.split('\\n'):\n vals = line.strip().split()\n name = vals[0]\n logger.debug(\"Testing file %s\" % name)\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = raxisimage()\n obj.read(os.path.join(os.path.dirname(self.mar), name))\n\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin [%s,%s]\" % (mini, obj.getmin()))\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax [%s,%s]\" % (maxi, obj.getmax()))\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean [%s,%s]\" % (mean, obj.getmean()))\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev [%s,%s]\" % (stddev, obj.getstddev()))\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")\n self.assertNotEqual(obj.dim1, obj.dim2, \"dim2!=dim1\")",
"def check_files(self):\n print('checking files')\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)",
"def check_files(self):\n print('checking files')\n for f in tqdm(self.filenames):\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)",
"def testImagesPresent(self):\n\n result = self.app.get('/')\n\n images = result.html.find_all('img')\n\n # expect to find three images\n self.assertEqual(3, len(images), \"Wrong number of images found\")\n\n flowtows = result.html.find_all(class_='flowtow')\n\n image_list = self.images\n\n self.assertEqual(3, len(flowtows))\n\n # each contains the image, date, author and likes\n for index in range(3):\n div = flowtows[index]\n (path, date, user, likes) = image_list[index]\n\n self.assertIn(date, div.text)\n self.assertIn(user, div.text)\n # look for the number of likes\n self.assertIn(str(len(likes)+1), div.text, \"expected to find %d likes mentioned in:\\n\\n%s\" % (len(likes), div))\n\n # look for just one image\n img = div.find_all('img')\n self.assertEqual(1, len(img))",
"def check_files(self):\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is None:\n os.remove(f)",
"def check_images():\n saved_stdout, saved_stderr = sys.stdout, sys.stderr\n\n out, err = StringIO(), StringIO()\n try:\n sys.stdout, sys.stderr = out, err\n check_images_main()\n except SystemExit:\n pass\n finally:\n stdout, stderr = out.getvalue().strip(), err.getvalue().strip()\n sys.stdout, sys.stderr = saved_stdout, saved_stderr\n\n return stdout, stderr",
"def test_is_image(self):\n os.chdir(\"testimages/\")\n self.assertTrue(fileactions.is_image(\"arch_001.jpg\"))\n self.assertFalse(fileactions.is_image(\"not_an_image.jpg\"))",
"def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")",
"def number_of_images_a_valid():\r\n counter = 0\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n counter += 1\r\n if counter >= int(number_of_images_a.get()):\r\n return True\r\n else:\r\n messagebox.showwarning(\"Invalid Image Inputs\", (\r\n \"Not enough images of type a to create \"\r\n \"requested grid.\"))\r\n return False",
"def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list",
"def _iter_images(self):\n raise NotImplementedError",
"def check_image_dimensions(image_paths, image_height, image_width):\n logging.info('Using image height, width %s', str((image_height, image_width)))\n\n bad_images = []\n\n for path in image_paths:\n logging.info('Trying to read image %s', path)\n image = microscopeimagequality.dataset_creation.read_16_bit_greyscale(path)\n\n if image.shape[0] < image_height or image.shape[1] < image_width:\n bad_images.append(path)\n logging.info('Image %s dimension %s is too small.', path, str(image.shape))\n\n logging.info('Done checking images')\n\n logging.info('Found %d bad images.', len(bad_images))\n\n if bad_images:\n raise ValueError('Found %d bad images! \\n %s' % (len(bad_images), '\\n'.join(bad_images)))",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def test_read_image(self):\n pass",
"def test_image_links(self):\r\n print('\\nTest image links: ', end='', flush=True)\r\n driver = self.driver\r\n driver.get(MY_URL)\r\n all_images = driver.find_elements_by_tag_name('img')\r\n for image in all_images:\r\n src = image.get_attribute('src')\r\n alt = image.get_attribute('alt')\r\n r = requests.get(src)\r\n assert r.status_code == 200, 'Bad http status (%d) for %s' % (r.status_code, src)\r\n assert len(alt) > 0, 'Missing or empty alt tag for %s' % (src)\r\n print('.', end=\"\", flush=True)\r\n if DEBUG:\r\n print ('Src=%s' % src)",
"def scan_images(self):\n rtn = 0\n mime_list = self.db.get_mime_list()\n (results,count) = datastore.find({})\n for f in results:\n dict = f.get_metadata().get_dictionary()\n if dict[\"mime_type\"] in mime_list:\n #record the id, file size, file date, in_ds\n self.db.create_picture_record(f.object_id, f.get_file_path())\n rtn += 1\n f.destroy()\n self.db.commit()\n _logger.debug('%s entries found in journal. Number of pictures %s'%(count,rtn,))\n return rtn",
"def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces",
"def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')",
"def __loadImage(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'image':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following image file is missing: %s \" % fileName)\n\n file = QFile(fileName)\n if not file.open(QIODevice.ReadOnly):\n raise Exception(\"error opening image file %s\" % fileName )\n else:\n imageData= file.readAll()\n pr['value'] = \"undefined:/%s\" % base64.b64encode(imageData)\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following image file is missing: %s \" % fileName)\n \n file = QFile(fileName)\n if not file.open(QIODevice.ReadOnly):\n raise Exception(\"error opening image file %s\" % fileName )\n else:\n imageData= file.readAll()\n pr['value'] = \"local-tests:/%s\" % base64.b64encode(imageData)\n else:\n pass",
"def test_list_image(self):\n pass",
"def test_image(self):\n browser = self.layer.get_web_browser(smi_settings)\n\n image = self.layer.get_fixture('torvald.jpg')\n browser.login(self.username, self.username)\n self.assertEqual(browser.open('/root/edit'), 200)\n browser.macros.create(\n 'Silva Image', id='image', title='Torvald', file=image)\n self.assertEqual(\n browser.inspect.folder_listing, ['index', 'image'])\n\n # The user should by the last author on the content and container.\n self.assertEqual(\n self.root.sec_get_last_author_info().userid(),\n self.username)\n self.assertEqual(\n self.root.image.sec_get_last_author_info().userid(),\n self.username)\n\n # Visit the edit page\n self.assertEqual(\n browser.inspect.folder_listing['image'].click(),\n 200)\n self.assertEqual(browser.location, '/root/image/edit/tab_edit')\n\n # Change title\n form = browser.get_form('silvaObjects')\n self.assertEqual(\n form.get_control('field_image_title').value,\n 'Torvald')\n form.get_control('field_image_title').value = u'Picture of Torvald'\n form.get_control('submit:method').click()\n self.assertEqual(browser.inspect.feedback, ['Changes saved.'])\n\n # Change format\n form = browser.get_form('editform.scaling')\n self.assertEqual(form.get_control('field_web_format').value, 'JPEG')\n form.get_control('field_web_format').value = 'PNG'\n form.get_control('scale_submit:method').click()\n self.assertEqual(\n browser.inspect.feedback,\n ['Scaling and/or format changed.'])\n\n # Change scaling\n form = browser.get_form('editform.scaling')\n form.get_control('field_web_scaling').value = '100x200'\n form.get_control('scale_submit:method').click()\n self.assertEqual(\n browser.inspect.feedback,\n ['Scaling and/or format changed.'])\n\n # Change image\n form = browser.get_form('editform.upload')\n form.get_control('field_file').value = image\n form.get_control('upload_submit:method').click()\n self.assertEqual(\n browser.inspect.feedback,\n ['Image updated.'])\n\n self.assertEqual(\n browser.inspect.breadcrumbs,\n ['root', 'Picture of Torvald'])\n browser.inspect.breadcrumbs['root'].click()\n browser.macros.delete('image')",
"def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n self.images = []\n self.display_match = False\n self.useBlending = False\n print('found %d images' % len(self.files))",
"def _compare_images(self, ax, filename, tol=10):\n assert isinstance(ax, Artist)\n if GENERATE_BASELINE:\n savefig(os.path.join(BASELINE_DIR, filename))\n savefig(os.path.join(self.tempdir, filename))\n err = compare_images(os.path.join(BASELINE_DIR, filename),\n os.path.join(self.tempdir, filename),\n tol, in_decorator=True)\n if err:\n raise ImageComparisonFailure('images not close: %(actual)s '\n 'vs. %(expected)s '\n '(RMS %(rms).3f)' % err)",
"def initImages(self):\n pass",
"def initImages(self):\n pass",
"def initImages(self):\n pass",
"def check_images():\n\n print(f'Looking for duplicate images...')\n\n for image in images_in_directory:\n duplicate = check_image_for_duplicates(image)\n\n if (duplicate):\n print(f'Found {duplicate} to be a duplicate image of: {image}')\n remove_image(duplicate)\n pass"
] | [
"0.6742113",
"0.62553185",
"0.6229397",
"0.6169011",
"0.61681324",
"0.6136686",
"0.60898274",
"0.60487515",
"0.5997627",
"0.5935533",
"0.58759665",
"0.58210576",
"0.5768145",
"0.5766127",
"0.5718156",
"0.5691568",
"0.5679888",
"0.56696707",
"0.5667723",
"0.5602127",
"0.5572963",
"0.55521595",
"0.5550424",
"0.55474514",
"0.55456865",
"0.5536247",
"0.55299145",
"0.55299145",
"0.55299145",
"0.552102"
] | 0.663587 | 1 |
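The `verify_images` document above relies on Pillow's cheap integrity check: `Image.open` reads only the header lazily, and `verify()` validates the file without decoding pixel data. A minimal sketch of the same pattern, assuming Pillow is installed and `folder` is a hypothetical directory of images:

```python
import os
from PIL import Image

def count_broken_images(folder):
    """Return how many files in `folder` fail Pillow's integrity check."""
    broken = 0
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        if not os.path.isfile(path):
            continue
        try:
            # verify() must be called on a freshly opened image and leaves
            # it unusable afterwards, hence the short-lived handle here.
            with Image.open(path) as im:
                im.verify()
        except (OSError, ValueError):
            broken += 1
    return broken
```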
For a given template and a list of extensions, find every file related to that template that has one of the extensions. | def find_template_companion_files(template: Path, extensions: Iterable[str], recurse_up_to: Path = None) -> Set[Path]:
files_to_check = []
# Get a list of all file names to look for in each folder
data_file_names = []
basename = template.name.split('.')[0]
for i in range(len(template.suffixes)):
ext = ''.join(template.suffixes[:i+1])
for data_file_ext in extensions:
data_file_names.append(Path(basename + ext).with_suffix(data_file_ext))
# Look for those files in the template's current folder (a.k.a. parent directory)
files_to_check.extend([template.parent / file_name for file_name in data_file_names])
if recurse_up_to and recurse_up_to in template.parents:
# Look for those files in every parent directory up to `recurse_up_to`,
# excluding the template's parent directory which has already been checked
relative_path = template.parent.relative_to(recurse_up_to)
for folder in relative_path.parents:
for file in data_file_names:
files_to_check.append(recurse_up_to / folder / file)
return set([file for file in files_to_check if file.is_file()]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_files_by_extension(\n files: list ,\n extensions: list\n):\n filtered_files = []\n for file in files:\n file_ext = os.path.splitext(file)[-1].lower()\n file_ext = _remove_dot_from_extension(file_ext)\n for extension in extensions:\n ext = _remove_dot_from_extension(extension).lower()\n # print(\"ext \\n\", ext)\n # print(\"file_ext \\n\", file_ext)\n if file_ext == ext:\n filtered_files.append(file)\n\n return filtered_files\n ...",
"def find_template_files(dir_name):\n list_files = []\n for dirName, subdirList, fileList in os.walk(dir_name):\n # Construct file path relative to the dir_name.\n for file_name in fileList:\n fp = os.path.join(dirName, file_name)\n r = re.compile(\".+\\.template$\")\n if r.match(fp): # if the file is a .template...\n # Save the template file for later.\n print_debug(\"Found template file {}\".format(fp))\n list_files.append(fp)\n return list_files",
"def find_files(extensions):\n\n return [fname for fname in os.listdir('.') if fname.endswith(extensions)]",
"def _get_contents_by_ext(self, collection):\n contents_by_ext = defaultdict(list)\n collection_dir = os.path.join(self.root_dir, collection)\n for name in sorted(os.listdir(collection_dir)):\n path = os.path.join(collection_dir, name)\n if os.path.isfile(path):\n root, ext = os.path.splitext(name)\n contents_by_ext[ext].append(root)\n return contents_by_ext",
"def list_templates(\n self,\n extensions: t.Optional[t.Collection[str]] = None,\n filter_func: t.Optional[t.Callable[[str], bool]] = None,\n ) -> t.List[str]:\n assert self.loader is not None, \"No loader configured.\"\n names = self.loader.list_templates()\n\n if extensions is not None:\n if filter_func is not None:\n raise TypeError(\n \"either extensions or filter_func can be passed, but not both\"\n )\n\n def filter_func(x: str) -> bool:\n return \".\" in x and x.rsplit(\".\", 1)[1] in extensions # type: ignore\n\n if filter_func is not None:\n names = [name for name in names if filter_func(name)]\n\n return names",
"def select_files_with_ext(file_resources: Dict[str, str], ext: str, contains: Optional[str] = None) -> Dict[str, str]:\n subset_file_resources = {}\n for filename, filepath in file_resources.items():\n if not isinstance(filepath, str): continue\n if filename.endswith(ext) and (contains is None or contains in filename):\n subset_file_resources[filename] = filepath\n\n return subset_file_resources",
"def get_templates(template_folder, search_term=''):\n return [template for template in os.listdir(template_folder)\n if search_term in template]",
"def test_get_filenames_by_ext():\n tmpdir = os.path.join(tempfile.gettempdir(), \"jade-test-tmp87alkj8ew\")\n os.makedirs(tmpdir, exist_ok=True)\n\n data = {\"A\": 1, \"B\": 2}\n json_file = os.path.join(tmpdir, \"a.json\")\n dump_data(data, json_file)\n\n toml_file = os.path.join(tmpdir, \"b.toml\")\n dump_data(data, toml_file)\n\n filenames = get_filenames_by_ext(tmpdir, \".json\")\n assert \"a.json\" in next(filenames)\n\n filenames = get_filenames_by_ext(tmpdir, \".toml\")\n assert \"b.toml\" in next(filenames)",
"def find_files_by_extensions(cls, search_path, allowed_ext):\n file_list = []\n for root, dirnames, filenames in os.walk(search_path):\n for filename in filenames:\n name, extension = os.path.splitext(filename)\n if extension in allowed_ext:\n file_list.append(os.path.join(root, filename))\n\n return file_list",
"def _index(search_path, ext=None):\n\n if ext is None:\n ext = \"TCASE\"\n\n tcases = set([])\n for _, _, files in os.walk(search_path):\n for tc_fname in files:\n if os.path.splitext(tc_fname)[-1] in EXTS[ext]:\n tcases.add(tc_fname)\n\n return tcases",
"def get_templates(templates_path_pattern):\n templates_paths = glob.glob(templates_path_pattern)\n cars = []\n notcars = []\n for template_path in templates_paths:\n if 'non-vehicles' in template_path:\n notcars.append(template_path)\n else:\n cars.append(template_path)\n return cars, notcars",
"def _get_files(p, fs, extensions=None):\n p = Path(p)\n res = [\n p / f\n for f in fs\n if not f.startswith(\".\")\n and ((not extensions) or f'.{f.split(\".\")[-1].lower()}' in extensions)\n ]\n return res",
"def collect_files_with_extensions(self, extension: str) -> List[str]:\n occurrences = []\n for position in os.listdir(self.directory):\n if os.path.isdir(position):\n for file in os.listdir(position):\n if os.path.isfile(os.path.join(position, file)) and file.endswith(\n extension\n ):\n occurrences.append(os.path.join(self.directory, position, file))\n return occurrences",
"def extension_templates(self) -> List[str]:\n default = [self.extension_file(), \"mako\"]\n return self.options.get(\"extensions\").get(\"templates\", default)",
"def get_template_files(fs, template_type):\n # no template fitting for null runs\n if fs[\"null_run\"]:\n template_type = None\n\n if \"template_type\" in fs:\n if template_type == fs[\"template_type\"]:\n return\n\n fs[\"template_type\"] = template_type\n\n # find all corresponding foreground templates\n if template_type is None:\n fs[\"template_root\"] = None\n fs[\"template_root2\"] = None\n fs[\"template_files\"] = None\n fs[\"template_files2\"] = None\n fs[\"template_noise_root\"] = None\n fs[\"template_noise_root2\"] = None\n fs[\"template_noise_files\"] = None\n fs[\"template_noise_files2\"] = None\n fs[\"num_template\"] = 0\n fs[\"num_template_noise\"] = 0\n else:\n num_template_noise = None\n for hm in [\"1\", \"2\"]:\n suff = \"\" if hm == \"1\" else \"2\"\n troot = os.path.join(\n fs[\"data_root\"],\n \"templates_{}\".format(template_type),\n \"halfmission-{}\".format(hm),\n )\n ### this block is so sims with template type like\n # 353_100_gauss_003 can use ensemble in 353_100_gauss\n tp = template_type.split(\"_\")\n ttype = template_type\n if tp[-1].isdigit():\n if ttype[-7:] not in [\"353_100\", \"217_100\"]:\n ttype = \"_\".join(tp[:-1])\n\n tnroot = os.path.join(\n fs[\"data_root\"],\n \"templates_noise_{}\".format(ttype),\n \"halfmission-{}\".format(hm),\n )\n\n tfiles = []\n tnfiles = []\n for f in fs[\"map_files\"]:\n nfile = f.replace(fs[\"map_root\"], troot)\n if not os.path.exists(nfile):\n raise OSError(\"Missing hm-{} template for {}\".format(hm, f))\n tfiles.append(nfile)\n nfiles = sorted(\n glob.glob(\n f.replace(fs[\"map_root\"], tnroot).replace(\n \".fits\", \"_*.fits\"\n )\n )\n )\n if not len(nfiles):\n raise OSError(\n \"Missing hm-{} template noise for {}\".format(hm, f)\n )\n tnfiles.append(nfiles)\n if num_template_noise is not None:\n if len(nfiles) != num_template_noise:\n raise OSError(\n \"Wrong number of template noise sims. \"\n \"Found {} files, expected {}.\".format(\n len(nfiles), num_template_noise\n )\n )\n\n num_template_noise = len(nfiles)\n\n tfiles = np.asarray(tfiles)\n tnfiles = np.asarray(tnfiles)\n fs[\"template_root{}\".format(suff)] = troot\n fs[\"template_files{}\".format(suff)] = tfiles\n fs[\"template_noise_root{}\".format(suff)] = tnroot\n fs[\"template_noise_files{}\".format(suff)] = tnfiles\n\n fs[\"num_template\"] = len(fs[\"template_files\"])\n fs[\"num_template_noise\"] = num_template_noise\n self.log(\n \"Found {} templates in {}\".format(\n fs[\"num_template\"], fs[\"template_root\"]\n ),\n \"info\",\n )\n self.log(\n \"Found {} template noise files in {}\".format(\n fs[\"num_template_noise\"], fs[\"template_noise_root\"]\n ),\n \"info\",\n )\n self.log(\"Template files: {}\".format(fs[\"template_files\"]), \"debug\")\n\n fields = [\n \"template_type\",\n \"template_root\",\n \"template_root2\",\n \"template_files\",\n \"template_files2\",\n \"template_noise_root\",\n \"template_noise_root2\",\n \"template_noise_files\",\n \"template_noise_files2\",\n \"num_template\",\n \"num_template_noise\",\n ]\n for k in fields:\n setattr(self, k, fs[k])",
"def list_templates(extensions: Optional[List[str]] = None) -> List[str]:\n if environment is None or not hasattr(environment, 'loader'):\n return []\n return environment.list_templates(extensions=extensions)",
"def get_files_with_extension(self, extension=sys.argv[1]) -> list:\n if extension == \"\":\n raise EnvironmentError(\"No extension provided!\")\n\n result = []\n for idx, file in enumerate(self.file_list):\n if re.search(extension + \"$\", file):\n result.append(file)\n\n if len(result) == 0:\n raise Exception(\"No {} files found.\".format(extension))\n\n return result",
"def searchfiles(directory, filenames, ext=None):\n if ext:\n filenames = [f'{file}{ext}' for file in filenames]\n return [\n file for file in Path(directory).glob('*')\n if file.name in filenames\n ]",
"def get_matched_extensions(request):\n\n def _match(e):\n return e.obj if e.obj.matches(request) else None\n\n result = EXTENSION_MANAGER.map(_match)\n return filter(bool, result)",
"def find_template_filename(self, template_name):\n\n def next_file():\n filename = self.path / template_name\n yield filename\n try:\n exts = self.default_file_extensions\n except AttributeError:\n return\n\n strfilename = str(filename)\n for ext in exts:\n yield Path(strfilename + ext)\n\n for filename in next_file():\n if filename.is_file():\n return filename",
"def get_files(path, extension):\n extension = listify(extension)\n return [p for p in path.ls() if p.suffix in extension and \"(\" not in p.stem]",
"def get_preferable_files(project, input_template):\n preferable_files = PreferableFile.objects.filter(\n input_template=input_template\n )\n files = []\n if len(preferable_files) > 0:\n for file in project.files:\n if PreferableFile.match_any(file.filename, preferable_files):\n files.append(file)\n if len(files) > 0:\n return files\n return []",
"def search_data(templates, pols, matched_pols=False, reverse_nesting=False, flatten=False):\n # type check\n if isinstance(templates, str):\n templates = [templates]\n if isinstance(pols, (str, int)):\n pols = [pols]\n # search for datafiles\n datafiles = []\n datapols = []\n for pol in pols:\n dps = []\n dfs = []\n for template in templates:\n _dfs = glob.glob(template.format(pol=pol))\n if len(_dfs) > 0:\n dfs.extend(_dfs)\n dps.extend([pol for df in _dfs])\n if len(dfs) > 0:\n datafiles.append(sorted(dfs))\n datapols.append(dps)\n # get unique files\n allfiles = [item for sublist in datafiles for item in sublist]\n allpols = [item for sublist in datapols for item in sublist]\n unique_files = set()\n for _file in allfiles:\n for pol in pols:\n if f\".{pol}.\" in _file:\n unique_files.update({_file.replace(f\".{pol}.\", \".{pol}.\")})\n break\n unique_files = sorted(unique_files)\n # check for unique files with all pols\n if matched_pols:\n Npols = len(pols)\n _templates = []\n for _file in unique_files:\n goodfile = True\n for pol in pols:\n if _file.format(pol=pol) not in allfiles:\n goodfile = False\n if goodfile:\n _templates.append(_file)\n\n # achieve goal by calling search_data with new _templates that are polarization matched\n datafiles, datapols = search_data(_templates, pols, matched_pols=False, reverse_nesting=False)\n # reverse nesting if desired\n if reverse_nesting:\n datafiles = []\n datapols = []\n for _file in unique_files:\n dfs = []\n dps = []\n for pol in pols:\n df = _file.format(pol=pol)\n if df in allfiles:\n dfs.append(df)\n dps.append(pol)\n datafiles.append(dfs)\n datapols.append(dps)\n # flatten\n if flatten:\n datafiles = [item for sublist in datafiles for item in sublist]\n datapols = [item for sublist in datapols for item in sublist]\n\n return datafiles, datapols",
"def find_files(directory, extensions):\n res = set()\n for filename in os.listdir(directory):\n if filename.endswith(extensions):\n res.add(\"{}/{}\".format(directory, filename))\n return list(res)",
"def find_files(path='', ext='', level=None, typ=list, dirs=False, files=True, verbosity=0):\n path = expand_path(path)\n gen = generate_files(path, ext=ext, level=level, dirs=dirs, files=files, verbosity=verbosity)\n if isinstance(typ(), Mapping):\n return typ((ff['path'], ff) for ff in gen)\n elif typ is not None:\n return typ(gen)\n else:\n return gen",
"def get_files_from_of_type(path: str, ext: str) -> List[str]:\n files = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, \"*.\" + str(ext)):\n files.append(os.path.join(root, filename))\n if not files:\n logging.error(\"No language files found in folder: \" + str(os.sep.join([convert_vars.BASE_PATH, \"source\"])))\n logging.debug(f\" --- found {len(files)} files of type {ext}. Showing first few:\\n* \" + str(\"\\n* \".join(files[:3])))\n return files",
"def search_extension(path, ext):\n output = []\n for root, dirs, files in os.walk(path, topdown=True):\n for file in files:\n if file.endswith(ext):\n path = os.path.join(root, file)\n output.append(path)\n\n return output",
"def list_extensions():\n formats = FileFormat.list_formats()\n return render_template('home.html', formats=formats)",
"def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files",
"def get_files(template_path, resource_type, skip_customer_resources=False):\n try:\n json_files = []\n for file_path in find_files(template_path, '*.json'):\n folder_list = list(file_path.split(\"/\"))\n if not ('/customer/' in file_path.lower() and skip_customer_resources):\n folder_name = list(folder_list[-1].split('.'))\n file_name = folder_name[-2]\n if resource_type in ['template', 'script', 'policy']:\n if '_ignore' not in file_path and not file_name.startswith('Blueprint_'):\n if folder_list[-2] == file_name:\n json_files.append(file_path)\n else:\n if file_name.startswith('Blueprint_'):\n folder_name = \"Blueprint_%s\" % folder_list[-2]\n if folder_name == file_name:\n json_files.append(file_path)\n else:\n json_file = open(file_path, 'r')\n content = json.dumps(json_file.read()).encode('utf-8')\n json_file.close()\n content = json.loads(content)\n blueprint_details = json.loads(content)\n bp_name = \"Blueprint_%s\" % blueprint_details.get('name')\n if bp_name == file_name:\n json_files.append(file_path)\n return json_files\n except Exception as e:\n sys.stderr.write(e.message)\n exit(1)"
] | [
"0.6474418",
"0.63438606",
"0.63118434",
"0.6272479",
"0.6263339",
"0.6136152",
"0.6116507",
"0.6069046",
"0.60550666",
"0.6040801",
"0.599823",
"0.5954619",
"0.5945622",
"0.59430933",
"0.5937887",
"0.58905417",
"0.58869183",
"0.5864783",
"0.58605796",
"0.58385235",
"0.5812919",
"0.5753414",
"0.5749823",
"0.5739253",
"0.57257795",
"0.5712119",
"0.56785357",
"0.5668057",
"0.56670326",
"0.5654497"
] | 0.74726915 | 0 |
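To make the lookup order in the `find_template_companion_files` document above concrete: for each prefix of the template's suffixes it derives one candidate name per data extension, because `Path.with_suffix` replaces only the last suffix. A small sketch with hypothetical file names:

```python
from pathlib import Path

template = Path("/site/pages/index.html.j2")   # hypothetical template
basename = template.name.split(".")[0]
candidates = []
for i in range(len(template.suffixes)):        # suffixes: ['.html', '.j2']
    ext = "".join(template.suffixes[: i + 1])
    for data_ext in (".yml", ".json"):
        candidates.append(Path(basename + ext).with_suffix(data_ext))
print(candidates)
# [PosixPath('index.yml'), PosixPath('index.json'),
#  PosixPath('index.html.yml'), PosixPath('index.html.json')]
```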
Transform x elementwise through an affine function y = exp(s)*x + t, where s = st[...,0] and t = st[...,1], with s.shape == x.shape == t.shape. The Jacobian of this transformation is the coordinate-wise product of the scaling factors: J = prod_i exp(s[...,i]). | def element_wise_affine(x, st, compute_jacobian=True):
es = torch.exp(st[..., 0])
t = st[..., 1]
logj = None
if compute_jacobian:
logj = torch.sum(torch.log(es), dim=-1)
return es * x + t, logj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inverse_element_wise_affine(x, st, compute_jacobian=True):\n es = torch.exp(-st[..., 0])\n t = st[..., 1]\n logj = None\n if compute_jacobian:\n logj = torch.sum(torch.log(es), dim=-1)\n\n return es * (x - t), logj",
"def affine(params, x):\n return np.dot(params['w'], x) + params['b']",
"def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _",
"def affineTransform(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b",
"def affine_transform(trans_mat, p0):\r\n n_data, n_dim = np.shape(p0)\r\n p0 = np.hstack((p0, np.ones((n_data, 1))))\r\n #return np.transpose(np.dot(np.transpose(trans_mat), np.transpose(p0)))\r\n return np.dot(p0, trans_mat)",
"def affine_transform(x, output_dim, name=None):\n\n w = tf.get_variable(name + \"_w\", [x.get_shape()[1], output_dim], initializer=tf.truncated_normal_initializer(stddev=0.02))\n b = tf.get_variable(name + \"_b\", [output_dim], initializer=tf.constant_initializer(0.0))\n\n return tf.matmul(x, w) + b",
"def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim = 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)",
"def affine_mult(affine, coordinates):\n return np.dot(coordinates, affine[:3, :3].T) + affine[:3, -1]",
"def apply_affine_transform(x, M):\n is1d = len(x.shape) == 1\n if is1d:\n x = np.expand_dims(x, axis=0)\n\n x_hom = np.concatenate(\n [x, np.ones((x.shape[0], 1), dtype=x.dtype)], axis=-1\n )\n x_out = x_hom @ M.T\n if is1d:\n x_out = np.squeeze(x_out, axis=0)\n return x_out",
"def transformAffine(self, coords):\n coordsshape = coords.shape\n dims = coordsshape[0] + 1\n coords = coords.reshape((len(coords), -1))\n coords = np.concatenate((coords, np.ones((1, len(coords[0])))), 0)\n affine = np.eye(dims)\n # now transform first to center:\n meanvec = np.mean(coords, 1)\n center = np.eye(dims)\n center[:-1, -1] = -meanvec[:-1]\n affine = np.matmul(center, affine)\n\n if np.sum(self.shift):\n affine[:-1, -1] += (self.deformrandomstate.rand(dims - 1) - 0.5) * np.float32(self.shift)\n if np.max(self.scaling) > 1:\n scales = np.ones(dims)\n # scales[:-1] = (self.deformrandomstate.rand(dims-1)-0.5)*(self.scaling-1.0/self.scaling)+(self.scaling+1/self.scaling)/2\n scales[:-1] = self.scaling ** (self.deformrandomstate.rand(dims - 1) * 2 - 1)\n scales = np.diag(scales)\n # print(scales)\n affine = np.matmul(scales, affine)\n if np.sum(self.rotation):\n affine = self._rotate(affine)\n # move back to location:\n center[:-1, -1] = -center[:-1, -1]\n affine = np.matmul(center, affine)\n # now appyl to coords:\n coords = np.matmul(affine, coords)\n coords = coords[:-1]\n coords = coords.reshape(coordsshape)\n return coords",
"def transAffine2D( iScale=(1, 1), iTrans=(0, 0), iRot=0, iShear=(0, 0) ): \n iRot = iRot * np.pi / 180\n oMatScale = np.matrix( ((iScale[0],0,0),(0,iScale[1],0),(0,0,1)) )\n oMatTrans = np.matrix( ((1,0,iTrans[0]),(0,1,iTrans[1]),(0,0,1)) )\n oMatRot = np.matrix( ((np.cos(iRot),-np.sin(iRot),0),\\\n (np.sin(iRot),np.cos(iRot),0),(0,0,1)) )\n oMatShear = np.matrix( ((1,iShear[0],0),(iShear[1],1,0),(0,0,1)) )\n # ustvari izhodno matriko\n oMat2D = oMatTrans * oMatShear * oMatRot * oMatScale\n return oMat2D",
"def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()",
"def estimate_stage_affine(t0, t1):\n src = np.array([t.tforms[0].translation for t in t0])\n dst = np.array([t.tforms[1].translation for t in t1])\n aff = renderapi.transform.AffineModel()\n aff.estimate(src, dst)\n return aff",
"def affine_forward(x, W, b):\r\n x2d = np.reshape(x, (x.shape[0], -1)) # convert 4D input matrix to 2D \r\n out = np.dot(x2d, W) + b # linear transformation\r\n cache = (x, W, b) # keep for backward step (stay with us)\r\n return out, cache",
"def calc_affine(df):\n\tx0 = df.columns[0]\n\ty0 = df.index[0]\n\tdx = df.columns[1] - df.columns[0]\n\tdy = df.index[1] - df.index[0]\n\t\n\tt = affine.Affine(dx, 0, x0 , 0, dy ,y0 - dy) \n\t# y0 - dy because anker point is in the south!\n\treturn t",
"def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache",
"def affine_forward(x, w, b):\n out = None\n ###########################################################################\n # TODO: Implement the affine forward pass. Store the result in out. You #\n # will need to reshape the input into rows. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n t = np.reshape(x,(x.shape[0],np.prod(np.shape(x)[1:])))\n \n\n out = np.dot(t,w) + b\n \n #print(np.shape(out))\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b)\n return out, cache",
"def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine",
"def get_affine_matrix2d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: torch.Tensor,\n angle: torch.Tensor,\n sx: Optional[torch.Tensor] = None,\n sy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h",
"def get_affine_matrix2d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angle: Tensor,\n sx: Tensor | None = None,\n sy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h",
"def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,\n row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='nearest', cval=0., order=1):\n if scipy is None:\n raise ImportError('Image transformations require SciPy. '\n 'Install SciPy.')\n transform_matrix = None\n if theta != 0:\n theta = np.deg2rad(theta)\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n transform_matrix = rotation_matrix\n\n if tx != 0 or ty != 0:\n shift_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n if transform_matrix is None:\n transform_matrix = shift_matrix\n else:\n transform_matrix = np.dot(transform_matrix, shift_matrix)\n\n if shear != 0:\n shear = np.deg2rad(shear)\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n if transform_matrix is None:\n transform_matrix = shear_matrix\n else:\n transform_matrix = np.dot(transform_matrix, shear_matrix)\n\n if zx != 1 or zy != 1:\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n if transform_matrix is None:\n transform_matrix = zoom_matrix\n else:\n transform_matrix = np.dot(transform_matrix, zoom_matrix)\n\n if transform_matrix is not None:\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(\n transform_matrix, h, w)\n x = np.rollaxis(x, channel_axis, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n\n channel_images = [ndimage.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=order,\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x",
"def __affine_geo_transformation(x, y, gtr):\n\n # https://gdal.org/user/raster_data_model.html#affine-geotransform\n # Affine transformation rewritten for rasterio:\n gtr_x = gtr[2] + (x + 0.5) * gtr[0] + (y + 0.5) * gtr[1]\n gtr_y = gtr[5] + (x + 0.5) * gtr[3] + (y + 0.5) * gtr[4]\n\n return gtr_x, gtr_y",
"def fit_transform(self, x: Array2D) -> Array2D:",
"def get_affine(x, m, c):\n x = m*x + c\n return x",
"def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache",
"def affine_forward(X, W, b):\n return np.dot(X, W) + b",
"def apply_affine(A: Affine, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n\n shape = x.shape\n\n A = np.asarray(A).reshape(3, 3) # type: ignore[assignment]\n t = A[:2, -1].reshape((2, 1)) # type: ignore[index]\n A = A[:2, :2] # type: ignore[index]\n\n x, y = A @ np.vstack([x.ravel(), y.ravel()]) + t\n x, y = (a.reshape(shape) for a in (x, y))\n return (x, y)",
"def affine_forward(x, w, b):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.dot(x,w)+b\n cache=(x,w,b)\n return(out, cache)",
"def affine_forward(x, w, b):\n ############################################################################\n # TODO: Implement the affine forward pass. Store the result in 'out'. You #\n # will need to reshape the input into rows. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n D,M = w.shape\n # reshape get a new x\n new_x = x.reshape(N,D)\n # get the output\n out = np.dot(new_x,w) + np.expand_dims(b,axis=0)\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out",
"def get_transform(ds):\n\n if 'transform' in ds.attrs:\n ds_trans = ds.attrs['transform']\n if isinstance(ds_trans, Affine):\n return ds_trans\n else:\n return Affine(*ds_trans)\n\n elif 'crs' in ds.data_vars and 'i2m' in ds.data_vars['crs'].attrs:\n transf_str = ds.data_vars['crs'].attrs['i2m']\n a = list(map(float, transf_str.split(',')))\n return Affine(a[0], a[2], a[4], a[1], a[3], a[5])\n\n else:\n resx, resy = get_resolution(ds)\n xoff = ds['x'].values.min()\n yoff = ds['y'].values.max()\n return Affine(resx, 0, xoff, 0, resy, yoff)"
] | [
"0.7174713",
"0.6328652",
"0.6287944",
"0.6152823",
"0.6144677",
"0.5968129",
"0.5957975",
"0.5921042",
"0.5909272",
"0.5798729",
"0.5789589",
"0.5777053",
"0.56403214",
"0.5585435",
"0.55693203",
"0.5553606",
"0.5544687",
"0.5482424",
"0.5461267",
"0.54540503",
"0.54315287",
"0.5431089",
"0.54214513",
"0.5414109",
"0.541142",
"0.5406962",
"0.53979105",
"0.53966075",
"0.5387316",
"0.53590506"
] | 0.8046413 | 0 |
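
A minimal sketch of the entry above, assuming PyTorch is available (the batch shapes are illustrative). Because the transform is diagonal, log|det J| reduces to the sum of the scaling exponents s over the transformed coordinates, which is what the returned logj should equal.

import torch

def element_wise_affine(x, st, compute_jacobian=True):
    # Diagonal affine map: y_i = exp(s_i) * x_i + t_i
    es = torch.exp(st[..., 0])
    t = st[..., 1]
    logj = torch.sum(torch.log(es), dim=-1) if compute_jacobian else None
    return es * x + t, logj

x = torch.randn(4, 3)        # batch of 4 points in 3 dimensions
st = torch.randn(4, 3, 2)    # per-coordinate (s, t) parameters
y, logj = element_wise_affine(x, st)
# log|det J| = sum_i s_i, since J is diagonal with entries exp(s_i)
assert torch.allclose(logj, st[..., 0].sum(dim=-1), atol=1e-6)
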
Transform x elementwise through an affine function y = exp(-s)*(x - t), where s = st[...,0] and t = st[...,1] with s.shape == x.shape == t.shape. This is the inverse of `element_wise_affine` above for the same set of parameters st. The Jacobian of this transformation is the coordinatewise product of the scaling factors: J = prod(es[...,i], i). | def inverse_element_wise_affine(x, st, compute_jacobian=True):
es = torch.exp(-st[..., 0])
t = st[..., 1]
logj = None
if compute_jacobian:
logj = torch.sum(torch.log(es), dim=-1)
return es * (x - t), logj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def element_wise_affine(x, st, compute_jacobian=True):\n es = torch.exp(st[..., 0])\n t = st[..., 1]\n logj = None\n if compute_jacobian:\n logj = torch.sum(torch.log(es), dim=-1)\n\n return es * x + t, logj",
"def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _",
"def affine(params, x):\n return np.dot(params['w'], x) + params['b']",
"def affine_mult(affine, coordinates):\n return np.dot(coordinates, affine[:3, :3].T) + affine[:3, -1]",
"def affineTransform(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b",
"def affine_transform(x, output_dim, name=None):\n\n w = tf.get_variable(name + \"_w\", [x.get_shape()[1], output_dim], initializer=tf.truncated_normal_initializer(stddev=0.02))\n b = tf.get_variable(name + \"_b\", [output_dim], initializer=tf.constant_initializer(0.0))\n\n return tf.matmul(x, w) + b",
"def apply_affine_transform(x, M):\n is1d = len(x.shape) == 1\n if is1d:\n x = np.expand_dims(x, axis=0)\n\n x_hom = np.concatenate(\n [x, np.ones((x.shape[0], 1), dtype=x.dtype)], axis=-1\n )\n x_out = x_hom @ M.T\n if is1d:\n x_out = np.squeeze(x_out, axis=0)\n return x_out",
"def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim = 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)",
"def affine_transform(trans_mat, p0):\r\n n_data, n_dim = np.shape(p0)\r\n p0 = np.hstack((p0, np.ones((n_data, 1))))\r\n #return np.transpose(np.dot(np.transpose(trans_mat), np.transpose(p0)))\r\n return np.dot(p0, trans_mat)",
"def apply_affine(A: Affine, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n\n shape = x.shape\n\n A = np.asarray(A).reshape(3, 3) # type: ignore[assignment]\n t = A[:2, -1].reshape((2, 1)) # type: ignore[index]\n A = A[:2, :2] # type: ignore[index]\n\n x, y = A @ np.vstack([x.ravel(), y.ravel()]) + t\n x, y = (a.reshape(shape) for a in (x, y))\n return (x, y)",
"def affine_forward(x, W, b):\r\n x2d = np.reshape(x, (x.shape[0], -1)) # convert 4D input matrix to 2D \r\n out = np.dot(x2d, W) + b # linear transformation\r\n cache = (x, W, b) # keep for backward step (stay with us)\r\n return out, cache",
"def affine_forward(x, w, b):\n out = None\n ###########################################################################\n # TODO: Implement the affine forward pass. Store the result in out. You #\n # will need to reshape the input into rows. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n t = np.reshape(x,(x.shape[0],np.prod(np.shape(x)[1:])))\n \n\n out = np.dot(t,w) + b\n \n #print(np.shape(out))\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b)\n return out, cache",
"def exp(tensor):\n return _elementary_op(tensor, np.exp, np.exp)",
"def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache",
"def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache",
"def affine_forward(X, W, b):\n return np.dot(X, W) + b",
"def affine_forward(x, w, b):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.dot(x,w)+b\n cache=(x,w,b)\n return(out, cache)",
"def affine_forward(x, w, b):\n out = None\n ###########################################################################\n # TODO: Implement the affine forward pass. Store the result in out. You #\n # will need to reshape the input into rows. #\n ###########################################################################\n reshaped_inp = np.reshape(x,(int(x.shape[0]),int(np.prod(x.shape) / x.shape[0])))\n out = reshaped_inp.dot(w) + b\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b)\n return out, cache",
"def affine_forward(x, w, b):\n ############################################################################\n # TODO: Implement the affine forward pass. Store the result in 'out'. You #\n # will need to reshape the input into rows. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n D,M = w.shape\n # reshape get a new x\n new_x = x.reshape(N,D)\n # get the output\n out = np.dot(new_x,w) + np.expand_dims(b,axis=0)\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out",
"def affine_forward(x, w, b):\n out = None\n \n # reshape the input into (N, d_1 *...* d_k)\n input_shape = x.shape\n prod = 1\n for i in range(1,len(input_shape)):\n prod *= input_shape[i]\n\n a = x.reshape(x.shape[0],prod)\n out = np.dot(a,w) + b\n \n cache = (x, w, b)\n return out, cache",
"def affine_forward(x, w, b):\n out = None\n ###########################################################################\n # TODO: Implement the affine forward pass. Store the result in out. You #\n # will need to reshape the input into rows. #\n ###########################################################################\n dim_size = x[0].shape\n X = x.reshape(x.shape[0], np.prod(dim_size))\n out = X.dot(w) + b\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b)\n return out, cache",
"def transformAffine(self, coords):\n coordsshape = coords.shape\n dims = coordsshape[0] + 1\n coords = coords.reshape((len(coords), -1))\n coords = np.concatenate((coords, np.ones((1, len(coords[0])))), 0)\n affine = np.eye(dims)\n # now transform first to center:\n meanvec = np.mean(coords, 1)\n center = np.eye(dims)\n center[:-1, -1] = -meanvec[:-1]\n affine = np.matmul(center, affine)\n\n if np.sum(self.shift):\n affine[:-1, -1] += (self.deformrandomstate.rand(dims - 1) - 0.5) * np.float32(self.shift)\n if np.max(self.scaling) > 1:\n scales = np.ones(dims)\n # scales[:-1] = (self.deformrandomstate.rand(dims-1)-0.5)*(self.scaling-1.0/self.scaling)+(self.scaling+1/self.scaling)/2\n scales[:-1] = self.scaling ** (self.deformrandomstate.rand(dims - 1) * 2 - 1)\n scales = np.diag(scales)\n # print(scales)\n affine = np.matmul(scales, affine)\n if np.sum(self.rotation):\n affine = self._rotate(affine)\n # move back to location:\n center[:-1, -1] = -center[:-1, -1]\n affine = np.matmul(center, affine)\n # now appyl to coords:\n coords = np.matmul(affine, coords)\n coords = coords[:-1]\n coords = coords.reshape(coordsshape)\n return coords",
"def affine_forward(x, w, b):\n out = None\n ########################################################################\n # TODO: Implement the affine forward pass. Store the result in out. #\n # You will need to reshape the input into rows. #\n ########################################################################\n\n x_reshaped = x.reshape(x.shape[:1] + (-1,))\n out = x_reshaped.dot(w) + b\n\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n cache = (x, w, b)\n return out, cache",
"def affine_forward(x, w, b):\n N = x.shape[0]\n\n # reshape input into rows\n output = x.reshape([N, -1]).dot(w) + b\n cache = (x, w, b)\n\n return output, cache",
"def affine_forward(x, w, b):\n out = None\n x_shape = x.shape\n x_reshaped = x.reshape(x_shape[0], np.prod(x_shape[1:]))\n out = np.dot(x_reshaped, w) + b\n cache = (x, w, b)\n return out, cache",
"def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()",
"def affine_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0., order=1):\n # transform_matrix = transform_matrix_offset_center()\n # asdihasid\n # asd\n\n x = np.rollaxis(x, channel_index, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n channel_images = [\n ndi.interpolation.affine_transform(\n x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval\n ) for x_channel in x\n ]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_index + 1)\n return x",
"def estimate_stage_affine(t0, t1):\n src = np.array([t.tforms[0].translation for t in t0])\n dst = np.array([t.tforms[1].translation for t in t1])\n aff = renderapi.transform.AffineModel()\n aff.estimate(src, dst)\n return aff",
"def get_affine(x, m, c):\n x = m*x + c\n return x",
"def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n \n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dz = np.zeros_like(dx)\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)"
] | [
"0.83168423",
"0.6524651",
"0.6448697",
"0.6208682",
"0.6185584",
"0.60859853",
"0.60077345",
"0.6005692",
"0.6002416",
"0.585732",
"0.5842322",
"0.57764435",
"0.5741281",
"0.5718799",
"0.57156426",
"0.57109356",
"0.5676722",
"0.56560165",
"0.5641633",
"0.56377053",
"0.56344235",
"0.5631245",
"0.56272626",
"0.56263775",
"0.5614629",
"0.55554545",
"0.5553882",
"0.5552548",
"0.55399966",
"0.55360585"
] | 0.77893174 | 1 |
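
A round-trip sketch for the forward/inverse pair above, again assuming PyTorch; forward_affine and inverse_affine are hypothetical condensed helpers, not the originals. For the same st, the inverse undoes the forward map and the two log-Jacobians cancel, which is the invariant a coupling layer relies on.

import torch

def forward_affine(x, st):
    # y = exp(s) * x + t, with log|det J| = sum(s)
    return torch.exp(st[..., 0]) * x + st[..., 1], st[..., 0].sum(dim=-1)

def inverse_affine(x, st):
    # y = exp(-s) * (x - t), with log|det J| = sum(-s)
    return torch.exp(-st[..., 0]) * (x - st[..., 1]), -st[..., 0].sum(dim=-1)

x = torch.randn(8, 5)
st = torch.randn(8, 5, 2)
y, logj_fwd = forward_affine(x, st)
x_back, logj_inv = inverse_affine(y, st)
assert torch.allclose(x, x_back, atol=1e-5)
assert torch.allclose(logj_fwd + logj_inv, torch.zeros(8), atol=1e-5)
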
Initialize the axis ranges from the provided Plot or renderer. | def initialize_axis_ranges(self, plot, transform=None):
if transform is None:
def transform(x):
return x
elif isinstance(transform, int):
ndigits = transform
def transform(x):
return round(x, ndigits)
    # Avoid polluting the UI with nonsensical digits
self.x_axis_range_low = transform(plot.x_axis.mapper.range.low)
self.auto_x_axis_range_low = self.x_axis_range_low
self.x_axis_range_high = transform(plot.x_axis.mapper.range.high)
self.auto_x_axis_range_high = self.x_axis_range_high
self.y_axis_range_low = transform(plot.y_axis.mapper.range.low)
self.auto_y_axis_range_low = self.y_axis_range_low
self.y_axis_range_high = transform(plot.y_axis.mapper.range.high)
self.auto_y_axis_range_high = self.y_axis_range_high | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()",
"def initialize_plot(self, ranges=None):\n raise NotImplementedError",
"def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])",
"def _axes_domain(self, *args, **kwargs):\n # See _add_gridline_label for detials\n lon_0 = self.axes.projection.proj4_params.get('lon_0', 0)\n x_range, y_range = type(self)._axes_domain(self, *args, **kwargs)\n x_range = np.asarray(x_range) + lon_0\n return x_range, y_range",
"def initAxisValues(self, axis):\n \n if (axis != None):\n if self.isTime:\n self.axisValues = [repr(t.tocomponent())\n for t in axis.asRelativeTime()]\n else:\n self.axisValues = axis.getValue()\n else:\n raise TypeError(\"Error: axis is not defined\")\n\n self.axisIndices = range(len(self.axisValues))\n self.updateMin(0)\n self.updateMax(len(self.axisValues) - 1)",
"def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )",
"def __init__(self, fig, variables, ranges, n_ordinate_levels=6):\n angles = np.arange(0, 360, 360./len(variables))\n axes = [fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True,\n label=\"axes{}\".format(i)) for i in range(len(variables))]\n for ax in axes[1:]:\n ax.patch.set_visible(False)\n ax.grid(\"off\")\n ax.xaxis.set_visible(False)\n for i, ax in enumerate(axes):\n grid = np.linspace(*ranges[i], num=n_ordinate_levels)\n gridlabel = [\"{}\".format(round(x, 2)) for x in grid]\n if ranges[i][0] > ranges[i][1]:\n grid = grid[::-1] # hack to invert grid\n gridlabel[0] = \"\" # clean up origin\n set_rgrids(ax, grid, labels=gridlabel, angle=angles[i])\n ax.set_ylim(*ranges[i])\n # variables for plotting\n self.angle = np.deg2rad(np.r_[angles, angles[0]])\n self.ranges = ranges\n self.ax = axes[0]",
"def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()",
"def set_range(self, axis: int, range: Sequence[Union[int, float]]):\n if axis < 0:\n axis += self.ndim\n if axis < 0:\n raise ValueError(\n f'axis is negative, expected positive, got {axis}'\n )\n if self.range[axis] != range:\n self._range[axis] = range\n self.events.range(axis=axis)",
"def set_range(\n self,\n axis: Union[int, Sequence[int]],\n _range: Union[\n Sequence[Union[int, float]], Sequence[Sequence[Union[int, float]]]\n ],\n ):\n if isinstance(axis, Integral):\n axis = assert_axis_in_bounds(axis, self.ndim) # type: ignore\n if self.range[axis] != _range:\n full_range = list(self.range)\n full_range[axis] = _range\n self.range = full_range\n else:\n full_range = list(self.range)\n # cast range to list for list comparison below\n _range = list(_range) # type: ignore\n axis = tuple(axis) # type: ignore\n if len(axis) != len(_range):\n raise ValueError(\n trans._(\"axis and _range sequences must have equal length\")\n )\n if _range != full_range:\n for ax, r in zip(axis, _range):\n ax = assert_axis_in_bounds(int(ax), self.ndim)\n full_range[ax] = r\n self.range = full_range",
"def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)",
"def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")",
"def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None",
"def _ps_init(self):\n\n self.ps_ax.set_xlim(-np.pi, np.pi)\n self.ps_ax.set_ylim(-10, 10)\n self.ps_ax.set_xlabel(\"degree [rad]\")\n self.ps_ax.set_ylabel(\"velocity [rad/s]\")\n for ap in self.ps_plots:\n ap.set_data([], [])\n return self.ps_plots",
"def set_initial_dims(self, axis, insert=False):\n if insert:\n # Insert default values\n # Range value is (min, max, step) for the entire slider\n self._range.insert(axis, (0, 2, 1))\n # Point is the slider value if in point mode\n self._point.insert(axis, 0)\n # Interval value is the (min, max) of the slider selction\n # if in interval mode\n self._interval.insert(axis, (0, 1))\n self._mode.insert(axis, DimsMode.POINT)\n cur_order = [o if o < axis else o + 1 for o in self.order]\n self._order = [axis] + cur_order\n else:\n # Range value is (min, max, step) for the entire slider\n self._range[axis] = (0, 2, 1)\n # Point is the slider value if in point mode\n self._point[axis] = 0\n # Interval value is the (min, max) of the slider selction\n # if in interval mode\n self._interval[axis] = (0, 1)\n self._mode[axis] = DimsMode.POINT\n self._order[axis] = axis",
"def update_plots_using_region(self):\n self.frequency_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.resistance_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.temperature_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.pressure_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.humidity_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)",
"def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)",
"def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)",
"def render_range_init():\n\n # Adding/Checking ftrack render range attribute\n defaultRenderGlobals = pm.PyNode(\"defaultRenderGlobals\")\n render_range_set = False\n if hasattr(defaultRenderGlobals, \"ftrackRenderRangeSet\"):\n attr = pm.Attribute(\"defaultRenderGlobals.ftrackRenderRangeSet\")\n render_range_set = attr.get()\n else:\n pm.addAttr(\n defaultRenderGlobals,\n longName=\"ftrackRenderRangeSet\",\n defaultValue=True,\n attributeType=\"bool\"\n )\n\n if not render_range_set:\n\n task = ftrack.Task(os.environ[\"FTRACK_TASKID\"])\n\n startFrame = float(task.getParent().get(\"fstart\"))\n endFrame = float(task.getParent().get(\"fend\"))\n\n handles = float(task.getParent().get(\"handles\"))\n\n mc.warning(\n \"Setting render range to {0} {1} \".format(startFrame, endFrame)\n )\n\n # Add handles to start and end frame\n hsf = startFrame - handles\n hef = endFrame + handles\n\n defaultRenderGlobals.animation.set(True)\n defaultRenderGlobals.animationRange.set(1)\n defaultRenderGlobals.startFrame.set(hsf)\n defaultRenderGlobals.endFrame.set(hef)\n\n # Vray specific resolution\n if pm.objExists(\"vraySettings\"):\n vray_settings = pm.PyNode(\"vraySettings\")\n vray_settings.animType.set(1)",
"def _plot_init(self):\n pass",
"def _plot_init(self):\n pass",
"def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])",
"def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)",
"def init_plot(self, num_axes):\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)",
"def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n self.data['Ymax'] = Ylim[1]",
"def setAxisParts(lowx='all', lefty='all', upx='ticks', righty='ticks'):\n partdict = {'none':'NONE','lines':'LINE','ticks':'TICKS',\n 'labels':'LABELS', 'all':'NAME'} \n dislin.setgrf(partdict[lowx], partdict[lefty],\\\n partdict[upx], partdict[righty])",
"def _init(self) -> List[PlotType]:\n self.plots[0].set_data([], [], 'bx', markersize=5)\n self.plots[1].set_data([], [], 'r.', markersize=15)\n return self.plots",
"def __init__(self, axis1, axis2=None, bins=100, same_scale=False,\n axis1_values=None, axis2_values=None, **kwargs):\n self.same_scale = same_scale\n\n self.axis1 = axis1\n self.axis1_limits = None\n\n if isinstance(axis1_values, (float, int)):\n axis1_values = [axis1_values]\n self.axis1_values = axis1_values\n\n self.axis2 = axis2\n self.axis2_limits = None\n if isinstance(axis2_values, (float, int)):\n axis2_values = [axis2_values]\n self.axis2_values = axis2_values\n\n self.bins = bins\n\n self.plot_options = kwargs",
"def setValues(self, values):\n if values is not None:\n self.scale_min, self.scale_max = values\n if self.scale_min is None:\n self.scale_min = self.start\n if self.scale_max is None:\n self.scale_max = self.end\n else:\n self.scale_min = self.start\n self.scale_max = self.end\n self.emitRange()\n self.updateDisplayValues()\n self.update()",
"def setRange(self, x_range, y_range):\n self._visualiser._plt.setRange(xRange=x_range, yRange=y_range)"
] | [
"0.71792495",
"0.7129534",
"0.68548447",
"0.67877525",
"0.6537042",
"0.64943486",
"0.641954",
"0.6235867",
"0.6223747",
"0.6188791",
"0.61873025",
"0.61726326",
"0.6160985",
"0.6133905",
"0.61263645",
"0.61139727",
"0.61138016",
"0.61124223",
"0.60551167",
"0.60020185",
"0.60020185",
"0.59837145",
"0.5980665",
"0.59750354",
"0.59656835",
"0.5928879",
"0.5917828",
"0.590145",
"0.58835524",
"0.5881805"
] | 0.7375276 | 0 |
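
A standalone sketch of the transform-dispatch idiom in the entry above: None means identity and an int means "round to that many digits". The Plot is mocked here with SimpleNamespace; the real object (presumably a Chaco-style Plot, which is an assumption) exposes the same x_axis.mapper.range.low/high attributes.

from types import SimpleNamespace

def mock_axis(low, high):
    # Duck-typed stand-in for an axis whose mapper.range has low/high bounds
    return SimpleNamespace(mapper=SimpleNamespace(range=SimpleNamespace(low=low, high=high)))

plot = SimpleNamespace(x_axis=mock_axis(0.123456, 9.87654),
                       y_axis=mock_axis(-1.000001, 1.000001))

def resolve_transform(transform):
    if transform is None:
        return lambda v: v                    # identity
    if isinstance(transform, int):
        ndigits = transform
        return lambda v: round(v, ndigits)    # trim digits shown in the UI
    return transform                          # assume a callable was passed

t = resolve_transform(3)
print(t(plot.x_axis.mapper.range.low))    # -> 0.123
print(t(plot.y_axis.mapper.range.high))   # -> 1.0
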
Create an archive from the given tree, upload, and untar it. | def upload_tar_from_git():
require("release", provided_by=[deploy])
tree = prompt("Please enter a branch or SHA1 to deploy", default="master")
local("git archive --format=tar %s | gzip > %s.tar.gz" % (tree, env['release']))
sudo("mkdir %(path)s/releases/%(release)s" % env)
put("%(release)s.tar.gz" % env, "%(path)s/packages/" % env, use_sudo=True)
sudo("cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz" % env)
local("rm %(release)s.tar.gz" % env) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))",
"def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C {path}\")",
"def untar(tar_path, cleanup=False):\n tfile = tarfile.open(tar_path, 'r')\n tfile.extractall(os.path.dirname(tar_path))\n tfile.close()\n if cleanup:\n os.remove(tar_path)",
"def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar %s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))",
"def untar(file_path, target_dir=None, gzipped=True, verbose=False):\n return posix.untar(file_path, target_dir, gzipped, verbose)",
"def create_tarball(fileobj, path, callback=None, compression_level=None):\n tar_cmd = [\"tar\", \"-zc\", \"--directory=%s\" % path, \".\"]\n env = os.environ.copy()\n if compression_level and 1 <= compression_level <= 9:\n env[\"GZIP\"] = \"-%d\" % compression_level\n tar_proc = make_subprocess(tar_cmd, stdout=True, stderr=True, env=env)\n\n try:\n while True:\n chunk = tar_proc.stdout.read(CHUNK_SIZE)\n if chunk == '':\n break\n\n if callback:\n callback(chunk)\n\n if fileobj:\n fileobj.write(chunk)\n except Exception:\n try_kill_process(tar_proc)\n raise\n\n finish_subprocess(tar_proc, tar_cmd)",
"def untar(archive):\n log.info('Unpacking archive \"%s\".' % archive)\n tar = module.params['tar']\n tar_extra_options = shlex.split(module.params['tar_extra_options'])\n if not tar:\n tar = module.get_bin_path('tar', required=True)\n if archive.endswith('.gz'):\n uncompress = 'z'\n elif archive.endswith('.bz2'):\n uncompress = 'j'\n else:\n raise ValueError('Unsupported compression type: %s' % archive)\n options = ''.join(['x', uncompress, 'f'])\n args = [tar, options] + tar_extra_options + [archive]\n rc, out, err = module.run_command(args)\n log.info('untar: rc=%d out=%s err=%s', rc, out, err)\n if rc != 0:\n raise ValueError('tar command failed: %d' % rc)",
"def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )",
"def upload_artifact(revision):\n # we upload the file from the local /tmp to the remote /tmp dir\n tmp_path = '/tmp/{revision}.tar.gz'.format(revision=revision)\n put(tmp_path, tmp_path)\n\n destination_path = '{base}/{revision}'.format(base=BASE_PATH,\n revision=revision)\n untar(tmp_path, destination_path)\n\n # remove both local and remote archives\n run('rm {}'.format(tmp_path))\n local('rm {}'.format(tmp_path))",
"def pack(archive: Union[Path, str],\n paths: List[Union[Path, str]],\n cwd: Optional[Path] = None,\n exclude: Optional[List[Union[Path, str]]] = ()):\n archive = Path(archive)\n if cwd is None:\n cwd = Path.cwd()\n if archive.suffix == '.xz':\n archive = archive.with_suffix('')\n\n # Make sure all the paths have sane permissions.\n def walk(path):\n if path.is_symlink():\n return\n elif path.is_dir():\n # All dirs should be 755.\n mode = path.stat().st_mode & 0o777\n if mode != 0o755:\n path.chmod(0o755)\n\n for subpath in path.glob('*'):\n walk(subpath)\n elif path.is_file():\n # All scripts should be 755 while other files should be 644.\n mode = path.stat().st_mode & 0o777\n if mode in (0o755, 0o644):\n return\n if mode & 0o111:\n path.chmod(0o755)\n else:\n path.chmod(0o644)\n else:\n raise ValueError(f'{path}: unknown file type')\n\n logging.info('Forcing sane permissions on inputs')\n for path in paths:\n walk(cwd / path)\n\n logging.info('Creating %s tarball', archive.name)\n # We use relpath here to help out tar on platforms where it doesn't like\n # paths with colons in them (e.g. Windows). We have to construct the full\n # before running through relpath as relative archives will implicitly be\n # checked against os.getcwd rather than the explicit cwd.\n tar = os.path.relpath(cwd / archive, cwd)\n run(['tar', '--owner=0', '--group=0', '-cf', tar] +\n [f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd)\n\n logging.info('Compressing tarball')\n run(['xz', '-f', '-T0', '-9', tar], cwd=cwd)",
"def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()",
"def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())",
"def save_tar(self, target_dir):\n # type: (Text) -> None\n if not os.path.isdir(target_dir):\n raise ValueError('target_dir %r not found.' % target_dir)\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n blob = self.bucket.blob(filekey)\n blob.upload_from_filename(tarname)",
"def handle_tar(file_path, extension, extracted_path, destination_directory):\n tar = tarfile.open(file_path, extension)\n # remove files if they already exist\n if os.path.exists(extracted_path):\n shutil.rmtree(extracted_path)\n tar.extractall(path=destination_directory)\n tar.close()",
"def _expand_archive(self, name):\r\n target = path(self.temp_dir) / uuid.uuid4().hex\r\n os.mkdir(target)\r\n with tarfile.open(self.data_dir / name) as tar_file:\r\n tar_file.extractall(path=target)\r\n\r\n return target",
"def untar(input_filename, extract_dir):\n try:\n tar_ds = tarfile.open(input_filename)\n except tarfile.TarError:\n raise ValueError(\"%s is not a tar file\" % (input_filename))\n tar_ds.extractall(path=extract_dir)\n tar_ds.close()",
"def untar(tarfile, outdir):\n tmpdir = tempfile.mkdtemp()\n try:\n untared = _open_archive(tarfile, tmpdir)\n files = [f for f in untared if os.path.isfile(os.path.join(tmpdir, f))]\n dirs = [d for d in untared if os.path.isdir(os.path.join(tmpdir, d))]\n assert len(files) + len(dirs) == len(untared), 'Only files and directories'\n if _files_same(tmpdir, outdir, files) and _dirs_same(tmpdir, outdir, dirs):\n # Nothing new or different in the tarfile.\n return False\n # Some or all of the files / directories are new.\n _move_files(tmpdir, outdir, files)\n _move_dirs(tmpdir, outdir, dirs)\n return True\n finally:\n if os.path.isdir(tmpdir):\n shutil.rmtree(tmpdir)",
"def upload(project, private=None, site=None, username=None, token=None, suffix='.tar.bz2', log_level=None):\n failed = _check_problems(project)\n if failed is not None:\n return failed\n\n # delete=True breaks on windows if you use tmp_tarfile.name to re-open the file,\n # so don't use delete=True.\n tmp_tarfile = NamedTemporaryFile(delete=False, prefix=\"anaconda_upload_\", suffix=suffix)\n tmp_tarfile.close() # immediately un-use it to avoid file-in-use errors on Windows\n try:\n status = archive(project, tmp_tarfile.name)\n if not status:\n return status\n status = client._upload(project,\n tmp_tarfile.name,\n uploaded_basename=(project.name + suffix),\n private=private,\n site=site,\n username=username,\n token=token,\n log_level=log_level)\n return status\n finally:\n os.remove(tmp_tarfile.name)",
"def archive(self, virtual_path_to_tar_files, root, target_name):\n\n\n # TODO: RSYNC and do a diff. if there are no changes, we can just skip this part of the dockerfile to maximize layering\n for x in virtual_path_to_tar_files:\n assert os.path.isabs(x)\n\n rel_to_root = [os.path.relpath(x, '/') for x in virtual_path_to_tar_files]\n real_path = [os.path.join(root, x) for x in rel_to_root ]\n\n tup = zip(virtual_path_to_tar_files, real_path)\n\n tar = tarfile.open(os.path.join(self.dir, target_name), 'w')\n\n for vp, rp in tup:\n tar.add(rp, arcname=vp)\n\n tar.close()\n\n self.df.add_docker_cmd('ADD %s /' % target_name)",
"def bulk_upload ( server, identity, src_dir, tgt_dir ) :\n tmp_tarfilepath = '/tmp/'\n tmp_tarfilename = server + '.tar.gz'\n tmp_file = tmp_tarfilepath + tmp_tarfilename\n\n # Tar up the src directory\n s = subprocess.call( [ '/bin/sh', '-c',\n 'cd ' + src_dir + ' && tar czf ' + tmp_file + ' .' ] )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Copy the tar file up to the server\n s = scp_call( server, identity, tmp_file, tmp_tarfilepath )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Unpack the tar file on the server\n s = ssh_call( server,\n identity,\n 'cd ' + tgt_dir + ' && sudo tar xzf ' + tmp_file + ' && rm ' + tmp_file + ' && sudo chown -R root:root *' )\n return s",
"def restore(self, archive):\n logger.info(\"Restoring an old archive run from {}\".format(archive))\n if os.path.isabs(archive):\n restorefile = archive\n else:\n restorefile = os.path.join(self.containerpath, const.ARCHIVEDIR, archive)\n with ignored(OSError):\n shutil.rmtree(os.path.join(self.rundir))\n with tarfile.open(restorefile, \"r:gz\") as f:\n def is_within_directory(directory, target):\n \n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n \n prefix = os.path.commonprefix([abs_directory, abs_target])\n \n return prefix == abs_directory\n \n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n \n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n \n tar.extractall(path, members, numeric_owner=numeric_owner) \n \n \n safe_extract(f, self.rundir)\n self._refreshconfig()",
"def upload_tree(self, files: List[Path], params: Dict[str, str]) -> bool:\n temp_zip = Comm.create_zip_from_files(files)\n files_post = {'zipFile': open(temp_zip.name, 'rb')}\n response = requests.post(\n self.upload_url,\n data=params,\n files=files_post,\n )\n files_post['zipFile'].close()\n temp_zip.close()\n data = response.text\n self.upload_output = data\n good_upload = self.parse_upload()\n return good_upload",
"def _unpack_tar(self, dir, filters):\n try:\n unpackArchive = gbpc.UnpackTarArchive(self.path, dir, filters)\n unpackArchive()\n except gbpc.CommandExecFailed:\n # unpackArchive already printed an error message\n raise GbpError",
"def create_tarfile(source_dir, filename=\"/tmp/contents.tar.gz\"):\n try:\n # Define the default signal handler for catching: Ctrl-C\n signal.signal(signal.SIGINT, signal.default_int_handler)\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))\n\n except (OSError, IOError) as e:\n # OSError: [Errno 13] Permission denied\n if e.errno == errno.EACCES:\n source_dir = os.getcwd() if source_dir == '.' else source_dir # Expand cwd\n warn_purge_exit(info_msg=\"Permission denied. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"Permission denied. Make sure to have read permission \"\n \"for all the files and directories in the path: %s\")\n % (source_dir))\n # OSError: [Errno 28] No Space Left on Device (IOError on python2.7)\n elif e.errno == errno.ENOSPC:\n dir_path = os.path.dirname(filename)\n warn_purge_exit(info_msg=\"No space left. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"No space left when compressing your data in: %s.\\n\"\n \"Make sure to have enough space before uploading your data.\")\n % (os.path.abspath(dir_path)))\n\n except KeyboardInterrupt: # Purge tarball on Ctrl-C\n warn_purge_exit(info_msg=\"Ctrl-C signal detected: Removing compressed data...\",\n filename=filename,\n exit_msg=\"Stopped the data upload gracefully.\")",
"def archive(\n self,\n ostream: Union[TextIO, BinaryIO],\n treeish: Optional[str] = None,\n prefix: Optional[str] = None,\n **kwargs: Any,\n ) -> Repo:\n if treeish is None:\n treeish = self.head.commit\n if prefix and \"prefix\" not in kwargs:\n kwargs[\"prefix\"] = prefix\n kwargs[\"output_stream\"] = ostream\n path = kwargs.pop(\"path\", [])\n path = cast(Union[PathLike, List[PathLike], Tuple[PathLike, ...]], path)\n if not isinstance(path, (tuple, list)):\n path = [path]\n # end assure paths is list\n self.git.archive(\"--\", treeish, *path, **kwargs)\n return self",
"def save_tar(self, target_dir):\n # type: (Text) -> None\n\n if not os.path.isdir(target_dir):\n raise ValueError(\"Target directory '{}' not found.\".format(target_dir))\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n self.s3.Object(self.bucket_name, filekey).put(Body=open(tarname, 'rb'))",
"def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None",
"def untar_file(filename, location):\n if not os.path.exists(location):\n os.makedirs(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif (filename.lower().endswith('.bz2')\n or filename.lower().endswith('.tbz')):\n mode = 'r:bz2'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([member.name for member in tar.getmembers()])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n fn = split_leading_dir(fn)[1]\n path = os.path.join(location, fn)\n if member.isdir():\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError), e:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n continue\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n destfp = open(path, 'wb')\n try:\n shutil.copyfileobj(fp, destfp)\n finally:\n destfp.close()\n fp.close()\n finally:\n tar.close()",
"def put(self, obj):\n\n if obj is None:\n return\n\n assert os.path.exists(obj), f'path {obj} does not exist.'\n\n return shutil.make_archive(obj, 'tar', obj)",
"def make_tarball(base_name, base_dir, compress=\"gzip\", verbose=0, dry_run=0,\n owner=None, group=None):\n tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '',\n 'compress': ''}\n compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz',\n 'compress': '.Z'}\n\n # flags for compression program, each element of list will be an argument\n if compress is not None and compress not in compress_ext.keys():\n raise ValueError(\n \"bad value for 'compress': must be None, 'gzip', 'bzip2', \"\n \"'xz' or 'compress'\")\n\n archive_name = base_name + '.tar'\n if compress != 'compress':\n archive_name += compress_ext.get(compress, '')\n\n mkpath(os.path.dirname(archive_name), dry_run=dry_run)\n\n # creating the tarball\n import tarfile # late import so Python build itself doesn't break\n\n log.info('Creating tar archive')\n\n uid = _get_uid(owner)\n gid = _get_gid(group)\n\n def _set_uid_gid(tarinfo):\n if gid is not None:\n tarinfo.gid = gid\n tarinfo.gname = group\n if uid is not None:\n tarinfo.uid = uid\n tarinfo.uname = owner\n return tarinfo\n\n if not dry_run:\n tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])\n try:\n tar.add(base_dir, filter=_set_uid_gid)\n finally:\n tar.close()\n\n # compression using `compress`\n if compress == 'compress':\n warn(\"'compress' will be deprecated.\", PendingDeprecationWarning)\n # the option varies depending on the platform\n compressed_name = archive_name + compress_ext[compress]\n if sys.platform == 'win32':\n cmd = [compress, archive_name, compressed_name]\n else:\n cmd = [compress, '-f', archive_name]\n spawn(cmd, dry_run=dry_run)\n return compressed_name\n\n return archive_name"
] | [
"0.6552709",
"0.6145302",
"0.59912825",
"0.590548",
"0.5857982",
"0.5824896",
"0.5788129",
"0.57454973",
"0.5727448",
"0.5701735",
"0.5637337",
"0.5634134",
"0.5598457",
"0.55868477",
"0.5579905",
"0.5539658",
"0.5536656",
"0.5533471",
"0.5514017",
"0.5374604",
"0.53565294",
"0.5340064",
"0.53372705",
"0.5326481",
"0.5326471",
"0.5321925",
"0.53051096",
"0.5304542",
"0.5304428",
"0.5285305"
] | 0.6242641 | 1 |
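The record closed above pairs an archive-building query with tar-creation code; the pattern shared by its negatives is packing a directory into a gzip-compressed tarball via Python's standard tarfile module. A minimal runnable sketch of that pattern (the helper name make_tar_gz is illustrative, not taken from the dataset):

```python
import os
import tarfile

def make_tar_gz(source_dir: str, archive_path: str) -> str:
    """Pack source_dir into a gzip-compressed tarball at archive_path."""
    with tarfile.open(archive_path, "w:gz") as tar:
        # arcname keeps only the directory's basename inside the archive
        tar.add(source_dir, arcname=os.path.basename(source_dir))
    return archive_path
```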
Symlink to the new current release. | def symlink_current_release():
require("release", provided_by=[deploy])
with cd("%(path)s/releases" % env):
sudo("ln -s %(release)s current_tmp && mv -Tf current_tmp current" % env) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def symlink():\n releases()\n env.current_path = '/root/your_project/current'\n run('rm %(current_path)s' % env)\n run('ln -s %(current_release)s %(current_path)s' % env)",
"def symlink(timestamp):\n if exists(env.current_dir):\n run('rm -r %(current_dir)s' % env)\n run('ln -s %s %s' % (os.path.join(env.releases_dir, timestamp), env.current_dir))",
"def createLink(self):\n \n if( self.useLink ):\n trymakedir( self.parent.installPath + \"/\" + self.alias )\n\n os.chdir( self.parent.installPath + \"/\" + self.alias )\n \n # check for already existing symlinks or dirs \n if( os.path.islink( self.version )):\n os.unlink( self.version )\n elif( os.path.isdir( self.version )):\n self.abort( \"could not create link to [ \" + self.linkPath + \" ]\\nin [ \" \\\n + os.path.basename( self.installPath ) + \" ]!!!\" )\n\n os.symlink( self.linkPath , self.version )\n print \"+ Linking \" + self.parent.installPath + \"/\" + self.alias + \"/\" + self.version \\\n + \" -> \" + self.linkPath",
"def make_active(revision):\n run('ln -sfn {base}/{revision}/ {base}/newest'.format(base=BASE_PATH,\n revision=revision))",
"def mklinkto(self, oldname):\n error.checked_call(os.link, str(oldname), str(self))",
"def make_symlink(dbconfig, targ):\n if \"latest\" in dbconfig and not dbconfig[\"latest\"]:\n return\n link = re.sub(r'[0-9]+', 'latest', targ)\n try:\n os.symlink(targ, link)\n info(\"create link \" + link + \" --> \" + targ)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(link)\n os.symlink(targ, link)\n info(\"move link \" + link + \" --> \" + targ)",
"def update_current_link(self, name: str):\n lnk = self.ws_current_link\n if lnk.is_symlink():\n lnk.unlink()\n if name is not None:\n lnk.symlink_to(name)\n self.ws_config_file.touch(exist_ok=True)",
"def link(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n full_destination_path = os.path.join(\n os.path.expandvars(self.path_destination), self.name\n )\n\n try:\n if self.sudo:\n spawn.process(\n f'ln -sfv \"{full_source_path}\" \"{full_destination_path}\"',\n sudo=True,\n )\n else:\n os.symlink(full_source_path, full_destination_path)\n except FileExistsError:\n message.error(\n \"Can't symlink, file already exists at destination. Attempting fix.\"\n )\n os.remove(full_destination_path)\n message.info(f\"Removed: '{full_destination_path}'\")\n os.symlink(full_source_path, full_destination_path)\n finally:\n message.info(\n f\"Symlink created: '{full_source_path}' <--> '{full_destination_path}'\"\n )\n else:\n message.error(\n f\"'{self.name}' has no source from which to create a link from.\"\n )",
"def switchRevision(deploymentPath, revision):\n with cd(deploymentPath):\n sudo('rm -f current')\n sudo('ln -s %s current' % revision)",
"def _activate_new_source(self, source_dir, active_version_symlinks):\n # Switch the symlink and use our new project\n logger.info(\"Activating new source via symlinks\")\n for symlink in active_version_symlinks:\n logger.info(\"Symlinking %s\", symlink)\n symlink_dir, _ = os.path.split(symlink)\n with hide(*fab_output_hides):\n sudo('mkdir -p %s' % symlink_dir)\n sudo('rm -f %s' % symlink)\n sudo('ln -s %s %s' % (source_dir, symlink))\n\n # Clean out any stale pycs that may have been generated by queued\n # up processes that were using the old symlink\n with hide(*fab_output_hides):\n sudo('find %s -name \"*.pyc\" -delete' % source_dir)",
"def update_link(self):\n try:\n relpath = os.path.relpath(self.path, os.path.dirname(self.link_path))\n os.symlink(relpath, self.link_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.unlink(self.link_path)\n os.symlink(self.path, self.link_path)",
"def symlink(path, v=False):\r\n if not os.path.exists(path):\r\n err(path + ' : no such file or directory')\r\n elif not os.path.isdir(path):\r\n err(path + ' : not a directory')\r\n else:\r\n theme_name = os.path.basename(os.path.normpath(path))\r\n theme_path = os.path.join(_THEMES_PATH, theme_name)\r\n if os.path.exists(theme_path):\r\n err(path + ' : already exists')\r\n else:\r\n if v:\r\n print(\"Linking `{p}' to `{t}' ...\".format(p=path, t=theme_path))\r\n try:\r\n os.symlink(path, theme_path)\r\n except Exception as e:\r\n err(\"Cannot link `{p}' to `{t}':\\n{e}\".format(p=path, t=theme_path, e=str(e)))",
"def ln(src, dst):\n os.symlink(src, dst)",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)",
"def create_symlink(src, dest):\n sudo('ln -s {} {}'.format(src, dest))",
"def _create_symlink(self, source_path, main):\n main_file = os.path.realpath(os.path.join(source_path, main))\n if not os.path.isfile(main_file):\n main_file += '.js'\n if not os.path.isfile(main_file):\n print('\\tWARNING: Could not create symlink for {}, no such file.'.format(main_file))\n return\n main_file_name = os.path.basename(main_file)\n with change_working_directory(os.path.realpath(self.symlink_dir)) as cd:\n file_path = os.path.join(cd, main_file_name)\n self.created(file_path)\n if os.path.islink(file_path):\n os.remove(file_path)\n symlink(main_file, main_file_name)",
"def link(self):\n \n self.__enter__()\n return self.stable_path",
"def create_soft_link():\n vlogger_path = os.path.join(vlogger_dir, \"vlogger.py\")\n dir_path = os.path.expanduser(\"~\")\n bin_dir = os.path.join(dir_path, \"bin\")\n if not os.path.exists(bin_dir):\n os.mkdir(bin_dir)\n\n soft_path = os.path.join(bin_dir, \"vlogger\")\n\n if not os.path.exists(soft_path):\n command = [\"ln\", \"-s\", vlogger_path, soft_path]\n cmd_str = \" \".join(command)\n print(\"Soft link command for easy execution: {}\".format(cmd_str))\n subprocess.call([\"ln\", \"-s\", vlogger_path, soft_path])\n else:\n print(\"Soft link already created: {}\".format(soft_path))",
"def command_new_version(self):\n repoinit.new_version(*self.args())",
"def link(path, service_name, branch, username):\n slab_logger.log(15, 'Setting the current service to %s' % service_name)\n if service_name == \"current\":\n if os.path.isfile(os.path.join(path, \"current\")):\n currentf = open(os.path.join(path, \"current\"), 'r')\n currentf.seek(0)\n service_name = currentf.readline()\n else:\n slab_logger.error('Unable to determine the current service. '\n 'Please enter a service to work on.')\n return 1\n\n returncode = set_current_service(path, service_name)\n if not returncode == 0:\n slab_logger.error('Unable to write to \"current\" file')\n return 1\n\n if not os.path.islink(os.path.join(path, \"current_service\")):\n # Note: What to link is first arg, where to link is second aka src dest\n if os.path.isdir(os.path.join(path, \"services\", service_name)):\n os.symlink(os.path.join(path, \"services\", service_name),\n os.path.join(path, \"current_service\"))\n slab_logger.debug('Made symlink for %s' % service_name)\n return 0\n else:\n slab_logger.debug('Could not find source for symlink. '\n 'Attempting re-clone of %s.' % service_name)\n returncode = sync_service(path, branch, username, service_name)\n if returncode:\n os.symlink(os.path.join(path, \"services\", service_name),\n os.path.join(path, \"current_service\"))\n slab_logger.debug('Made symlink for %s' % service_name)\n return 0\n else:\n slab_logger.error(\"Failed to find source for symlink: \" +\n os.path.join(path, \"services\", service_name))\n return 1\n else:\n slab_logger.debug(\"Link already exists.\")\n return 0",
"def version_link(self):\n release_link = url_for('data.data', selected_release=self.DATASET_RELEASE)\n return Markup(f\"<a href='{release_link}'>{self.DATASET_RELEASE}</a>\")",
"def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: \"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) )\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )",
"def redirect_version():\n return redirect(url_for(\"base_blueprint.version\"), code=301)",
"def _new_release_dir(self, connection):\n release_dir_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')\n commit_hash = self._get_commit_hash(connection)\n\n release_dir = f'{release_dir_timestamp}-{self.config.deployment_user}-{commit_hash}-{self.project_version}'\n print(blue(f\"Release directory set to {release_dir}\"))\n\n return release_dir",
"def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)",
"def promote_release(self, release):\n logger.info(\"Updating production alias with revision '{0}'\".format(\n release))\n if release.isdigit() or release == '$LATEST':\n version = release\n else:\n try:\n response = self.aws_lambda.get_alias(\n FunctionName=self.function_selected,\n Name=release\n )\n version = response['FunctionVersion']\n except self.aws_lambda.exceptions.ResourceNotFoundException:\n logger.error(\"Can't found the qualifier {0} for {1}\".format(\n release,\n self.function_selected\n ))\n return\n\n self.update_or_create_alias(version, 'production')",
"def _link(filename, existing_filename):\n CreateHardLinkW(filename, existing_filename, 0)",
"def __enter__(self):\n if self.stable_path:\n return self.stable_path\n \n _, file_name = os.path.split(self._source_path)\n stable_dir = tempfile.mkdtemp(prefix=\"%s-\" % file_name)\n assert self._source_path.startswith(\"/\")\n stable_path = os.path.join(stable_dir, self._source_path[1:])\n \n self.log.debug(\"Linking %s to point to %s\", stable_path, \n self._source_path)\n ensure_dir(os.path.dirname(stable_path))\n try:\n os.link(self._source_path, stable_path)\n except (EnvironmentError) as e:\n if e.errno == errno.ENOENT:\n return None\n raise\n \n self._stable_dir = stable_dir\n self.stable_path = stable_path\n return self",
"def symlink(self, req, link, parent, name):\r\n self.reply_err(req, EROFS)"
] | [
"0.85483825",
"0.7888362",
"0.6896718",
"0.6580014",
"0.640067",
"0.6382023",
"0.609916",
"0.59879214",
"0.598062",
"0.58782184",
"0.5851017",
"0.5815926",
"0.5792104",
"0.57686156",
"0.57686156",
"0.57261956",
"0.57167125",
"0.5602009",
"0.55953795",
"0.55733645",
"0.5570685",
"0.5562101",
"0.5553313",
"0.55281484",
"0.5523766",
"0.5522215",
"0.54879534",
"0.54795873",
"0.54761964",
"0.54414445"
] | 0.8182266 | 1 |
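The positive document in this record performs an atomic symlink swap: it creates the link under a temporary name and renames it over `current` with `mv -Tf`, so `current` never dangles mid-deploy. A local Python sketch of the same trick, assuming POSIX rename semantics (activate_release is a hypothetical name, not from the dataset):

```python
import os

def activate_release(releases_dir: str, release: str) -> None:
    """Atomically repoint releases_dir/current at the named release."""
    tmp_link = os.path.join(releases_dir, "current_tmp")
    if os.path.lexists(tmp_link):
        os.remove(tmp_link)  # clear a stale temp link from a failed deploy
    os.symlink(release, tmp_link)
    # os.replace is an atomic rename on POSIX, mirroring `mv -Tf`
    os.replace(tmp_link, os.path.join(releases_dir, "current"))
```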
Remove older releases, keeping the last `keep_num` intact. | def cleanup(keep_num=5):
keep_num = int(keep_num)
assert keep_num > 0, "[ERROR] keep_num must be > 0; refusing to proceed."
with cd("%(path)s/packages" % env):
package_files = sorted(run("ls -1").split())
package_files = [_.replace(".tar.gz", "") for _ in package_files]
with cd("%(path)s/releases" % env):
release_files = sorted(run("ls -1").split())
release_files.remove('current')
diff = set(package_files).symmetric_difference(set(release_files))
if diff:
raise Exception("[ERROR]: Package and release directories are out of sync;"
" refusing to proceed. Please fix this difference manually: %s" % diff)
package_files = package_files[:-keep_num]
release_files = release_files[:-keep_num]
with cd("%(path)s/packages" % env):
[sudo("rm %s.tar.gz" % _) for _ in package_files]
with cd("%(path)s/releases" % env):
[sudo("rm -r %s" % _) for _ in release_files] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _deleteOldVersionsByAge(self, model, max_age, number_to_keep=None):\r\n adapter = getVersionManagementAdapter(model)\r\n\r\n version_ids = self._getOldVersionIds(adapter)\r\n if number_to_keep is not None:\r\n if len(version_ids) < number_to_keep:\r\n return\r\n version_ids = version_ids[:-number_to_keep]\r\n\r\n then = datetime.now() - timedelta(days=max_age)\r\n oldest_time = DateTime(then.isoformat())\r\n\r\n index = None\r\n for id in version_ids:\r\n if adapter.getVersionModificationTime(id) >= oldest_time:\r\n break\r\n index = version_ids.index(id)\r\n\r\n delete_ids = []\r\n if index is not None:\r\n delete_ids = version_ids[:index]\r\n self._removed += len(delete_ids)\r\n model.manage_delObjects(delete_ids)",
"def do_clean(number=0):\n res = run(\"ls /data/web_static/releases\")\n\n number = int(number)\n list_names = str(res).split()\n date_list = []\n delete_list = []\n patt1 = re.compile(r'web_static_\\d{14}')\n for name in list_names:\n if re.fullmatch(patt1, name):\n date_list.append(int(name[11:]))\n else:\n delete_list.append(name)\n\n for elem in delete_list:\n run(\"rm -Rf /data/web_static/releases/\" + elem)\n\n if number == 0:\n list_names.remove(\"web_static_\" + str(max(date_list)))\n else:\n for _ in range(0, number):\n newer = max(date_list)\n list_names.remove(\"web_static_\" + str(newer))\n date_list.remove(newer)\n\n for names in list_names:\n run(\"rm -Rf /data/web_static/releases/\" + names)\n\n res = local(\"ls versions\")\n version_names = str(res).split()\n delete_list = []\n patt2 = re.compile(r'web_static_\\d{14}\\.tgz')\n for name in version_names:\n if re.fullmatch(patt2, name) is None:\n delete_list.append(name)\n for names in delete_list:\n local(\"rm -Rf versions/\" + names)\n for names in list_names:\n local(\"rm -Rf versions/\" + names + \".tgz\")",
"def prune(c):\n with conn.cd(utils.join(SALT_DEPLOY_PATH, utils.DEPLOY_RELEASES_DIR)):\n releases = [\n d.replace(\"./\", \"\").strip()\n for d in conn.run(\"find . -maxdepth 1 -mindepth 1 -type d\", pty=True)\n .stdout.strip()\n .split(\"\\n\")\n ]\n releases.sort()\n\n diff = len(releases) - int(SALT_KEEP_RELEASES)\n print(\n f\"Found {len(releases)} current releases; set to keep {SALT_KEEP_RELEASES}\"\n )\n if diff > 0:\n to_delete = releases[:diff]\n print(f\"Cleaning up {len(to_delete)} old release(s)\")\n conn.run(f\"rm -rf {' '.join(to_delete)}\")\n else:\n print(\"Nothing to do\")",
"def Remove(self, version_number):\n self.dict.pop(str(version_number))",
"def purge_old() -> None:\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n c.execute(\n \"\"\"delete from entries where title not in\n (select title from entries order by year desc, month desc, day desc\n limit 20)\n \"\"\"\n )\n conn.commit()\n conn.close()",
"def removed_pkgs():\n name_versions = defaultdict(set)\n fedoras = py2_pkgs()\n last_fedoras = defaultdict(set)\n new = {pkg.name for pkg in repoquery(all=True)}\n for version in fedoras:\n for name_evr in set(fedoras[version]):\n name, _, evr = name_evr.partition(' ')\n if name not in new:\n name_versions[name].add(evr)\n last_fedoras[version].add(name)\n max_versions = {name: max(versions, key=SortableEVR)\n for name, versions in name_versions.items()}\n return last_fedoras, max_versions",
"def _cleanup_removed_versions(self, consumer, versions):\n prev_resource_types = set(\n self._versions_by_consumer[consumer].keys())\n cur_resource_types = set(versions.keys())\n removed_resource_types = prev_resource_types - cur_resource_types\n if removed_resource_types:\n LOG.debug(\"Removing stale tracked versions: %s\",\n removed_resource_types)\n for resource_type in removed_resource_types:\n self._set_version(consumer, resource_type, None)",
"def releases(releaser, count):\n releases = sorted(\n releaser.get_releases().values(),\n key=lambda rel: rel[\"end_timestamp\"],\n reverse=True,\n )\n click.echo(f\"Latest {count} releases:\")\n for release in releases[:count]:\n click.echo(f'{release[\"end_timestamp\"]} {release[\"commit\"]}')",
"def get_outdated_containers(prefix, num_to_keep=2):\n most_recent = []\n\n for container_name in pyrax.cloudfiles.list_containers():\n if container_name.startswith(prefix):\n container = pyrax.cloudfiles.get_container(container_name)\n last_modified = get_container_last_modified(container)\n\n if last_modified:\n most_recent.append((last_modified, container))\n\n most_recent.sort()\n most_recent.reverse()\n\n if len(most_recent) > num_to_keep:\n yield most_recent.pop()",
"def delete_version(self):\n pass",
"def manage_addVersionCleanUp(self, id, max_age, number_to_keep=None, pub_path=None, REQUEST=None):\r\n if not Id(self, id).isValid():\r\n return\r\n object = VersionCleanUp(id, max_age, number_to_keep, pub_path)\r\n self._setObject(id, object)\r\n add_and_edit(self, id, REQUEST, 'manage_workspace')\r\n return ''",
"def clean_old_backups(self, encrypted=None, compressed=None,\n content_type=None, database=None,\n keep_number=None):\n if keep_number is None:\n keep_number = settings.CLEANUP_KEEP if content_type == 'db' \\\n else settings.MEDIA_FILENAME_TEMPLATE\n files = self.list_backups(encrypted=encrypted, compressed=compressed,\n content_type=content_type, database=database)\n files = sorted(files, key=utils.filename_to_date, reverse=True)\n files_to_delete = [fi for i, fi in enumerate(files) if i >= keep_number]\n for filename in files_to_delete:\n self.delete_file(filename)",
"def gc_deploys(n = 10):\n for deploypath in [env.basepath, env.nodejs]:\n with cd(\"%s/releases\" % deploypath):\n files = run(\"ls -1t\").splitlines()\n older_files = files[n:]\n if len(older_files) > 0:\n puts(yellow(\"Removing older deploys: %s\" % \", \".join(older_files)))\n for file in older_files:\n run(\"rm -fr %s\" % file)",
"def unkeep(self, *args):\n self.__execute(self.pkgin_bin, \"unkeep\", *args)",
"def get_releases(is_vertebrate: bool):\n url = \"http://ftp.ensemblgenomes.org/pub?\"\n if is_vertebrate:\n url = \"http://ftp.ensembl.org/pub?\"\n ret = retry(requests.get, 3, url)\n # sort releases new to old\n releases = sorted(\n [int(i) for i in re.findall(r'\"release-(\\d+)/\"', ret.text)],\n reverse=True,\n )\n if is_vertebrate:\n # ignore immature releases\n releases = [r for r in releases if r > 46]\n return releases",
"def keep(self, *args):\n self.__execute(self.pkgin_bin, \"keep\", *args)",
"def remove_training_reserves():\n reserves = TrainingReserve.objects.all()\n now = timezone.now()\n for reserve in reserves:\n if reserve.date < now:\n reserve.delete()",
"def _gc(self):\n remove_before = time.time() - self._keep_for\n for item in self._queue:\n # Time for the sequence to be removed?\n if item[1] < remove_before:\n # Sequence data is old, so remove it\n self._queue.remove(item)\n else:\n # Sequence number was added recently, so don't remove it. Also\n # stop processing the queue because all later items will be\n # newer\n break",
"def release(self, number: int) -> None:\n if number not in self.numbers_set:\n self.numbers_q.append(number)\n self.numbers_set.add(number)",
"def delete_old():\n objs = (Snapshot\n .objects\n .filter(timestamp__lte=(datetime.now() - timedelta(days=35)))\n )\n objs.delete()",
"def get_files_to_delete(all_files, keep_copies):\n LOG.debug(\"Retain %d files\", keep_copies)\n if keep_copies == 0:\n return all_files\n else:\n return all_files[:-keep_copies]",
"def keep_old(ver: str) -> bool:\n ver = travis_normalize_py_version(ver)\n if ver == 'PyPy':\n return any(v.startswith('2') for v in new_versions)\n if ver == 'PyPy3':\n return any(v.startswith('3') for v in new_versions)\n return not is_important(ver)",
"def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join([ '%(releases_path)s/%(release)s' % { 'releases_path':env.releases_path, 'release':release } for release in directories ])\n run('rm -rf %(directories)s' % env)",
"def remove_old_songs(df, too_old=1970):\n drop_indices = df.index[df['year'] < too_old].tolist()\n df = df.drop(drop_indices)\n return df",
"def _purge_old(self):\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._samples_max_age),\n self._samples_max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._samples_max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()",
"def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. max 3 allowed.\n cleanup()",
"def purge_outdated(self):\n todelete = []\n sql = \"select rowid, path, mtime from pictures\"\n cur = self.con.execute(sql)\n for rowid, path_str, mtime in cur:\n if mtime and op.exists(path_str):\n picture_mtime = os.stat(path_str).st_mtime\n if int(picture_mtime) <= mtime:\n # not outdated\n continue\n todelete.append(rowid)\n if todelete:\n sql = \"delete from pictures where rowid in (%s)\" % ','.join(map(str, todelete))\n self.con.execute(sql)",
"def remove_n_nos(self, num_nos):\n for i in range(num_nos):\n elem = random.randint(1, 11 ** 4)\n self.remove(elem)",
"def cleanup(self):\n results = run_command(\"gppkg -q --all\")\n gppkgs = results.split('\\n')[self.start_output:self.end_output] #The first line is 'Starting gppkg with args', which we want to ignore.\n\n for gppkg in gppkgs:\n run_command(\"gppkg --remove \" + gppkg)",
"def delete_old_backup(self):\n print \"### Info ### Delete redundant backups\"\n for i in range(len(self.date_list)-20):\n os.remove(os.path.abspath(self.backup_path + U'/voc2brain_backup_' + str(self.date_list[0])+ \".sdb3\") )"
] | [
"0.64156675",
"0.6380838",
"0.63680077",
"0.56757736",
"0.544219",
"0.5342159",
"0.5324215",
"0.52694285",
"0.5213804",
"0.51900595",
"0.51862675",
"0.5181022",
"0.51753306",
"0.51589394",
"0.51021165",
"0.50865173",
"0.50815326",
"0.5056275",
"0.5055268",
"0.505497",
"0.5032865",
"0.5020655",
"0.50156844",
"0.4998614",
"0.49933705",
"0.49908164",
"0.49771422",
"0.49425587",
"0.49103996",
"0.49086845"
] | 0.69273704 | 0 |
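The positive document here prunes both `packages` and `releases`, keeping only the newest keep_num entries and refusing to proceed if the two directories disagree. A standalone sketch of the core retention logic, assuming release names sort chronologically as timestamped names do (prune_releases is an illustrative name):

```python
import os
import shutil

def prune_releases(releases_dir: str, keep_num: int = 5) -> list:
    """Delete all but the newest keep_num releases; return what was removed."""
    assert keep_num > 0, "keep_num must be > 0"
    # 'current' is the active symlink, never a release to delete
    releases = sorted(d for d in os.listdir(releases_dir) if d != "current")
    stale = releases[:-keep_num]  # empty when there are <= keep_num releases
    for name in stale:
        shutil.rmtree(os.path.join(releases_dir, name))
    return stale
```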
Give each Node uniform splits of data. Nodes will have the same amount of data. | def uniform_split(self, nr_agents):
indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()
self.samples = self.partition(self.samples, indices, nr_agents)
self.labels = self.partition(self.labels, indices, nr_agents) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r",
"def split_network(self):\n disconnect_nodes(self.nodes[1], 2)\n disconnect_nodes(self.nodes[2], 1)\n self.sync_all([self.nodes[:2], self.nodes[2:]])",
"def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)",
"def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)",
"def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)",
"def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il",
"def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])",
"def chunks(data, n):\n newn = int(len(data) / n) # chunk size \n \n for i in range(0, n-1):\n test_chunk = data[i*newn:i*newn+newn]\n train_chunk = [el for el in data if el not in test_chunk]\n yield train_chunk, test_chunk\n \n test_chunk = data[n*newn-newn:]\n train_chunk = [el for el in data if el not in test_chunk]\n \n yield train_chunk, test_chunk",
"def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]",
"def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]",
"def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits",
"def non_iid_split(self, nr_agents, class_per_node, random):\n unique = list(set(self.labels.tolist()))\n len_unique = len(unique)\n\n # Create array that assigns a class to specific nodes\n # Use 'np.arange' to ensure every class is represented before repeating\n # A row represents nr_agents, a column represents classes per node\n agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique\n np.random.shuffle(agent_class_master)\n agent_class_master = agent_class_master.reshape(nr_agents, class_per_node)\n\n # Split data by labels\n sample_list = [[] for _ in range(len_unique)]\n for i in range(len(self.labels)):\n sample_list[self.labels[i]].append(self.samples[i])\n\n # By class creates uniform or random indices splits to partition data to agents evenly\n class_count = np.bincount(agent_class_master.ravel())\n class_indices = {}\n for i in range(len(class_count)):\n if random:\n indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist())\n indices = [0] + indices\n indices += [len(sample_list[i])]\n class_indices[i] = indices\n else:\n class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1,\n dtype=int).tolist()\n\n # Main loop that partitions data by the assigned class and proper amount\n all_agents = []\n all_class = []\n for agent in agent_class_master:\n agent_data = []\n agent_class = []\n for cls in agent:\n # Proportioned indices for data and grab correctly indexed data\n temp_indices = class_indices[cls]\n data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1]\n\n # Add data and class to this agents list\n agent_data = agent_data + data_for_agent\n agent_class = agent_class + [cls for _ in range(len(data_for_agent))]\n\n # Drop first index since we used that data, forces next person to use next index\n class_indices[cls] = temp_indices[1:]\n\n # Append agents data and class labels in order\n all_agents.append(torch.stack(agent_data))\n all_class.append(torch.tensor(agent_class))\n\n self.samples = all_agents\n self.labels = all_class",
"def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))",
"def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)",
"def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)",
"def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets",
"def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n packed = np.vstack([y,x]).T\n np.random.shuffle(packed)\n N = y.shape[0]\n eightyN = int(ratio*N)\n xTrain = packed[0:eightyN,1]\n yTrain = packed[0:eightyN,0]\n xTest = packed[eightyN:N, 1]\n yTest = packed[eightyN:N,0]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n return xTrain, yTrain, xTest, yTest",
"def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)",
"def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]",
"def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data",
"def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]",
"def grow(self):\n while self.splittable_nodes:\n self.split_next()",
"def split(self, num_or_size_splits, shuffle=False):\n raise NotImplementedError",
"def _distribute_data_to_cluster(self):\n\n for data in self.data:\n _distances = self._calculate_distances(data)\n _cluster = self._get_closest_cluster(_distances)\n self.clusters[_cluster].append(data)",
"def _create_split(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n split = onnx_node.getattr(\"split\", None)\n num_output = len(onnx_node.outputs)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, split, num_output)",
"def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n return",
"def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard",
"def split_data(self):\n np.random.seed(seed=self.seed)\n indices = np.random.permutation(self.predictor_vars.shape[0])\n split_row = round(self.predictor_vars.shape[0] * self.train_split)\n train_idx, test_idx = indices[:split_row], indices[split_row:]\n self.predictor_vars_train, self.predictor_vars_test = (\n self.predictor_vars[train_idx, :],\n self.predictor_vars[test_idx, :],\n )\n self.response_var_train, self.response_var_test = (\n self.response_var[train_idx],\n self.response_var[test_idx],\n )",
"def random_split(self, nr_agents):\n np.random.seed(self.random_seed)\n # Get random indices\n indices = sorted(np.random.randint(0, high=self.samples.shape[0], size=nr_agents - 1).tolist())\n indices = [0] + indices\n indices += [self.samples.shape[0]]\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)",
"def batch_split(self) -> np.array:\n pass"
] | [
"0.6593751",
"0.64535356",
"0.6275442",
"0.624764",
"0.6210491",
"0.61586165",
"0.61282647",
"0.6122043",
"0.6105798",
"0.6105246",
"0.6102763",
"0.60595816",
"0.6045628",
"0.6025434",
"0.60080785",
"0.5965141",
"0.5959221",
"0.59428424",
"0.5918808",
"0.59182876",
"0.59140563",
"0.5881982",
"0.58554405",
"0.58533686",
"0.5852365",
"0.5845334",
"0.58368915",
"0.5830529",
"0.5824757",
"0.58202374"
] | 0.6529111 | 1 |
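The positive document's trick is np.linspace over row indices: nr_agents + 1 integer boundaries carve the samples into nr_agents contiguous, near-equal slices. A self-contained sketch of that partitioning step (uniform_partition is a hypothetical name):

```python
import numpy as np

def uniform_partition(samples: np.ndarray, nr_agents: int) -> list:
    """Split samples into nr_agents contiguous, near-equal slices."""
    bounds = np.linspace(0, samples.shape[0], num=nr_agents + 1, dtype=int)
    return [samples[bounds[i]:bounds[i + 1]] for i in range(nr_agents)]

# 10 samples over 3 agents -> slice sizes 3, 3, 4
print([len(p) for p in uniform_partition(np.arange(10), 3)])
```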
This function computes the distribution internal parameters from its first two moments. | def _compute_internals(self, moments):
[mean, stdv] = moments
internals = {}
internals['a'] = mean - np.sqrt(3) * stdv
internals['b'] = mean + np.sqrt(3) * stdv
return internals | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals",
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. / stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals",
"def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)",
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. + cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals",
"def moments(self):",
"def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w",
"def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))",
"def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables",
"def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma",
"def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict",
"def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)",
"def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars",
"def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)",
"def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])",
"def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict",
"def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s",
"def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))",
"def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))",
"def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m",
"def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2",
"def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. )\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return",
"def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict"
] | [
"0.6455378",
"0.6220392",
"0.6185545",
"0.6109156",
"0.6106636",
"0.60708535",
"0.60512894",
"0.60178155",
"0.5966822",
"0.59502286",
"0.58735156",
"0.5850575",
"0.58171284",
"0.5816514",
"0.57661724",
"0.5720821",
"0.57173246",
"0.57122564",
"0.5709464",
"0.57005703",
"0.56566393",
"0.56566393",
"0.56566393",
"0.5650613",
"0.5647901",
"0.56205666",
"0.56183773",
"0.5581819",
"0.5580499",
"0.5579431"
] | 0.6434524 | 1 |
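The record above inverts the first two moments of a Uniform(a, b) distribution: since mean = (a + b)/2 and stdv = (b − a)/√12, the bounds follow as a = mean − √3·stdv and b = mean + √3·stdv. A quick numeric round-trip check of that identity (a sketch for illustration, not part of the dataset):

```python
import numpy as np

def uniform_from_moments(mean: float, stdv: float) -> dict:
    """Recover Uniform(a, b) bounds from its first two moments."""
    return {"a": mean - np.sqrt(3) * stdv, "b": mean + np.sqrt(3) * stdv}

rng = np.random.default_rng(0)
x = rng.uniform(2.0, 8.0, size=100_000)
print(uniform_from_moments(x.mean(), x.std()))  # ~= {'a': 2.0, 'b': 8.0}
```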
This function computes the distribution internal parameters from its first two moments. | def _compute_internals(self, moments):
[mean, stdv] = moments
internals = {}
internals['mu'] = mean
internals['sigma'] = stdv
return internals | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals",
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals",
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. / stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals",
"def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)",
"def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. + cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals",
"def moments(self):",
"def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w",
"def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))",
"def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables",
"def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma",
"def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict",
"def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)",
"def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars",
"def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)",
"def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])",
"def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict",
"def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s",
"def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))",
"def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))",
"def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m",
"def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2",
"def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. )\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return",
"def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict"
] | [
"0.6434524",
"0.6434524",
"0.6220392",
"0.6185545",
"0.6109156",
"0.6106636",
"0.60708535",
"0.60512894",
"0.60178155",
"0.5966822",
"0.59502286",
"0.58735156",
"0.5850575",
"0.58171284",
"0.5816514",
"0.57661724",
"0.5720821",
"0.57173246",
"0.57122564",
"0.5709464",
"0.57005703",
"0.56566393",
"0.56566393",
"0.56566393",
"0.5650613",
"0.5647901",
"0.56205666",
"0.56183773",
"0.5581819",
"0.5580499"
] | 0.6455378 | 0 |
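For context: the positive document above is the normal-distribution case, where the first two moments map one-to-one onto the internal parameters. Below is a minimal standalone sketch of that mapping next to the uniform case from the top-scoring negatives; dropping the class context in favor of plain functions is an assumption made here for self-containment.

```python
import numpy as np

def normal_internals(moments):
    # Normal: the mapping is the identity (mean -> mu, stdv -> sigma).
    mean, stdv = moments
    return {"mu": mean, "sigma": stdv}

def uniform_internals(moments):
    # Uniform on [a, b]: mean = (a + b) / 2 and stdv = (b - a) / (2 * sqrt(3)),
    # which inverts to the bounds below (matching the negatives above).
    mean, stdv = moments
    return {"a": mean - np.sqrt(3) * stdv, "b": mean + np.sqrt(3) * stdv}

print(normal_internals([0.0, 1.0]))   # {'mu': 0.0, 'sigma': 1.0}
print(uniform_internals([0.0, 1.0]))  # {'a': -1.732..., 'b': 1.732...}
```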
Provides a Step Functions Activity data source.

Example Usage

```python
import pulumi
import pulumi_aws as aws

sfn_activity = aws.sfn.get_activity(name="myactivity")
``` | def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityResult]:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_activities():\n pass",
"def get_activity(arn: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityResult:\n __args__ = dict()\n __args__['arn'] = arn\n __args__['name'] = name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:sfn/getActivity:getActivity', __args__, opts=opts, typ=GetActivityResult).value\n\n return AwaitableGetActivityResult(\n arn=pulumi.get(__ret__, 'arn'),\n creation_date=pulumi.get(__ret__, 'creation_date'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'))",
"def construct_strava_activity_data(activity):\n # if the timestamp has been saved then use this over converting the other one\n # issues with server tz so better to use the timestamp at the point the activity record was created\n if activity.iso_timestamp:\n local_time = activity.iso_timestamp\n else:\n local_time = activity.local_timestamp.isoformat()\n\n data = {'name': activity.title,\n 'type': STRAVA_ACTIVITIES_LOOKUP[activity.type],\n 'start_date_local': local_time,\n 'elapsed_time': activity.duration * 60, # need to convert to seconds, stored in db as minutes\n 'description': activity.description}\n\n if activity.distance is not None and activity.distance > 0:\n data['distance'] = activity.distance * 1000 # Strava API requires distance in m, stored in db as km\n\n return data",
"def _read_activity(session_path: Path):\n # Read activity file\n df_act = pd.read_csv(\n session_path / ACTIVITY_FILE,\n names=ACTIVITY_FILE_COLUMNS,\n usecols=[\n \"subject\",\n \"session_number\",\n \"start_time\",\n \"end_time\",\n \"gesture_scenario\",\n \"task_id\",\n ],\n header=None,\n engine=\"c\",\n )\n # Timestamps as additional datetime columns\n df_act[\"start_time_dt\"] = pd.to_datetime(df_act[\"start_time\"], unit=\"ms\")\n df_act[\"end_time_dt\"] = pd.to_datetime(df_act[\"end_time\"], unit=\"ms\")\n\n return df_act",
"def get_activity(variable):\n project = variable['project']\n try:\n exp = variable['exp']\n if isinstance(exp, list):\n return [CMOR_TABLES[project].activities[value][0] for value in exp]\n return CMOR_TABLES[project].activities[exp][0]\n except (KeyError, AttributeError):\n return None",
"def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response",
"def activity():\n return {\n \"type\": \"class\",\n \"base\": None,\n \"is_abstract\": True,\n \"is_document\": True,\n \"pstr\": (\"{}\", (\"canonical_name\",)),\n \"properties\": [\n (\n \"alternative_names\",\n \"str\",\n \"0.N\",\n \"List of names by which the activity is also known.\",\n ),\n (\n \"canonical_name\",\n \"str\",\n \"0.1\",\n \"Community defined identifier or name.\",\n ),\n (\n \"citations\",\n \"linked_to(shared.citation)\",\n \"0.N\",\n \"Set of pertinent citations.\",\n ),\n (\n \"description\",\n \"str\",\n \"0.1\",\n \"Description of what is to be done (or was done).\",\n ),\n (\n \"duration\",\n \"time.time_period\",\n \"0.1\",\n \"Time the activity was (or will be) active.\",\n ),\n (\n \"internal_name\",\n \"str\",\n \"0.1\",\n \"A name used for internal purposes.\",\n ),\n (\"keywords\", \"str\", \"0.1\", \"User defined keywords.\"),\n (\"long_name\", \"str\", \"0.1\", \"Longer version of activity name.\"),\n (\"name\", \"str\", \"1.1\", \"Short name or abbreviation.\"),\n (\n \"responsible_parties\",\n \"shared.responsibility\",\n \"0.N\",\n \"People or organisations responsible for activity.\",\n ),\n (\n \"previously_known_as\",\n \"str\",\n \"0.N\",\n \"List of names by which the activity was formerly known.\",\n ),\n (\n \"rationale\",\n \"str\",\n \"0.1\",\n \"Explanation of why this activity was carried out and/or what \"\n \"it was intended to achieve.\",\n ),\n ],\n }",
"def get_activity_object(activity_name, settings, logger, conn, token, activity_task):\n full_path = \"activity.\" + activity_name + \".\" + activity_name\n f = eval(full_path)\n # Create the object\n activity_object = f(settings, logger, conn, token, activity_task)\n return activity_object",
"def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)",
"def _request_activity_data(self, athlete, filename):\n response = self._get_request(self._activity_endpoint(athlete, filename)).json()\n\n activity = pd.DataFrame(response['RIDE']['SAMPLES'])\n activity = activity.rename(columns=ACTIVITY_COLUMN_TRANSLATION)\n\n activity.index = pd.to_timedelta(activity.time, unit='s')\n activity.drop('time', axis=1, inplace=True)\n\n return activity[[i for i in ACTIVITY_COLUMN_ORDER if i in activity.columns]]",
"def test_get_activity(self):\n pass",
"def test_get_activity(self):\n pass",
"def get_activity():\n try:\n activity = Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity",
"def get_activity(self, filename):\n return self._request_activity_data(self.athlete, filename)",
"def get_continuous_activity(self):\n from .continuousactivity import DSSContinuousActivity\n return DSSContinuousActivity(self.client, self.project_key, self.recipe_name)",
"def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response",
"def test_get_activities(self):\n pass",
"def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)",
"def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None",
"def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()",
"def get_current_activity(client):\n func = client.get_current_activity()\n activity_id = run_in_loop_now('get_current_activity', func)\n label = activities_by_id[str(activity_id)]\n return label",
"def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()",
"def activity(self):\n return self._activity",
"def fetch(self, activity):\n return None, None",
"def get_activity_name(activityType):\n return \"activity_\" + activityType",
"def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()",
"def manipulate_activity():\n pass",
"def get_activity_data(self, rid):\n raise NotImplementedError",
"def fetch_github_activity(gen, metadata):\n\n if \"GITHUB_ACTIVITY_FEED\" in gen.settings.keys():\n gen.context[\"github_activity\"] = gen.plugin_instance.fetch()",
"def getActivity(self):\n return self.activity"
] | [
"0.652813",
"0.64412045",
"0.6052704",
"0.60521966",
"0.59815174",
"0.5974512",
"0.58580536",
"0.58321756",
"0.57499844",
"0.57043284",
"0.5635826",
"0.5635826",
"0.55382925",
"0.5494571",
"0.5475397",
"0.5416918",
"0.53558916",
"0.5349023",
"0.53315115",
"0.5306385",
"0.52927756",
"0.52829766",
"0.5280142",
"0.526133",
"0.5234031",
"0.5233257",
"0.5224578",
"0.52213764",
"0.5152427",
"0.50889534"
] | 0.64874995 | 1 |
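A hedged usage sketch for the output-form lookup in the positive document above; it assumes a configured AWS provider and an existing activity named "myactivity", and it exports two of the fields listed on GetActivityResult in the negatives.

```python
import pulumi
import pulumi_aws as aws

# The *_output variant accepts pulumi Inputs and returns a lifted Output,
# so the lookup can depend on values unknown until deploy time.
sfn_activity = aws.sfn.get_activity_output(name="myactivity")

pulumi.export("activityArn", sfn_activity.arn)
pulumi.export("activityCreated", sfn_activity.creation_date)
```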
get the cvxpy variable associated with this layer | def get_cvxpy_variable(self, channel_indx=None):
if channel_indx is None:
output_channels = cp.hstack(
[
self.layer_input[cur_channel_indx]
for cur_channel_indx in range(self.n_in_channels)
]
)
else:
output_channels = self.layer_input[channel_indx]
return output_channels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xvar ( self ) :\n return self.__xvar",
"def x ( self ) :\n return self.xvar",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def zvar ( self ) :\n return self.__zvar",
"def getCvar(self, key):\n print \"get cvar %s\" % key\n return self.cvars.get(key)",
"def getVariable(self, gradientCoordinate):\n return self.variables[gradientCoordinate]",
"def get_variable(x):\n return x.cuda() #if use_cuda else x",
"def getVariable(self):\n return _libsbml.Rule_getVariable(self)",
"def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])",
"def intrinsic_variable(self):\n if IVARG_ROLE in self.args:\n return self.args[IVARG_ROLE]\n return None",
"def var(self) -> float:\n return self._data.var()",
"def covar(self) -> np.ndarray:\n if self._covar is None:\n self._covar = batched_inv_spd(batched_cholesky(self._inv_covar))\n return self._covar",
"def variable_vis(self):\n return self._variable_vis",
"def to_var(self, x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)",
"def get_model_var(self):\n return self.model_var",
"def yvar ( self ) :\n return self.__yvar",
"def var(self):\n return np.diag(self.covar)",
"def _get_embedding_variable(self, layer_name):\n return self._tls._embed_variables.get(layer_name, None)",
"def getVariable(self):\n return _libsbml.EventAssignment_getVariable(self)",
"def name(self):\n return self.__nvXxPr.cNvPr.get('name')",
"def get_variable(x, volatile=False):\n tensor = torch.cuda.LongTensor(x) if CUDA else torch.LongTensor(x)\n return autograd.Variable(tensor, volatile=volatile)",
"def independent_variable(self):\n return self._independent_variable",
"def var(self):\n\n return self.scale ** -2 \\\n * (m.gamma(1 + 2 * self.shape ** -1) - m.gamma(1 + self.shape ** -1) ** 2)",
"def get_variable_value(self, name):\n return self._design.GetVariableValue(name)",
"def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))",
"def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))",
"def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))",
"def V_var(self) -> Optional[np.ndarray]:\n\n def _retrieve(fm: VariationalFM) -> np.ndarray:\n return fm.V_var\n\n return runtime_error_to_optional(self, _retrieve)"
] | [
"0.6952394",
"0.6349231",
"0.6311073",
"0.6311073",
"0.6311073",
"0.6210869",
"0.614176",
"0.61036706",
"0.6093137",
"0.5973595",
"0.5904504",
"0.58920914",
"0.5774172",
"0.5744649",
"0.5726218",
"0.5713547",
"0.56973785",
"0.56402665",
"0.5625536",
"0.5621677",
"0.5595852",
"0.559292",
"0.55797166",
"0.5579235",
"0.5548584",
"0.554761",
"0.5533927",
"0.5533927",
"0.5533927",
"0.55308187"
] | 0.7194828 | 0 |
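A self-contained sketch of the channel-stacking pattern in the positive document above; the layer class is replaced by a plain list of cvxpy Variables, which is an assumption about how `layer_input` is populated.

```python
import cvxpy as cp

# Per-channel decision variables, mirroring `self.layer_input[channel_indx]`.
n_in_channels, n_pixels = 3, 4
layer_input = [cp.Variable(n_pixels) for _ in range(n_in_channels)]

def get_cvxpy_variable(channel_indx=None):
    if channel_indx is None:
        # Stack every channel into one flat expression.
        return cp.hstack(layer_input)
    return layer_input[channel_indx]

print(get_cvxpy_variable().shape)   # (12,)
print(get_cvxpy_variable(1).shape)  # (4,)
```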
returns number of output channels | def get_n_channels(self):
return self.n_out_channels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_channels(self):\n return len(self.channels)",
"def num_channels_per_output(cls) -> list[tuple[int, ...]]:\n return [\n (16, 24, 40, 112, 320),\n (16, 24, 40, 112, 320),\n (16, 24, 48, 120, 352),\n (24, 32, 48, 136, 384),\n (24, 32, 56, 160, 448),\n (24, 40, 64, 176, 512),\n (32, 40, 72, 200, 576),\n (32, 48, 80, 224, 640),\n (32, 56, 88, 248, 704),\n (72, 104, 176, 480, 1376),\n ]",
"def get_num_channels():\r\n check_mixer()\r\n return sdl.Mix_GroupCount(-1)",
"def channels(self) -> int:\n return len(self._channel_arrays)",
"def num_of_channels(self) -> int:\n return len(self.non_zero_channels())",
"def num_channels(self):\n return 3",
"def n_channels(self):\n return self._n_channels",
"def getNchan(self):\n return self.shape(squeeze=False)[2]",
"def nchans(self):\n return self.bw / self.bw_chan",
"def get_num_channels(x):\n return x.get_shape().as_list()[-1]",
"def channel_size(self):\n if self.channels is None:\n return 0\n return self.channels.size",
"def get_num_outputs(self):\n return len(self.outputs)",
"def get_num_channels(self):\r\n check_mixer()\r\n return sdl.Mix_GroupCount(self._chunk_tag)",
"def get_num_channels(self):\n return _uhd_swig.rx_streamer_get_num_channels(self)",
"def get_number_of_output_ports(self):\n return 1",
"def out_channels(self):\r\n return [self._width] * (self._depth + 1)",
"def n_outputs(self):\n return len(self.output_names())",
"def n_outputs(self):\n return self.__n_outputs",
"def out_channel(self) -> int:\n return self._get_divisible_channel(self.args[0] * self.width_multiply)",
"def num_frames(self):\n return self._first_rgb.shape[1]",
"def num_channels(self):\n with audioread.audio_open(self.path) as f:\n return f.channels",
"def n_outputs(self):\n return len(self._output_labels)",
"def n_outputs(self):\n return len(self._output_labels)",
"def get_num_channels(self):\n return _uhd_swig.tx_streamer_get_num_channels(self)",
"def num_channels(input_tensor):\n return input_tensor.get_shape().as_list()[-1]",
"def get_num_of_output_tensors(self):\n return self._engine.get_num_of_output_tensors()",
"def channel_count(self):\n index = self._ordered_input_names.index('channel_count')\n return self._inputs[index]",
"def _get_cls_out_channels(self):\n # Class numbers (k) + objectness (1)\n return self.num_classes",
"def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()",
"def outdim(self):\n return len(self.getSensors())"
] | [
"0.7895569",
"0.78375614",
"0.7822447",
"0.77712107",
"0.763508",
"0.7623057",
"0.76186246",
"0.75209033",
"0.7185467",
"0.717589",
"0.71026057",
"0.70969474",
"0.7064426",
"0.7045207",
"0.6935932",
"0.6927934",
"0.69142646",
"0.68657845",
"0.6865311",
"0.6837313",
"0.68344104",
"0.68180615",
"0.68180615",
"0.67348015",
"0.67129594",
"0.6697453",
"0.669631",
"0.66316694",
"0.6584298",
"0.6572451"
] | 0.86962193 | 0 |
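The positive document above just returns a stored count; when only an array is at hand, the count is usually read off the shape instead, as in the `get_num_channels(x)` negative. A tiny NumPy sketch (channels-last layout is an assumption):

```python
import numpy as np

def get_num_channels(x):
    # Channels-last convention: (height, width, channels).
    return x.shape[-1]

image = np.zeros((32, 32, 3))
print(get_num_channels(image))  # 3
```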
Constructs a BiRealNet18 model. | def birealnet18(pretrained=False, **kwargs):
model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model",
"def birealnet34(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model",
"def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model",
"def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model",
"def rl_modelrl_l1_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams",
"def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model",
"def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model",
"def rl_modelrl_l1_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams",
"def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model",
"def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)",
"def rl_modelrl_l2_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams",
"def rl_modelrl_l2_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams",
"def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])",
"def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)",
"def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model",
"def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model",
"def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model",
"def resnet18(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet18']))\n return model",
"def raw_model():\n model = cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model",
"def newModel(self, model_name):\n model = super().newModel(model_name)\n model.Params.Method = self.getint(CC.L2_GRB_METHOD, section=CC.GUROBI, default=-1)\n model.Params.Presolve = self.getint(CC.L2_GRB_PRESOLVE, section=CC.GUROBI, default=-1)\n model.Params.PreSparsify = self.getint(CC.L2_GRB_PRESPARSIFY, section=CC.GUROBI, default=-1)\n return model",
"def build_model():",
"def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)",
"def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()"
] | [
"0.6268504",
"0.620634",
"0.6204012",
"0.61781776",
"0.616232",
"0.61510354",
"0.61510354",
"0.61510354",
"0.61510354",
"0.61510354",
"0.6100227",
"0.6087697",
"0.6079006",
"0.6012798",
"0.5975343",
"0.59738714",
"0.59638786",
"0.5960628",
"0.5948427",
"0.5936224",
"0.5920398",
"0.591646",
"0.591646",
"0.591646",
"0.58933043",
"0.5810627",
"0.5810202",
"0.57819504",
"0.5714675",
"0.5711785"
] | 0.70269394 | 1 |
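A hedged usage sketch for the constructor above. The import path and the torchvision-style interface of `BiRealNet`/`BasicBlock` are assumptions, since only the factory function appears in this record; note also that the `pretrained` flag is accepted but unused by the document.

```python
import torch

from birealnet import birealnet18  # assumed module name, not shown in the record

model = birealnet18(num_classes=1000)  # num_classes forwarding via **kwargs is assumed
x = torch.randn(1, 3, 224, 224)
print(model(x).shape)  # expected under these assumptions: torch.Size([1, 1000])
```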
Constructs a BiRealNet34 model. | def birealnet34(pretrained=False, **kwargs):
model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model",
"def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model",
"def resnet34(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model",
"def __init__(self, pretrained=True, freeze_weights=True):\n super(RaisinNet34, self).__init__()\n # Define the model's name for it's output files\n # Load a pre-trained ResNet-34 model and turn off autograd\n # so its weights won't change.\n architecture = resnet34(pretrained=pretrained)\n if freeze_weights:\n for layer in architecture.parameters():\n layer.requires_grad = False\n # Copy the convolutional layers of the model.\n self.conv1 = architecture.conv1\n self.bn1 = architecture.bn1\n self.relu = architecture.relu\n self.maxpool = architecture.maxpool\n self.layer1 = architecture.layer1\n self.layer2 = architecture.layer2\n self.layer3 = architecture.layer3\n self.layer4 = architecture.layer4\n # Copy the average pooling layer of the model.\n self.avgpool = architecture.avgpool\n # Redefine the classification block of ResNet-34.\n # Use LeakyReLU units instead of ReLU units.\n # Output layer has 2 nodes only for the 2 classes in the PCam dataset.\n in_ftrs = architecture.fc.in_features\n self.fc = nn.Linear(in_features=in_ftrs, out_features=2, bias=True)\n # Define a LogSoftmax layer for converting outputs to probabilities\n # Not needed in `forward()` because included in nn.CrossEntropyLoss\n self.log_softmax = nn.LogSoftmax(dim=1)",
"def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple",
"def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model",
"def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)",
"def build_model(self) -> nn.Module:\n pass",
"def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()",
"def ResNet34(num_classes=10):\n return ResNet(BasicBlock, \n [3,4,6,3], \n num_classes=num_classes)",
"def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)",
"def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model",
"def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model",
"def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model",
"def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)",
"def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self",
"def model_creator(config):\n return nn.Linear(1, 1)",
"def resnet34(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet34(pretrained)\n if freeze:\n set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model",
"def resnet34(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet34']))\n return model",
"def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model",
"def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model",
"def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model",
"def getModel(config: configuration.Configuration) -> torch.nn.Module:\n if config.modelName == ModelName.DENSE:\n return DenseGenerator(1, 1, n_blocks=config.blockCount)\n elif config.modelName == ModelName.SHALLOW:\n return Shallow(1, 1, )\n elif config.modelName == ModelName.TIRAMISU:\n model = Tiramisu(1, 1, structure=(\n config.down, # Down blocks\n config.bottleneck, # bottleneck layers\n config.up, # Up blocks\n ), checkpoint=False)\n\n model.initialize_kernels(torch.nn.init.kaiming_uniform_, conv=True)\n return model\n else:\n return SimpleCNN()"
] | [
"0.6753302",
"0.6753302",
"0.63931054",
"0.63634723",
"0.6265441",
"0.6239585",
"0.6229357",
"0.6220196",
"0.6198277",
"0.6196694",
"0.6196694",
"0.6196694",
"0.61152387",
"0.6085224",
"0.60785514",
"0.6060138",
"0.60452986",
"0.6025176",
"0.6025176",
"0.6025176",
"0.60139596",
"0.600001",
"0.5968062",
"0.5965487",
"0.5953345",
"0.59395945",
"0.59389675",
"0.5927736",
"0.59229404",
"0.59194046"
] | 0.73481995 | 0 |
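The two constructors differ only in per-stage block counts ([4, 4, 4, 4] vs [6, 8, 12, 6]); the block itself is not shown in either record. In the Bi-Real Net paper the blocks binarize activations with a sign function trained through a piecewise-polynomial straight-through estimator, sketched below as an assumption about what `BasicBlock` contains rather than a copy of this repo's code.

```python
import torch

class BinaryActivation(torch.nn.Module):
    """Sign forward pass with the ApproxSign gradient from the Bi-Real Net paper."""

    def forward(self, x):
        out_forward = torch.sign(x)
        # Piecewise-polynomial surrogate used only for the backward pass:
        # -1 for x < -1, x^2 + 2x on [-1, 0), -x^2 + 2x on [0, 1), 1 for x >= 1.
        mask1 = (x < -1).float()
        mask2 = (x < 0).float()
        mask3 = (x < 1).float()
        out1 = -1 * mask1 + (x * x + 2 * x) * (1 - mask1)
        out2 = out1 * mask2 + (-x * x + 2 * x) * (1 - mask2)
        out3 = out2 * mask3 + 1 * (1 - mask3)
        # Straight-through trick: the forward value is sign(x); gradients flow via out3.
        return out_forward.detach() - out3.detach() + out3

act = BinaryActivation()
print(act(torch.tensor([-2.0, -0.5, 0.5, 2.0])))  # tensor([-1., -1., 1., 1.])
```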
Clears the peak_to_peer info which can get quite large. | async def clear_sync_info(self) -> None:
self.peak_to_peer = OrderedDict()  # fresh empty map; assumes `from collections import OrderedDict` | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self):\n self._fingerprint = 0",
"def clear(self):\n self.mismatch_error = None\n self.pt_outs = None\n self._onnx_graph = None\n self.upper_graph_info = None\n self.lower_graph_info = None",
"def clear(self):\n self.molo_tcp_pack.clear()\n self.tranparency = False\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True",
"def clear(self):\n self._members = []\n self._size = 0\n self._updated = True\n self._BFS_collect = None\n self._center = None",
"def reset():\n\n Follower.clear()",
"def clear(self):\n self.molo_tcp_pack.clear()\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True\n self.client_status = None",
"def restore_peak_size(self):\n if self.left_peak_size > 0 and self.peak_size < self.size:\n # Account for the left_peak_size which might be less than peak_size\n diff = min(self.size - self.peak_size, self.left_peak_size)\n self.peak_size += diff\n self.left_peak_size -= diff",
"def clear(self):\n self.__attendees = []\n self._track_changes()",
"def clear(self):\n self.append_send_buffer = bytes()\n self.append_connect = True",
"def clear(self):\n self._latencies = [0] * len(BUCKETS)",
"def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0",
"def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()",
"def reset(self):\n self._monitor.notify_received()\n self._pinger.stop()\n self._mark_fresh()",
"def clear_pins(self):\n self.pins = {}\n self.all_pins = set()\n self.pin_groups = {} \n # DO NOT clear the blockages as these don't change\n self.rg.reinit()",
"def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None",
"def rm_calibration(self):\n\n self.bin_edges_kev = None",
"def clear(self):\n wait(self.proto.vanish())",
"def invalidate_min_max(self):\n self.max_amplitude = None\n self.min_amplitude = None\n self.max_wavenumber = None\n self.min_wavenumber = None",
"def clearResonancePeakDimContribs(resonance,peaks=None):\n\n if not peaks:\n peaks = []\n\n peakDict = {}\n for peak in peaks:\n peakDict[peak] = True\n \n peakDims = {} \n for contrib in resonance.peakDimContribs:\n peakDim = contrib.peakDim\n \n if (not peakDict) or peakDict.get(peakDim.peak):\n peakDims[peakDim] = True\n peakContribs = contrib.peakContribs\n contrib.delete()\n \n for peakContrib in peakContribs:\n if not peakContrib.peakDimContribs:\n peakContrib.delete()",
"def reset(self):\n self._topics.clear()",
"def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)",
"def clear(self):\n\n self.__fasteners.clear()\n self.__update()",
"def clearPulse(self):\n self.pulses = dict() # old mode for compatibility reasons\n self._params[\"pulses\"] = dict() # old mode\n self.totalPulse[:] = 0 # old mode\n self.sendPulse() # old mode\n\n self.clearMarkersList() # new mode\n self.pulseList = []\n self.preparePulseSequence()\n self.prepareMarkerSequence()\n self.sendPulseSequence()",
"def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None\n self.prev_attn = None",
"def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None",
"def reset(self):\n self.visited = set()\n del self.targets[0]",
"def clear_face(self):\n rospy.loginfo('clearing all learned faces')\n self._clear_srv()",
"def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None",
"def clear_statistics(self, sniff_port_list):\n pass",
"def reset(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n self.declarations_table = None\n self.annotations_table = None\n self.num_frames = 0\n self.num_frames_by_uid = {}\n self.num_frames_by_uid_pre_remove = {}"
] | [
"0.6206844",
"0.58822215",
"0.5839706",
"0.5757953",
"0.57487977",
"0.5747801",
"0.5705526",
"0.5700115",
"0.5695211",
"0.56707",
"0.5589478",
"0.5570707",
"0.5559506",
"0.55220956",
"0.55071974",
"0.55066085",
"0.5479404",
"0.5468562",
"0.5449649",
"0.5447672",
"0.5441297",
"0.54409546",
"0.54408336",
"0.5408519",
"0.5401802",
"0.5380283",
"0.5370429",
"0.53647",
"0.5354091",
"0.5350153"
] | 0.7599027 | 0 |
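Re-binding to a fresh OrderedDict frees the whole map at once; a bounded alternative that evicts the oldest entries instead of clearing everything is sketched below. The cap and names are hypothetical, not part of the original record.

```python
from collections import OrderedDict

MAX_PEAKS = 256  # hypothetical cap

peak_to_peer = OrderedDict()

def record_peak(peak_hash, peer_id):
    # Move-to-end keeps the ordering a recency order, not just insertion order.
    peak_to_peer[peak_hash] = peer_id
    peak_to_peer.move_to_end(peak_hash)
    # Evict oldest entries so the map cannot grow without bound.
    while len(peak_to_peer) > MAX_PEAKS:
        peak_to_peer.popitem(last=False)

for i in range(300):
    record_peak(f"peak{i}", f"peer{i % 7}")
print(len(peak_to_peer))  # 256
```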
Make a hex string from the venue names to use as a unique id. Only the last 8 characters are used for the unique id. | def make_unique_id(venue_list):
md5_hash = md5()  # assumes `from hashlib import md5` at module level
for name in venue_list:
    md5_hash.update(name.encode("utf-8"))  # md5 requires bytes; encode str names
hash_hex = md5_hash.hexdigest()
return hash_hex[-8:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)",
"def _unique_id():\n id = \"\"\n for i in xrange(0,8):\n id += choice(ascii_letters)\n return id",
"def format_unique_id(address: str) -> str:\n return address.replace(\":\", \"\").lower()",
"def makeid(cls):\n return str(uuid.uuid4().hex)",
"def _id(target):\n return ''.join([hex(char) for char in bytearray(target)])",
"def unique_id() -> str:",
"def get_hex_id(fullRouterName):\n hexId = \"\"\n if fullRouterName.count(\"=\") > 0:\n hexId = fullRouterName.split(\"=\")[0]\n else:\n hexId = fullRouterName.split(\"~\")[0]\n hexId = hexId.replace(\"$\", \"\")\n return hexId",
"def create_hash_hex(self, vehicles):\n field = \"\"\n for i, vehicle in enumerate(vehicles):\n if vehicle.orientation == 'H':\n x = vehicle.x\n if x == 10:\n x = \"a\"\n elif x == 11:\n x = \"b\"\n field += str(x)\n else:\n y = vehicle.y\n if y == 10:\n y = \"a\"\n elif y == 11:\n y = \"b\"\n field += str(y)\n return field",
"def generate_id(self):\n unique_id = \"\"\n\n while len(unique_id) < self.id_length:\n ascii_number = self.get_random_bits()\n\n if self.is_approved_ascii(ascii_number):\n random_char = chr(ascii_number)\n\n if not self.is_excluded_char(random_char):\n unique_id += chr(ascii_number)\n\n return unique_id",
"def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))",
"def unique_id() -> bytes:",
"def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')",
"def unique_str():\n return hex(random.randint(0, 256 * 256 * 256 * 256 - 1))[2:]",
"def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()",
"def _NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))",
"def make_trace_id(trace_id: bytes) -> str:\n return base64.b64encode(trace_id).decode(\"utf-8\")",
"def _make_uuid(val):\n h = hashlib.md5(val).hexdigest()\n return '{0}-{1}-{2}-{3}-{4}'.format(\n h[:8], h[8:12], h[12:16], h[16:20], h[20:])",
"def uuid():\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(32))",
"def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))",
"def gen_uuid() -> str:\n return str(uuid4())",
"def _create_finding_id(control_id, resource_name, length=20):\n input = control_id + resource_name\n hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()\n result = int(hex, 16) % (10 ** length)\n return str(result)",
"def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)",
"def generate_order_id():\n rands = []\n for i in range(0, 16):\n r = random()\n rand = 4294967296.0 * r\n rands.append((int(rand) >> ((3 & i) << 3)) & 255)\n\n hexa = []\n for i in range(0, 256):\n hexa.append(str(hex(i+256)).lstrip(\"0x\").rstrip(\"L\")[1:])\n\n id = \"\"\n for i in range(0, 16):\n id += hexa[rands[i]]\n\n if (i == 3) or (i == 5) or (i == 7) or (i == 9):\n id += \"-\"\n\n return(id)",
"def new_uid():\n return str(uuid.uuid1())[:30]",
"def get_uuid(s):\n sha = sha256(s.encode('utf-8')).hexdigest()\n uuid = UUID(sha[:32])\n return str(uuid)",
"def gen_uuid():\n return str( uuid.uuid4() )",
"def gen_uuid():\n return str( uuid.uuid4() )",
"def gen_uuid():\n return str(uuid.uuid4())",
"def generate_id():\n return uuid4().get_hex()",
"def generate_id(employee_id) :\n\n\t\thash_bits = random.getrandbits(128)\n\t\thash_code = \"%032x\" % hash_bits\n\t\thash = hash_code[:6] + str(employee_id)\n\n\t\treturn hash"
] | [
"0.7108777",
"0.67829424",
"0.67574894",
"0.66712064",
"0.66090417",
"0.6605987",
"0.6547099",
"0.643587",
"0.6423709",
"0.64172685",
"0.6412221",
"0.6411684",
"0.63644415",
"0.63502777",
"0.6323095",
"0.62953424",
"0.6283156",
"0.62323576",
"0.61994445",
"0.61936736",
"0.61043406",
"0.6089622",
"0.60891914",
"0.608694",
"0.60810244",
"0.6064978",
"0.6064978",
"0.6064894",
"0.6049746",
"0.60430974"
] | 0.8124629 | 0 |
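A minimal usage sketch for `make_unique_id` above; the venue names are invented for illustration, and the result depends on list order.

venues = ["Blue Note", "The Fillmore", "Red Rocks"]
print(make_unique_id(venues))  # deterministic 8-char hex id for this exact ordering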
Raises a ValueError if matrix `value` is not square. | def assert_square(name: str, value: np.ndarray) -> None:
    if len(value.shape) != 2 or value.shape[0] != value.shape[1]:
        raise ValueError(f"{name} must be a square matrix") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")",
"def test_change_basis_raises_not_square(self, fun):\n A = np.random.rand(4, 6)\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n fun(A)",
"def check_squareness(A):\n if len(A) != len(A[0]):\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def check_squareness(A):\n if len(A) != len(A[0]):\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def check_squareness(self, Am):\r\n if len(Am) != len(Am[0]):\r\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def test_expend_not_square(self):\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n symplectic.expand_passive(np.ones((3, 2)), [0, 1, 2], 5)",
"def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n return True",
"def test_5_size_less_than_1(self):\r\n with self.assertRaises(ValueError):\r\n S4 = Square(0)",
"def test_data_value(self):\n self.assertRaises(ValueError, Square, 0, 2, 3)\n self.assertRaises(ValueError, Square, -2)\n self.assertRaises(ValueError, Square, 3, -3, 2)\n self.assertRaises(ValueError, Square, 2, 3, -2)",
"def test_badsizevaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square((1, 2), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def validate_square(cls, square):\n if len(square) > 3:\n raise ValueError('Invalid square')\n\n cls._get_row_fow_letter(square[0])\n square_column = int(square[1:])\n if square_column not in range(1, 11):\n raise ValueError('The number of the column must be '\n 'an integer between 1 to 10')",
"def square(value):\n return value ** 2",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def test_badsizevaluelists(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square([1, 2], 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def can_add_to_square(self, tile, value):\n start_row = tile.row // self.board_squared * self.board_squared\n start_col = tile.column // self.board_squared * self.board_squared\n\n for row in range(start_row, start_row + self.board_squared):\n for col in range(start_col, start_col + self.board_squared):\n if self.puzzle[row][col].value == value:\n return False\n\n return True",
"def test_badsizevaluefuncs(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(print(), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_badsizevaluefloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(float(1), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_set_cell_with_too_large_column(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (0, 9), 0)",
"def test_sd_under_exception(self):\n z_matrix = np.array(\n [[ 0.0, 0.0, 1.0], # noqa: E201\n [-0.1, 0.2, 0.8], # noqa: E201\n [ 0.2, 0.4, 0.6], # noqa: E201\n [ 0.3, 0.7, 0.3], # noqa: E201\n [ 0.6, 0.8, 0.2], # noqa: E201\n [ 0.8, 0.9, 0.1], # noqa: E201\n [ 1.0, 1.0, 0.0]], # noqa: E201\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"SD\")",
"def test_value_error(self):\n self._error_test(ValueError)",
"def test_errors(self):\n self.assertRaises(TypeError, columnize, 5, 'reject input - not array')\n return",
"def test_sd_over_exception(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.1],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"SD\")",
"def handle_errors(self, value):\n if not isinstance(value, int):\n raise TypeError(\"size must be an integer\")\n if value < 0:\n raise ValueError(\"size must be >= 0\")",
"def is_square(m):\n if not hasattr(m, '__len__'):\n return False\n\n is_flat_square_matrix = all(np.isscalar(c) for c in m) and np.sqrt(len(m)).is_integer()\n if is_flat_square_matrix:\n return True\n\n is_structed_square_matrix = all(len(row) == len(m) for row in m)\n return is_structed_square_matrix",
"def test_badsizevaluesets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square({1, 2, 3}, 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_convert_matrix():\n foo = Value(matrices=[[1.0, 2.0], [-2.0, 1.0]])\n assert foo.matrices[0][0][1].value == 2.0\n assert foo.matrices[0][1][0].value == -2.0",
"def __allowed_values_correct_matrix(self):\n strTestName = 'Values of a Numpy Array 2D (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 2D')\n RxCSObject.paramAllowed('parameter1', range(int(2e3)))\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e1))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_set_cell_with_too_large_row(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (9, 0), 0)",
"def test_square(self, board, row, col, test):\n if row < 0 or row > 7:\n return False\n if col < 0 or col > 7:\n return False\n \n return test(board[row][col])",
"def test_Sobol_G_raises_error_if_values_wrong_size():\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n with raises(ValueError):\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)"
] | [
"0.70691025",
"0.6647211",
"0.62668157",
"0.62668157",
"0.6163664",
"0.6089783",
"0.6032893",
"0.5889549",
"0.58770996",
"0.58336306",
"0.57889503",
"0.57666147",
"0.5684565",
"0.5575981",
"0.5559106",
"0.5558494",
"0.553429",
"0.55041176",
"0.5487154",
"0.5484843",
"0.547051",
"0.5455025",
"0.544322",
"0.54421824",
"0.54277766",
"0.5418165",
"0.5416506",
"0.5412892",
"0.54055595",
"0.5360501"
] | 0.7312172 | 0 |
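A quick demonstration of `assert_square` above, assuming numpy is available as np:

import numpy as np

assert_square("kernel", np.eye(3))  # passes silently
try:
    assert_square("kernel", np.ones((2, 3)))
except ValueError as err:
    print(err)  # kernel must be a square matrix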
Calculates the Shannon entropy of the probability vector `ps`, using logarithm base `base`. | def shannon_entropy(ps: np.ndarray, base: int = 2) -> float:
    return -np.sum(ps * np.log(ps) / np.log(base)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy(self, base: int = None):\n\n # shannon entropy in nats\n fdist_ = self.fdist\n fdist_[\"prob\"] = fdist_[\"freq\"] / fdist_[\"freq\"].sum()\n fdist_[\"logp\"] = np.log(fdist_[\"prob\"])\n fdist_[\"nats\"] = -fdist_[\"prob\"] * fdist_[\"logp\"]\n entropy_ = fdist_[\"nats\"].sum()\n\n # convert base\n if base:\n entropy_ = entropy_ / np.log(base)\n\n # return\n return entropy_",
"def shannon(counts, base=2):\n freqs = counts/float(counts.sum())\n nonzero_freqs = freqs[freqs.nonzero()]\n return -sum(nonzero_freqs*log(nonzero_freqs))/log(base)",
"def entropy_numba(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))",
"def _entropy2(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n # quick observation shows ent between 0.0 and 4.0.\n return ent",
"def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))",
"def calculate_entropy(prob):\n return -(prob * math.log(prob,2))",
"def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S",
"def cross_entropy(p, q, base=2):\n q = ma.array(q, mask=(q == 0))\n return - np.vdot(p, ma.log(q)) / np.log(base)",
"def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )",
"def equitability(counts, base=2):\n return shannon(counts, base)/(log((counts!=0).sum())/log(base))",
"def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)",
"def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))",
"def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))",
"def get_entropy_of_labels(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n return ent",
"def entropy(x, bins, normalize=False, xy_probabilities=False):\n # calculate probabilities if xy_probabilities == False\n if xy_probabilities:\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n \n # add a small number to all probabilities if zero occurs\n if x.any(0):\n p = x + 1e-15\n else:\n p = x\n else:\n # get the bins\n bins = np.histogram_bin_edges(x, bins)\n\n # calculate the empirical probabilities\n count = np.histogram(x, bins=bins)[0]\n\n # if counts should be None, raise an error\n if np.sum(count) == 0:\n raise ValueError('The histogram cannot be empty. Adjust the bins to ' +\n 'fit the data')\n # calculate the probabilities\n p = (count / np.sum(count)) + 1e-15\n\n\n # calculate the Shannon Entropy\n if normalize:\n # get number of bins\n nbins = len(p)\n # maximal entropy: uniform distribution\n normalizer = np.log2(nbins) \n\n return - p.dot(np.log2(p)) / normalizer\n else:\n return - p.dot(np.log2(p))",
"def entropy_coefficient(filter1, filter2, base=2):\n\n if (type(filter1) is NullField) or (type(filter2) is NullField):\n return 0\n\n total_count = int(filter1.bit_size)\n\n f1_element_count = filter1.filter.count(True)\n f2_element_count = filter2.filter.count(True)\n\n prob_f1 = f1_element_count / total_count\n prob_f2 = f1_element_count / total_count\n\n e_f1 = -1.0 * total_count * prob_f1 * math.log(prob_f1) / math.log(base)\n e_f2 = -1.0 * total_count * prob_f2 * math.log(prob_f2) / math.log(base)\n\n entropy = abs(e_f1 - e_f2)\n\n # for element_count in Counter(data).values():\n # p = element_count / total_count\n # entropy -= p * math.log(p, self.base)\n\n assert entropy >= 0\n\n return 1 - entropy",
"def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())",
"def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])",
"def base_entropy_masked(seq_list, base_set, base_idx):\n # entropy analysis\n base_list = [seq[base_idx] for seq in seq_list]\n freq_dict = Counter(base_list)\n mask_list = ['-', 'N']\n n_seq = sum([freq_dict[base] for base in freq_dict if base not in mask_list])\n H = 0\n total_masked = 0\n for base in freq_dict:\n if base in mask_list:\n total_masked += freq_dict[base]\n continue\n P = freq_dict[base]/n_seq\n H -= log2(P) * P\n masked_pct = total_masked/len(base_list)\n return H, masked_pct",
"def ShannonEntropy(self,s):\n e = s[np.nonzero(s)]**2 * np.log(s[np.nonzero(s)]**2)\n return np.sum(e)",
"def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e",
"def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count",
"def entropy(class_probabilities):\n return sum(-p * math.log(p, 2)\n for p in class_probabilities\n if p) #ignore 0's",
"def entropy(p):\n ent = tf.where(p > np.finfo(np.float32).eps, -p * tf.log(p), tf.zeros_like(p))\n ent = tf.reduce_sum(ent, axis=1)\n return ent",
"def entropy(class_probabilities):\n return sum(-p * math.log(p,2)\n for p in class_probabilities\n if p)",
"def entropy_(P):\n res = 0.0\n\n mask = P != 0.0 # avoid 0 in log\n f = lambda x: x*np.log2(x)\n # map-reduce strategy (likely to be more optimized than loops)\n temp = list(map(f, P[mask]))\n res = -np.sum(temp, dtype=float)\n return res",
"def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))",
"def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s",
"def entropy(p_list):\n assert len(p_list) > 0\n E = 0.0\n for p in p_list:\n if p == 0.0:\n continue\n E += p*math.log(p)\n return E",
"def shannon_entropy(c):\n\n c_normalized = c / float(np.sum(c))\n c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array\n entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits\n return entropy"
] | [
"0.76036006",
"0.67516744",
"0.6684365",
"0.6516514",
"0.64760756",
"0.62832654",
"0.6267193",
"0.62411416",
"0.6221311",
"0.62205845",
"0.6211119",
"0.6175704",
"0.61547273",
"0.59715253",
"0.5954994",
"0.59400564",
"0.58892614",
"0.5873213",
"0.58565325",
"0.5835365",
"0.5805754",
"0.5805641",
"0.57898796",
"0.5783675",
"0.577394",
"0.5706728",
"0.56519604",
"0.5623014",
"0.55960464",
"0.557805"
] | 0.8918445 | 0 |
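A sanity check for `shannon_entropy` above: a fair coin carries exactly one bit. Note that zero probabilities are not handled, since log(0) yields nan.

import numpy as np

ps = np.array([0.5, 0.5])
print(shannon_entropy(ps))             # 1.0 (bits, base 2)
print(shannon_entropy(ps, base=np.e))  # ~0.6931 (nats)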
Simply tests if `img` has 3 channels. | def is_rgb(img: np.ndarray) -> bool:
    return len(img.shape) >= 1 and img.shape[-1] == 3 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False",
"def rgb(self) -> bool:\n return self.image_shape[2] == 3",
"def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True",
"def is3DImage(self):\n\t\treturn self.is3D",
"def check_niimg_3d(niimg, dtype=None):\n return check_niimg(niimg, ensure_ndim=3, dtype=dtype)",
"def num_channels_in_image(img: torch.Tensor):\n if img is None or img.ndim < 2:\n raise ValueError('Invalid image data')\n if img.ndim == 2:\n return 1\n else:\n return img.shape[0]",
"def is_gray(img: np.ndarray):\n return len(img.shape) == 2 and img.shape[0] > 1 and img.shape[1] > 1",
"def is_grayscale(img):\n return len(img.shape) == GS",
"def is_cv3():\n (major, minor, _) = cv2.__version__.split('.')\n return int(major) == 3",
"def check_image_size(img_name, img_path):\n \n try:\n \n # Open image\n img = Image.open(img_name)\n \n # Determine size of image\n width, height = img.size\n \n # Check if image is square\n if (width==height):\n is_square = True\n else:\n is_square = False\n \n # Check for channels in image\n img_list = list(img.getdata())\n img_max = max(img_list)\n if (type(img_max)==int):\n is_single_channel = True\n else:\n is_single_channel = False\n \n return is_square, is_single_channel\n \n finally:\n \n # Close image\n img.close()",
"def _isGrayscale(self, img: ndarray) -> bool:\n if len(np.squeeze(img).shape) == 2:\n return True\n else:\n return False",
"def colored(img: np.array):\n # Check if image is colored or black and white\n r, g, b = [normalize(img[..., i]) for i in range(3)]\n color_factor = sum([np.mean(np.square(c1 - c2)) for c1, c2 in ((r, g), (r, b), (b, r))])\n return color_factor >= 0.04",
"def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False",
"def count_nonblack_np(img):\n return img.any(axis=-1).sum()",
"def check_img(img):\n\n if isinstance(img, (str, os.PathLike)) and os.path.exists(img):\n img = nib.load(img)\n elif not isinstance(img, nib.spatialimages.SpatialImage):\n raise TypeError('Provided image must be an existing filepath or a '\n 'pre-loaded niimg-like object')\n\n # ensure 3D or squeezable to 3D\n img = nib.funcs.squeeze_image(img)\n if len(img.shape) != 3:\n raise ValueError('Provided image must be 3D')\n\n # check if atlas data is int or castable to int\n # if image is arrayproxy convert it to an array for speed-up\n data = np.asarray(img.dataobj)\n cast = nib.is_proxy(img.dataobj)\n if img.header.get_data_dtype().kind not in ['i', 'u']:\n idata = data.astype('int32')\n cast = np.allclose(idata, data)\n data = idata\n if not cast:\n raise ValueError('Provided image should have integer values or '\n 'be safely castable to int without data loss')\n if cast:\n img = img.__class__(data, img.affine, header=img.header)\n img.header.set_data_dtype(np.int32)\n\n return img",
"def countless3d(data):\n modshape = np.array(data.shape) % 2\n assert sum(\n modshape\n ) == 0, \"COUNTLESS 3D currently only supports even sided images.\" # someone has to write even_to_odd3d\n\n return countless(data, (2, 2, 2))",
"def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5",
"def check_rgb(image):\n im_yiq = []\n rgb = False\n y = image\n if len(image.shape) > 2 and image.shape[-1] == 3: # The image is RGB\n rgb = True\n im_yiq = rgb2yiq(image) # convert to YIQ format\n y = im_yiq[:, :, 0]\n return rgb, y, im_yiq",
"def is_three_channeled(value):\n return len(value) == 3",
"def num_channels(self):\n return 3",
"def check_num_channels(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [num_output_channels], _ = parse_user_args(method, *args, **kwargs)\n if num_output_channels is not None:\n if num_output_channels not in (1, 3):\n raise ValueError(\"Number of channels of the output grayscale image\"\n \"should be either 1 or 3. Got {0}\".format(num_output_channels))\n\n return method(self, *args, **kwargs)\n\n return new_method",
"def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False",
"def check_color(i, j, k):\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()",
"def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)",
"def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid",
"def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )",
"def is_grayscale(self):\n return self.r == self.g == self.b",
"def is_grayscale(self):\n return self.r == self.g == self.b",
"def _check_data(data):\n if not (data.dtype == _np.float32 and data.flags.c_contiguous):\n raise ValueError('supplied data must be float32 and C contiguous')\n if data.ndim == 2:\n num_frames, channels = data.shape\n elif data.ndim == 1:\n num_frames, channels = data.size, 1\n else:\n raise ValueError('rank > 2 not supported')\n return num_frames, channels",
"def contains_black(image):\n extrema = ImageStat.Stat(image).extrema\n r = extrema[0][0]\n g = extrema[1][0]\n b = extrema[2][0]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False"
] | [
"0.72373766",
"0.7074375",
"0.67956823",
"0.6699473",
"0.66691566",
"0.6518619",
"0.65036654",
"0.62500054",
"0.62126744",
"0.6188705",
"0.61673236",
"0.61294484",
"0.61257964",
"0.6089409",
"0.594763",
"0.59257436",
"0.5916708",
"0.5915381",
"0.58001804",
"0.5760735",
"0.57465404",
"0.5720184",
"0.5707138",
"0.5641538",
"0.5616377",
"0.56064075",
"0.5595096",
"0.5595096",
"0.5594173",
"0.5593368"
] | 0.74561703 | 0 |
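Note that `is_rgb` above only inspects the trailing axis, so a bare length-3 vector also passes — a quick sketch:

import numpy as np

print(is_rgb(np.zeros((64, 64, 3))))  # True: H x W x RGB
print(is_rgb(np.zeros((64, 64))))     # False: single channel
print(is_rgb(np.zeros(3)))            # True: only the last axis is checked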
Converts an array [..., channels] of RGB values to HSI color values (H in rad). RGB values are assumed to be normalized to (0, 1). | def rgb_to_hsi(image: np.ndarray) -> np.ndarray:
    if not is_rgb(image):
        raise ValueError("Input needs to be an array of RGB values")
    r = image[..., 0]
    g = image[..., 1]
    b = image[..., 2]
    out = np.zeros_like(image)
    with np.errstate(invalid="ignore"):
        tmp = (2.0 * r - g - b) / 2.0 / np.sqrt((r - g) ** 2 + (r - b) * (g - b))  # 0/0 when r == g == b
        theta = np.arccos(np.clip(tmp, -1.0, +1.0))
        out[..., 0] = np.where(b <= g, theta, 2 * np.pi - theta)  # H
        out[..., 2] = np.sum(image, axis=-1) / 3.0  # I
        out[..., 1] = 1 - np.amin(image, axis=-1) / out[..., 2]  # S; 0/0 when r == g == b == 0
    np.nan_to_num(out[..., 0:2], copy=False)
    return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rgb2hsl_img(rgb):\r\n \r\n def core(_rgb, _hsl):\r\n\r\n irgb = _rgb.astype(np.uint16)\r\n ir, ig, ib = irgb[:, :, 0], irgb[:, :, 1], irgb[:, :, 2]\r\n h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]\r\n\r\n imin, imax = irgb.min(2), irgb.max(2)\r\n iadd, isub = imax + imin, imax - imin\r\n\r\n ltop = (iadd != 510) * (iadd > 255)\r\n lbot = (iadd != 0) * (ltop == False)\r\n\r\n l[:] = iadd.astype(np.float) / 510\r\n\r\n fsub = isub.astype(np.float)\r\n s[ltop] = fsub[ltop] / (510 - iadd[ltop])\r\n s[lbot] = fsub[lbot] / iadd[lbot]\r\n\r\n not_same = imax != imin\r\n is_b_max = not_same * (imax == ib)\r\n not_same_not_b_max = not_same * (is_b_max == False)\r\n is_g_max = not_same_not_b_max * (imax == ig)\r\n is_r_max = not_same_not_b_max * (is_g_max == False) * (imax == ir)\r\n\r\n h[is_r_max] = ((0. + ig[is_r_max] - ib[is_r_max]) / isub[is_r_max])\r\n h[is_g_max] = ((0. + ib[is_g_max] - ir[is_g_max]) / isub[is_g_max]) + 2\r\n h[is_b_max] = ((0. + ir[is_b_max] - ig[is_b_max]) / isub[is_b_max]) + 4\r\n h[h < 0] += 6\r\n h[:] /= 6\r\n\r\n hsl = np.zeros(rgb.shape, dtype=np.float)\r\n cpus = multiprocessing.cpu_count()\r\n length = int(math.ceil(float(hsl.shape[0]) / cpus))\r\n line = 0\r\n threads = []\r\n while line < hsl.shape[0]:\r\n line_next = line + length\r\n thread = threading.Thread(target=core, args=(rgb[line:line_next], hsl[line:line_next]))\r\n thread.start()\r\n threads.append(thread)\r\n line = line_next\r\n\r\n for thread in threads:\r\n thread.join()\r\n\r\n return hsl",
"def hsv_convert(arr):\n \n # adapted from Arnar Flatberg\n # http://www.mail-archive.com/[email protected]/msg06147.html\n # it now handles NaN properly and mimics colorsys.rgb_to_hsv output\n\n import numpy as np\n\n #assert(arr.min()>=0 and arr.max()<=1)\n\n #arr = arr/255.\n arr = arr.astype(\"float32\")\n out = np.empty_like(arr)\n\n arr_max = arr.max(-1)\n delta = arr.ptp(-1)\n s = delta / arr_max\n \n s[delta==0] = 0\n\n # red is max\n idx = (arr[:,:,0] == arr_max) \n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n\n # green is max\n idx = (arr[:,:,1] == arr_max) \n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0] ) / delta[idx]\n\n # blue is max\n idx = (arr[:,:,2] == arr_max) \n out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1] ) / delta[idx]\n\n out[:,:,0] = (out[:,:,0]/6.0) % 1.0\n out[:,:,1] = s\n out[:,:,2] = arr_max\n\n # rescale back to [0, 255]\n #out *= 255.\n\n # remove NaN\n out[np.isnan(out)] = 0\n\n return out",
"def hsl2rgb_img(hsl):\r\n\r\n def core(_hsl, _frgb):\r\n\r\n h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]\r\n fr, fg, fb = _frgb[:, :, 0], _frgb[:, :, 1], _frgb[:, :, 2]\r\n\r\n q = np.zeros(l.shape, dtype=np.float)\r\n\r\n lbot = l < 0.5\r\n q[lbot] = l[lbot] * (1 + s[lbot])\r\n\r\n ltop = lbot == False\r\n l_ltop, s_ltop = l[ltop], s[ltop]\r\n q[ltop] = (l_ltop + s_ltop) - (l_ltop * s_ltop)\r\n\r\n p = 2 * l - q\r\n q_sub_p = q - p\r\n\r\n is_s_zero = s == 0\r\n l_is_s_zero = l[is_s_zero]\r\n per_3 = 1./3\r\n per_6 = 1./6\r\n two_per_3 = 2./3\r\n\r\n def calc_channel(channel, t):\r\n\r\n t[t < 0] += 1\r\n t[t > 1] -= 1\r\n t_lt_per_6 = t < per_6\r\n t_lt_half = (t_lt_per_6 == False) * (t < 0.5)\r\n t_lt_two_per_3 = (t_lt_half == False) * (t < two_per_3)\r\n t_mul_6 = t * 6\r\n\r\n channel[:] = p.copy()\r\n channel[t_lt_two_per_3] = p[t_lt_two_per_3] + q_sub_p[t_lt_two_per_3] * (4 - t_mul_6[t_lt_two_per_3])\r\n channel[t_lt_half] = q[t_lt_half].copy()\r\n channel[t_lt_per_6] = p[t_lt_per_6] + q_sub_p[t_lt_per_6] * t_mul_6[t_lt_per_6]\r\n channel[is_s_zero] = l_is_s_zero.copy()\r\n\r\n calc_channel(fr, h + per_3)\r\n calc_channel(fg, h.copy())\r\n calc_channel(fb, h - per_3)\r\n\r\n frgb = np.zeros(hsl.shape, dtype=np.float)\r\n cpus = multiprocessing.cpu_count()\r\n length = int(math.ceil(float(hsl.shape[0]) / cpus))\r\n line = 0\r\n threads = []\r\n while line < hsl.shape[0]:\r\n line_next = line + length\r\n thread = threading.Thread(target=core, args=(hsl[line:line_next], frgb[line:line_next]))\r\n thread.start()\r\n threads.append(thread)\r\n line = line_next\r\n\r\n for thread in threads:\r\n thread.join()\r\n\r\n return (frgb*255).round().astype(np.uint8)",
"def rgb_to_hsv(x):\n # separating channels\n R = x[:,:,0]\n G = x[:,:,1]\n B = x[:,:,2]\n \n \n # h, s, v = hue, saturation, value \n # initial arrays for h, s and v filled with 0.0\n # we take R array just as 2D sample for copying the shape\n H = np.full_like(R, 0.0, dtype=np.double)\n S = np.full_like(R, 0.0, dtype=np.double)\n V = np.full_like(R, 0.0, dtype=np.double)\n \n HSV = np.full_like(x, 0.0, dtype=np.double)\n \n # np.max/min and axis=2 creates a 2D matrix\n C_max = np.max(x, axis=2) # maximum of r, g, b \n C_min = np.min(x, axis=2) # minimum of r, g, b \n Diff = C_max - C_min # diff of cmax and cmin. \n \n # Formula:\n # https://www.geeksforgeeks.org/program-change-rgb-color-model-hsv-color-model/\n \n # if cmax and cmax are equal (R=G=B) then h = 0 \n H[np.isclose(C_max, R, 0.0001)] = 0 \n \n # if cmax equal r \n m = np.isclose(C_max, R, 0.0001)&(Diff!=0)\n H[m] = (60 * ((G[m] - B[m]) / Diff[m]) + 360) % 360\n \n\n # if cmax equal g \n m = np.isclose(C_max, G, 0.0001)&(Diff!=0)\n H[m] = (60 * ((B[m] - R[m]) / Diff[m]) + 120) % 360\n \n # if cmax equal b \n m = np.isclose(C_max, B, 0.0001)&(Diff!=0)\n H[m] = (60 * ((R[m] - G[m]) / Diff[m]) + 240) % 360\n \n # if cmax equal zero \n S[C_max == 0] = 0\n \n # else\n m = (C_max != 0)\n S[m] = (Diff[m] / C_max[m])\n \n # compute v \n V = C_max\n \n # building new 3D picture\n HSV[:,:,0] = H\n HSV[:,:,1] = S\n HSV[:,:,2] = V\n \n return HSV",
"def _rgb_to_hsv(img):\n maxc = img.max(axis=-3)\n minc = img.min(axis=-3)\n\n is_equal = paddle.equal(maxc, minc)\n one_divisor = paddle.ones_like(maxc)\n c_delta = maxc - minc\n # s is 0 when maxc == minc, set the divisor to 1 to avoid zero divide.\n s = c_delta / paddle.where(is_equal, one_divisor, maxc)\n\n r, g, b = img.unbind(axis=-3)\n c_delta_divisor = paddle.where(is_equal, one_divisor, c_delta)\n # when maxc == minc, there is r == g == b, set the divisor to 1 to avoid zero divide.\n rc = (maxc - r) / c_delta_divisor\n gc = (maxc - g) / c_delta_divisor\n bc = (maxc - b) / c_delta_divisor\n\n hr = (maxc == r).astype(maxc.dtype) * (bc - gc)\n hg = ((maxc == g) & (maxc != r)).astype(maxc.dtype) * (rc - bc + 2.0)\n hb = ((maxc != r) & (maxc != g)).astype(maxc.dtype) * (gc - rc + 4.0)\n h = (hr + hg + hb) / 6.0 + 1.0\n h = h - h.trunc()\n return paddle.stack([h, s, maxc], axis=-3)",
"def rgb_to_hsv(x):\n hsv = th.zeros(*x.size())\n c_min = x.min(0)\n c_max = x.max(0)\n\n delta = c_max[0] - c_min[0]\n\n # set H\n r_idx = c_max[1].eq(0)\n hsv[0][r_idx] = ((x[1][r_idx] - x[2][r_idx]) / delta[r_idx]) % 6\n g_idx = c_max[1].eq(1)\n hsv[0][g_idx] = 2 + ((x[2][g_idx] - x[0][g_idx]) / delta[g_idx])\n b_idx = c_max[1].eq(2)\n hsv[0][b_idx] = 4 + ((x[0][b_idx] - x[1][b_idx]) / delta[b_idx])\n hsv[0] = hsv[0].mul(60)\n\n # set S\n hsv[1] = delta / c_max[0]\n\n # set V - good\n hsv[2] = c_max[0]\n\n return hsv",
"def rgbToHsv ( r, g = 0.0, b = 0.0 ):\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n if isinstance( r, int ):\n r /= 255.0\n if isinstance( g, int ):\n g /= 255.0\n if isinstance( b, int ):\n b /= 255.0\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n v = _max\n\n d = _max - _min\n s = 0.0 if max == 0.0 else d / _max\n\n if _max == _min:\n h = 0.0 # achromatic\n else:\n if _max == r:\n h = ( g - b ) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = ( b - r ) / d + 2.0\n elif _max == b:\n h = (r - g) / d + 4.0\n h /= 6.0\n\n # map top 360, 100, 100\n # h = int( round( h * 360 ) )\n # s = int( round( s * 100 ) )\n # v = int( round( v * 100 ) )\n\n return [ h, s, v ]",
"def rgbToHsv ( r, g = 0.0, b = 0.0 ):\n\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n v = _max\n d = _max - _min;\n s = 0.0 if _max == 0.0 else d / _max\n\n if _max == _min:\n h = 0.0 # achromatic\n else:\n if _max == r:\n h = (g - b) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = (b - r) / d + 2.0\n elif _max == b:\n h = (r - g) / d + 4.0\n h /= 6.0\n return [ h, s, v ]",
"def rgb_to_hsv(rgb, dtype=numpy.float64):\n arr = _prepare_colorarray(rgb, dtype)\n out = numpy.empty_like(arr)\n\n # -- V channel\n out_v = arr.max(-1)\n\n # -- S channel\n delta = arr.ptp(-1)\n\n # Ignore warning for zero divided by zero\n old_settings = numpy.seterr(divide='ignore', invalid='ignore')\n try:\n out_s = delta / out_v\n out_s[delta == 0.] = 0.\n\n # -- H channel\n # Red is max.\n idx = (arr[:, 0] == out_v)\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n\n # Green is max\n idx = (arr[:, 1] == out_v)\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\n\n # Blue is max.\n idx = (arr[:, 2] == out_v)\n out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]\n out_h = (out[:, 0] / 6.) % 1.\n out_h[delta == 0.] = 0.\n finally:\n numpy.seterr(**old_settings)\n\n # -- Output\n out[:, 0] = out_h\n out[:, 1] = out_s\n out[:, 2] = out_v\n\n # Remove NaNs\n out[numpy.isnan(out)] = 0\n return out",
"def hslToRgb ( h, s = 0.0, l = 0.0, a = 1.0 ):\n\n # Check if argument is list\n if isinstance(h, list):\n s = h[1]\n l = h[2]\n h = h[0]\n\n if isinstance(h, int):\n h /= 360.0\n if isinstance(s, int):\n s /= 100.0\n if isinstance(l, int):\n l /= 100.0\n\n r = l\n g = l\n b = l\n v = l * ( 1.0 + s ) if l <= 0.5 else l + s - l * s\n if ( v > 0 ):\n m = l + l - v\n sv = ( v - m ) / v\n h *= 6.0\n sextant = int( math.floor( h ) )\n fract = h - sextant\n vsf = v * sv * fract\n mid1 = m + vsf\n mid2 = v - vsf\n\n # Switch sextant\n if sextant == 0:\n r = v\n g = mid1\n b = m\n elif sextant == 1:\n r = mid2\n g = v\n b = m\n elif sextant == 2:\n r = m\n g = v\n b = mid1\n elif sextant == 3:\n r = m\n g = mid2\n b = v\n elif sextant == 4:\n r = mid1\n g = m\n b = v\n elif sextant == 5:\n r = v\n g = m\n b = mid2\n\n return [ r, g, b ]",
"def __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max):\r\n\r\n if var_max == var_min:\r\n return 0.0\r\n elif var_max == var_R:\r\n return (60.0 * ((var_G - var_B) / (var_max - var_min)) + 360) % 360.0\r\n elif var_max == var_G:\r\n return 60.0 * ((var_B - var_R) / (var_max - var_min)) + 120\r\n elif var_max == var_B:\r\n return 60.0 * ((var_R - var_G) / (var_max - var_min)) + 240.0",
"def rgb2hsl(rgb, h_prec=0, sl_prec=3):\n for value in rgb:\n if not 0 <= value <= 255:\n raise ValueError('One or more RGB values are outside [0, 255]')\n\n r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255\n\n c_max = max(r, g, b)\n c_min = min(r, g, b)\n delta = c_max - c_min\n\n # Hue\n if delta == 0:\n h = 0\n elif c_max == r:\n h = 60 * (((g - b) / delta) % 6)\n elif c_max == g:\n h = 60 * ((b - r) / delta + 2)\n else:\n h = 60 * ((r - g) / delta + 4)\n\n # Lightness\n l = (c_max + c_min) / 2\n\n # Saturation\n if delta == 0:\n s = 0\n else:\n s = delta / (1 - abs(2 * l - 1))\n\n return round(h, h_prec), round(s, sl_prec), round(l, sl_prec)",
"def rgb_to_hues(rgb):\n hsv = filters.filter_rgb_to_hsv(rgb, display_np_info=False)\n h = filters.filter_hsv_to_h(hsv, display_np_info=False)\n return h",
"def RGB_to_HSL(cobj, *args, **kwargs):\r\n \r\n var_R = cobj.rgb_r\r\n var_G = cobj.rgb_g\r\n var_B = cobj.rgb_b\r\n \r\n var_max = max(var_R, var_G, var_B)\r\n var_min = min(var_R, var_G, var_B)\r\n \r\n var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)\r\n var_L = 0.5 * (var_max + var_min)\r\n \r\n if var_max == var_min:\r\n var_S = 0\r\n elif var_L <= 0.5:\r\n var_S = (var_max - var_min) / (2.0 * var_L)\r\n else:\r\n var_S = (var_max - var_min) / (2.0 - (2.0 * var_L))\r\n \r\n return HSLColor(\r\n var_H, var_S, var_L)",
"def rgbToHsl( r, g = 0.0, b = 0.0 ):\n\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n l = (_max + _min) / 2.0\n\n if _max == _min:\n # achromatic\n h = 0.0\n s = 0.0\n else:\n d = _max - _min\n s = d / ( 2.0 - _max - _min ) if l > 0.5 else d / (_max + _min)\n\n if _max == r:\n h = ( g - b ) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = ( b - r ) / d + 2.0\n else: # max == b\n h = ( r - g ) / d + 4.0\n h /= 6.0\n return [ h, s, l ]",
"def hsvToRgb ( h, s = 0.0, v = 0.0 ):\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n if isinstance( h, int ):\n h /= 360.0\n if isinstance( s, int ):\n s /= 100.0\n if isinstance( v, int ):\n v /= 100.0\n\n if v == 0.0:\n return [0, 0, 0]\n\n h = h * 6.0\n i = int( math.floor( h ) )\n\n f = h - i\n p = v * ( 1.0 - s )\n q = v * ( 1.0 - ( s * f ) )\n t = v * ( 1.0 - ( s * ( 1.0 - f ) ) )\n\n if i == 0:\n r = v\n g = t\n b = p\n elif i == 1:\n r = q\n g = v\n b = p\n elif i == 2:\n r = p\n g = v\n b = t\n elif i == 3:\n r = p\n g = q\n b = v\n elif i == 4:\n r = t\n g = p\n b = v\n elif i == 5:\n r = v\n g = p\n b = q\n # To return int\n # r = int( math.floor( r * 255 ) )\n # g = int( math.floor( g * 255 ) )\n # b = int( math.floor( b * 255 ) )\n\n return [ r, g, b ]",
"def tohsv(self):\n\n return rgb_to_hsv(self.r * RGB_CHANNEL_SCALE, self.g * RGB_CHANNEL_SCALE, self.b * RGB_CHANNEL_SCALE)",
"def RGBtoHSL( rgb ):\n # R' = R/255 (G' = G/255, B' = B/255)\n Rp = rgb[2]/255\n Gp = rgb[1]/255\n Bp = rgb[0]/255\n Cmax = max(Rp,Gp,Bp)\n Cmin = min(Rp,Gp,Bp)\n Delta = Cmax - Cmin\n if Delta == 0:\n Hue = 0\n elif Cmax == Rp:\n Hue = 60*(((Gp-Bp)/Delta)%6)\n elif Cmax == Gp:\n Hue = 60*((Bp-Rp)/Delta + 2)\n else:\n Hue = 60*((Rp-Gp)/Delta + 4)\n\n Lit = (Cmax+Cmin)/2\n\n if Delta == 0:\n Sat = 0\n else:\n Sat = Delta/(1-abs(2*Lit-1))\n #print(\"H:\",Hue,\"S:\",Sat,\"L:\",Lit)\n return (Hue,Sat,Lit)",
"def convert_rgb_hsl(rcol, gcol, bcol):\n\n mxi = max(rcol, gcol, bcol)\n mni = min(rcol, gcol, bcol)\n\n lcol = (mxi+mni)/2\n d_f = mxi-mni\n if mxi == mni:\n hcol = 0\n elif mxi == rcol:\n hcol = (60 * ((gcol-bcol)/d_f) + 360) % 360\n elif mxi == gcol:\n hcol = (60 * ((bcol-rcol)/d_f) + 120) % 360\n elif mxi == bcol:\n hcol = (60 * ((rcol-gcol)/d_f) + 240) % 360\n if d_f == 0:\n scol = 0\n else:\n scol = d_f/(1-abs(2*lcol-1))\n\n return hcol, scol, lcol",
"def Saturation(img):\r\n factor = 2 * np.random.rand()\r\n HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n H, S, V = cv2.split(HSV)\r\n S= S* np.float(factor)\r\n S = np.where( S>255, 255,S)\r\n S = np.where( S<0, 0, S)\r\n HSV[:,:,1] = np.uint8(S)\r\n BGR = cv2.cvtColor(HSV, cv2.COLOR_HSV2BGR)\r\n return BGR",
"def _hls2rgb(self,h):\n h=h**self.exponent\n if(self.invert): h=1.0-h\n h=h*360.0\n h=Numeric.fmod(h,360.0)\n if(self.hls_hls):\n h=h/60.0\n else:\n if(h<120):\n h=h/120.0 # /* 0..1 Rot..(Orange)..Gelb */\n elif(h<180):\n h=h/60.0 - 1.0 # /* 1..2 Gelb..Gruen */\n elif(h<240):\n h=h/30.0 - 4.0 # /* 2..4 Gruen..Blaugruen..Blau*/\n else:\n h=h/60.0 # /* 4..6 Blau..Purpur..Rot */\n c=int(h)\n frac=h-c\n if (self.hls_l<=0.5):\n maxi=self.hls_l*(1.0+self.hls_s)\n else:\n maxi=self.hls_l+self.hls_s-self.hls_l*self.hls_s\n mini=2*self.hls_l-maxi;\n diff=maxi-mini;\n if(self.hls_s==0): # /* grau */\n return(1.0,1.0,1.0) \n else:\n if(c==0):\n return(maxi,mini+frac*diff,mini)\n elif(c==1):\n return(mini+(1.0-frac)*diff,maxi,mini)\n elif(c==2):\n return(mini,maxi,mini+frac*diff)\n elif(c==3):\n return(mini,mini+(1.0-frac)*diff,maxi)\n elif(c==4):\n return(mini+frac*diff,mini,maxi)\n else:\n return(maxi,mini,mini+(1.0-frac)*diff)",
"def greyscale(c):\n return desaturate(c, 1)",
"def rgb2hls(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hls(r,g,b)",
"def rgb2hls(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hls(r,g,b)",
"def rgb2hsv(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hsv(r,g,b)",
"def hyper2rgb(img, bands):\n rgb = spectral.get_rgb(img, bands)\n rgb /= np.max(rgb)\n rgb = np.asarray(255 * rgb, dtype='uint8')\n return rgb",
"def sRGBGrayscale(x):\n rellum=sRGBLuminance(x)\n return [rellum,rellum,rellum]",
"def filterToHue( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n hue = int(255*HSL[0]//360) # convert to 0-255 range\n bmp.pixels[h][w] = (hue,hue,hue)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp",
"def rgb_to_hsv(arr):\r\n arr = np.asarray(arr)\r\n\r\n # check length of the last dimension, should be _some_ sort of rgb\r\n if arr.shape[-1] != 3:\r\n raise ValueError(\"Last dimension of input array must be 3; \"\r\n \"shape {} was found.\".format(arr.shape))\r\n\r\n in_shape = arr.shape\r\n arr = np.array(\r\n arr, copy=False,\r\n dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.\r\n ndmin=2, # In case input was 1D.\r\n )\r\n out = np.zeros_like(arr)\r\n arr_max = arr.max(-1)\r\n ipos = arr_max > 0\r\n delta = arr.ptp(-1)\r\n s = np.zeros_like(delta)\r\n s[ipos] = delta[ipos] / arr_max[ipos]\r\n ipos = delta > 0\r\n # red is max\r\n idx = (arr[..., 0] == arr_max) & ipos\r\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\r\n # green is max\r\n idx = (arr[..., 1] == arr_max) & ipos\r\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\r\n # blue is max\r\n idx = (arr[..., 2] == arr_max) & ipos\r\n out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]\r\n\r\n out[..., 0] = (out[..., 0] / 6.0) % 1.0\r\n out[..., 1] = s\r\n out[..., 2] = arr_max\r\n out = out.reshape(in_shape)\r\n return out",
"def hsv2rgb_float(h,s,v):\n\tif h<0.0: h=0.0\n\tif h>1.0: h=1.0\n\tif s<0.0: s=0.0\n\tif s>1.0: s=1.0\n\tif v<0.0: v=0.0\n\tif v>1.0: v=1.0\n\th = h*6.0\n\tif h<=3.0:\n\t\tif h<=1.0:\n\t\t\tr = 1.0;g=h;b=0.0\n\t\telif h<=2.0:\n\t\t\tr = 2.0-h;g=1.0;b=0.0\n\t\telse:\n\t\t\tr = 0.0;g=1.0;b=h-2.0\n\telse:\n\t\tif h<=4.0:\n\t\t\tr = 0.0;g=4.0-h;b=1.0\n\t\telif h<=5.0:\n\t\t\tr = h-4.0;g=0.0;b=1.0\n\t\telse:\n\t\t\tr = 1.0;g=0.0;b=6.0-h\n\tq = 1.0-s\n\tr = q+s*r\n\tg = q+s*g\n\tb = q+s*b\n\treturn (v*r,v*g,v*b)"
] | [
"0.6703506",
"0.6296489",
"0.6287106",
"0.60505944",
"0.59786993",
"0.5978399",
"0.59774935",
"0.59652996",
"0.5940715",
"0.59046626",
"0.58576584",
"0.5831712",
"0.58161163",
"0.58112276",
"0.5776856",
"0.57461786",
"0.57437605",
"0.5730638",
"0.5720245",
"0.56746477",
"0.5639751",
"0.56256825",
"0.56137604",
"0.56137604",
"0.55920374",
"0.5565166",
"0.5555471",
"0.5552449",
"0.55394083",
"0.5534414"
] | 0.7039188 | 0 |
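A spot check for `rgb_to_hsi` above: a pure red pixel should map to hue 0, full saturation, and intensity 1/3.

import numpy as np

red = np.array([[[1.0, 0.0, 0.0]]])  # one pixel, RGB normalized to (0, 1)
print(rgb_to_hsi(red))               # ~[[[0.0, 1.0, 0.3333]]]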
Converts an array [..., channels] of RGB values to Digital Y'CbCr (0-255). RGB values are assumed to be normalized to (0, 1). Don't forget to cast to uint8 for pillow. | def rgb_to_ycbcr(image: np.ndarray) -> np.ndarray:
    """Digital Y'CbCr (0-255) from RGB (0-1)."""
    if not is_rgb(image):
        raise ValueError("Input needs to be an array of RGB values")
    m = np.array(
        [
            [+065.481, +128.553, +024.966],
            [-037.797, -074.203, +112.000],
            [+112.000, -093.786, -018.214],
        ]
    )
    a = np.array([16, 128, 128])
    return np.dot(image, m.T) + a | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\n cr: torch.Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)",
"def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)",
"def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. / 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')",
"def to_ycc(color):\n return rgb_to_ycc(*[x / 255.0 for x in color])",
"def rgb_to_ycbcr(rgb):\n transform = np.matrix('.299, .587, .114; -.16874, -.33126, .5; .5, -.41869, -.08131')\n\n def apply_transform(x):\n return np.array(np.dot(transform, x))[0]\n\n return np.apply_along_axis(apply_transform, 2, rgb)",
"def rgb_to_ycbcr(rgb_uint8):\n if rgb_uint8.dtype != numpy.uint8:\n raise TypeError('`rgb_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If the check below did not exist, `rgb_to_ycbcr` would\n # not crash if `rgb_uint8` is nD, n >= 4.\n if rgb_uint8.ndim != 3:\n raise ValueError('`rgb_uint8.ndim` is not equal to 3.')\n \n # If the check below did not exist, `rgb_to_ycbcr` would\n # not crash if `rgb_uint8.shape[2]` is larger than 4.\n if rgb_uint8.shape[2] != 3:\n raise ValueError('`rgb_uint8.shape[2]` is not equal to 3.')\n rgb_float64 = rgb_uint8.astype(numpy.float64)\n y_float64 = 0.299*rgb_float64[:, :, 0] \\\n + 0.587*rgb_float64[:, :, 1] \\\n + 0.114*rgb_float64[:, :, 2]\n cb_float64 = 128. \\\n - (0.299/1.772)*rgb_float64[:, :, 0] \\\n - (0.587/1.772)*rgb_float64[:, :, 1] \\\n + (0.886/1.772)*rgb_float64[:, :, 2]\n cr_float64 = 128. \\\n + (0.701/1.402)*rgb_float64[:, :, 0] \\\n - (0.587/1.402)*rgb_float64[:, :, 1] \\\n - (0.114/1.402)*rgb_float64[:, :, 2]\n ycbcr_float64 = numpy.stack((y_float64, cb_float64, cr_float64),\n axis=2)\n return cast_float_to_uint8(ycbcr_float64)",
"def ycbcr_to_rgb(ycbcr):\n transform = np.matrix('.299, .587, .114; -.16874, -.33126, .5; .5, -.41869, -.08131')\n inverse = transform.getI()\n\n def apply_transform(ycbcr):\n return np.array(np.dot(inverse, ycbcr))[0]\n\n return np.apply_along_axis(apply_transform, 2, ycbcr)",
"def rgb_to_ycbcr(img):\n\n T = np.array([\n [0.256788235294118, -0.148223529411765, 0.439215686274510],\n [0.504129411764706, -0.290992156862745, -0.367788235294118],\n [0.097905882352941, 0.439215686274510, -0.071427450980392],\n ], dtype=np.float64)\n\n O = np.array([16, 128, 128], dtype=np.float64)\n\n img = img.astype(np.float64)\n res = np.matmul(img, T) + O\n res = res.clip(0, 255).round().astype(np.uint8)\n\n return res",
"def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)",
"def ycbcr_to_rgb(ycbcr_uint8):\n if ycbcr_uint8.dtype != numpy.uint8:\n raise TypeError('`ycbcr_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If the check below did not exist, `ycbcr_to_rgb` would\n # not crash if `ycbcr_uint8` is nD, n >= 4.\n if ycbcr_uint8.ndim != 3:\n raise ValueError('`ycbcr_uint8.ndim` is not equal to 3.')\n \n # If the check below did not exist, `ycbcr_to_rgb` would\n # not crash if `ycbcr_uint8.shape[2]` is larger than 4.\n if ycbcr_uint8.shape[2] != 3:\n raise ValueError('`ycbcr_uint8.shape[2]` is not equal to 3.')\n ycbcr_float64 = ycbcr_uint8.astype(numpy.float64)\n red_float64 = ycbcr_float64[:, :, 0] \\\n + 1.402*(ycbcr_float64[:, :, 2] - 128.)\n green_float64 = ycbcr_float64[:, :, 0] \\\n - (0.114*1.772*(ycbcr_float64[:, :, 1] - 128.)/0.587) \\\n - (0.299*1.402*(ycbcr_float64[:, :, 2] - 128.)/0.587)\n blue_float64 = ycbcr_float64[:, :, 0] \\\n + 1.772*(ycbcr_float64[:, :, 1] - 128.)\n rgb_float64 = numpy.stack((red_float64, green_float64, blue_float64),\n axis=2)\n return cast_float_to_uint8(rgb_float64)",
"def generate_channels(path):\n # Abrir imagen y transformar a array\n image = Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)",
"def bgr_to_yuv_channels(matrix):\n yuv_matrix = cv2.cvtColor(matrix, cv2.COLOR_BGR2YUV)\n return cv2.split(yuv_matrix)",
"def save_images(inputY, inputCbCr, size, image_path):\n def merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j*h:j*h+h, i*w:i*w+w, :] = image\n return img\n\n inputY = inputY.astype('uint8')\n inputCbCr = inputCbCr.astype('uint8')\n output_concat = np.concatenate((inputY, inputCbCr), axis=3)\n\n assert len(output_concat) <= size[0] * size[1], \"number of images should be equal or less than size[0] * size[1] {}\".format(len(output_concat))\n\n new_output = merge(output_concat, size)\n\n new_output = new_output.astype('uint8')\n\n img = Image.fromarray(new_output, mode='YCbCr')\n img = img.convert('RGB')\n img.save(image_path)",
"def rgb_to_bit(rgb_array):\r\n return [[round_color(elem) for elem in row] for row in rgb_array]",
"def RGB2BGR(x):\n out = cv2.cvtColor(x, cv2.COLOR_RGB2BGR)\n return out",
"def yuv_channels_to_bgr_image(y_channel, u_channel, v_channel):\n yuv_image = cv2.merge((y_channel.astype(np.float32), u_channel.astype(np.float32), v_channel.astype(np.float32)))\n bgr_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)\n return bgr_image",
"def bgr_to_ycrcb(matrix3d: np.ndarray) -> np.ndarray:\n return np.apply_along_axis(bgr_pixel_to_ycrcb, 2, matrix3d)",
"def brc(x):\n N = len(x)\n y = np.zeros(N, dtype=complex)\n width = int(np.log2(N))\n for ii in np.arange(N):\n idx = '{:0{width}b}'.format(ii, width=width)\n y[ii] = x[int(idx[::-1],2)]#Reverse order of bits of integer ii\n return y",
"def skin_detect_ycbcr(frame):\n Cr_min, Cr_max, Cb_min, Cb_max = 133, 150, 77, 127\n # Constants for finding range of skin color in YCrCb\n min_YCrCb = np.array([0,Cr_min,Cb_min], np.uint8)\n max_YCrCb = np.array([255,Cr_max,Cb_max], np.uint8)\n\n # Convert image to YCrCb\n imageYCrCb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)\n # Find region with skin tone in YCrCb image\n skinRegion = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb) \n # Do contour detection on skin region\n _, contours, hierarchy = cv2.findContours(skinRegion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n return imageYCrCb, contours, hierarchy",
"def yuv_to_rgb(img_yuv):\n\n y = img_yuv[..., 0]\n u = img_yuv[..., 1]\n v = img_yuv[..., 2]\n\n r = y + 1.14 * v\n g = y - 0.396 * u - 0.581 * v\n b = y + 2.029 * u\n\n img_rgb = np.stack((r, g, b), axis=2)\n img_rgb = np.clip(img_rgb, 0, 1)\n return img_rgb",
"def yuv2bgr(tens: Tensor) -> Tensor:\n if not _is_yuv_image(tens):\n raise ValueError(\n f\"Tensor of shape 3 expected. Found shape {len(tens.shape)}. \"\n \"This function converts an YUV Tensor to its BGR counterpart\"\n )\n\n img = cv.cvtColor(tens, YUV2BGR)\n return to_tensor(img, cspace=\"bgr\")",
"def yuv_to_ycbcr(yuv, bit_depth=10):\n\n bit_multi = 2 ** (bit_depth - 8)\n y_coef = 219 * bit_multi\n y_offset = 16 * bit_multi\n cbcr_coef = 224 * bit_multi\n cbcr_offset = 128 * bit_multi\n\n ycbcr = yuv.copy()\n ycbcr[:, 0] = np.round(ycbcr[:, 0] * y_coef + y_offset)\n ycbcr[:, 1] = np.round(ycbcr[:, 1] * cbcr_coef + cbcr_offset)\n ycbcr[:, 2] = np.round(ycbcr[:, 2] * cbcr_coef + cbcr_offset)\n\n return ycbcr",
"def convert_yuv_to_rgb(img_arr): \n rgb = cv2.cvtColor(img_arr, cv2.COLOR_YUV2BGR_I420)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)\n return Image.fromarray(rgb)",
"def yiq2rgb(imYIQ):\n trans = np.array([[1, 0.956, 0.62], [1, -0.272, -0.647], [1, -1.108, 1.705]])\n return np.dot(imYIQ, trans)",
"def ycbcr_to_yuv(ycbcr, bit_depth=10):\n\n bit_multi = 2 ** (bit_depth - 8)\n y_coef = 219 * bit_multi\n y_offset = 16 * bit_multi\n cbcr_coef = 224 * bit_multi\n cbcr_offset = 128 * bit_multi\n\n ycbcr_tmp = ycbcr.copy()\n ycbcr_tmp[:, 0] = (ycbcr_tmp[:, 0] - y_offset) / y_coef\n ycbcr_tmp[:, 1] = (ycbcr_tmp[:, 1] - cbcr_offset) / cbcr_coef\n ycbcr_tmp[:, 2] = (ycbcr_tmp[:, 2] - cbcr_offset) / cbcr_coef\n\n return ycbcr_tmp",
"def transformRGB2YIQ(imgRGB: np.ndarray) -> np.ndarray:\r\n YIQ_from_RGB = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n YIQImg = np.ndarray(imgRGB.shape)\r\n\r\n YIQImg[:, :, 0] = YIQ_from_RGB[0,0] * imgRGB[:, :, 0] + YIQ_from_RGB[0,1] * imgRGB[:, :, 1] + YIQ_from_RGB[0,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 1] = YIQ_from_RGB[1,0] * imgRGB[:, :, 0] + YIQ_from_RGB[1,1] * imgRGB[:, :, 1] + YIQ_from_RGB[1,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 2] = YIQ_from_RGB[2,0] * imgRGB[:, :, 0] + YIQ_from_RGB[2,1] * imgRGB[:, :, 1] + YIQ_from_RGB[2,2] * imgRGB[:, :, 2]\r\n\r\n return YIQImg",
"def convert_6_channels_to_rgb(channels):\n base = channels[0]\n # qual is the minimum of base quality and mapping quality at each position\n # 254 is the max value for quality scores because the SAM specification has\n # 255 reserved for unavailable values.\n qual = np.minimum(channels[1], channels[2])\n strand = channels[3]\n # alpha is <supports variant> * <base != reference>\n alpha = np.multiply(channels[4] / 254.0, channels[5] / 254.0)\n return np.multiply(np.stack([base, qual, strand]),\n alpha).astype(np.uint8).transpose([1, 2, 0])",
"def rgb2yuv(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)",
"def rgb2yuv(r, g, b, mode='444'):\n r = 255 * r\n g = 255 * g\n b = 255 * b\n y = 00.257 * r + 0.504 * g + 0.098 * b + 16\n u = -0.148 * r - 0.291 * g + 0.439 * b + 128\n v = 00.439 * r - 0.368 * g - 0.071 * b + 128\n if mode == '420':\n y, u, v = YUV_change_mode(y, u, v, '444to420')\n return (y / 255), (u / 255), (v / 255)",
"def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(np.array(MATRIX).T))"
] | [
"0.65595174",
"0.65373284",
"0.65213215",
"0.6495782",
"0.64475703",
"0.64128786",
"0.6219854",
"0.6115602",
"0.5978177",
"0.5957845",
"0.59014165",
"0.5828215",
"0.5691829",
"0.5660269",
"0.5658129",
"0.56305027",
"0.5622264",
"0.5549955",
"0.55226725",
"0.5519686",
"0.55096513",
"0.5500089",
"0.549074",
"0.54538333",
"0.5449599",
"0.5416644",
"0.53697497",
"0.53483725",
"0.5328446",
"0.53073555"
] | 0.6613111 | 0 |
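Spot-checking `rgb_to_ycbcr` above: white (1, 1, 1) lands exactly at the top of the digital range, Y' = 235 and Cb = Cr = 128.

import numpy as np

white = np.ones((1, 1, 3))
print(rgb_to_ycbcr(white))                   # [[[235. 128. 128.]]]
print(rgb_to_ycbcr(white).astype(np.uint8))  # cast to uint8 for pillow, as noted above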
Returns a triangular matrix with entries drawn uniformly from [0, 1). | def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:
    a = np.random.uniform(0, 1, (size, size))
    if lower:
        ind = np.triu_indices(size, 1)   # zero out the strict upper triangle
    else:
        ind = np.tril_indices(size, -1)  # zero out the strict lower triangle
    a[ind] = 0
    return a | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_rand_mat(dim=3):\n tmp = npr.uniform(-1, 1, (dim,dim))\n\n # make matrix symmetric\n for i in range(dim):\n for j in range(i+1, dim):\n tmp[i,j] = tmp[j,i]\n\n return tmp",
"def random_matrix(rows, cols):\n return np.random.randn(rows, cols)",
"def sample_matrix(dim, bound):\n return np.random.uniform(low=-bound, high=bound, size=(dim, dim))",
"def irandmatrix(n, range = 10):\n A = mp.matrix(n, n)\n for i in xrange(n):\n for j in xrange(n):\n A[i,j]=int( (2 * mp.rand() - 1) * range)\n return A",
"def generate_random_matrix(n):\n return [[random.randint(1, 50) for i in range(n)] for j in range(n)]",
"def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5",
"def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux",
"def generate_matrix(rows, cols):\n matrix_random = np.random.rand(rows, cols)\n return matrix_random",
"def random_transition_matrix(n: int) -> np.ndarray:\n\n x = np.abs(np.random.normal(size=(n, n)))\n rsum = x.sum(axis=1)\n return x / rsum[:, np.newaxis]",
"def create_matrix(size):\n total_size = size * size\n rand_matrix = np.reshape(\n np.random.choice(\n [0, 1], int(total_size), p=[0.9, 0.1]\n ),\n (size, size)\n )\n return rand_matrix",
"def generate_random_matrix(dim):\n\n A = np.complex128(np.random.random([dim, dim]))\n A_adjoint = A.conj().T\n\n P = A @ A_adjoint\n P += np.identity(len(P))\n\n P_inverse = np.linalg.inv(P)\n\n return P_inverse",
"def rand(cls):\n q_vec = np.random.rand(4)\n q=Quat(q_vec)\n q.normalize()\n return q",
"def uniform_weights(n):\n return np.ones((n, 1)) / n",
"def matrix_generate(n):\n a = np.eye(n)\n max = 0\n for i in range(n):\n for j in range(n):\n a[i][j] = random.randint(0,50)\n a[j][i] = a[i][j]\n if a[i][j] > max:\n max = a[i][j]\n for i in range(n):\n a[i][i] = max * n + random.randint(20,40)\n return np.array(a)",
"def WeightInitializer():\n return np.random.uniform(-1, 1)",
"def _random_not_singular(N):\n data = np.zeros((1, 1))\n while np.linalg.det(data) == 0:\n data = np.random.random((N, N)) + \\\n 1j * np.random.random((N, N)) - (0.5 + 0.5j)\n return data",
"def get_random_uniform(m,n):\n\n return 2*np.random.random(size=(m,n)) - 1",
"def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])",
"def makeRandom(cls, m, n, min=0, max=1):\n Matrix.validate_dimensions(m, n)\n data = [[randrange(min, max) for j in range(n)] for i in range(m)]\n return RealMatrix(m, n, data)",
"def generate_onehot_matrix(n: int = 1024, ndim: int = 8, random_seed: int = None) -> TYPE_ARRAY:\n to_vec = lambda x: [1 if i == x else 0 for i in range(ndim)]\n return numpy.array([to_vec(x) for x in _RNG.randint(0, ndim, n)]).astype(int)",
"def generatePattern(numCols=100, minOnes=21, maxOnes=25):\n assert minOnes < maxOnes\n assert maxOnes < numCols\n\n nOnes = random.randint(minOnes, maxOnes)\n ind = random.sample(xrange(numCols), nOnes)\n x = numpy.zeros(numCols, dtype='float32')\n x[ind] = 1\n\n return x",
"def init_matrix(x_dim = 10, y_dim = 10):\n ret = np.zeros((x_dim, y_dim))\n x_rand = np.random.randint(0, x_dim - 1)\n y_rand = np.random.randint(0, y_dim - 1)\n ret[x_rand, y_rand] = 1\n\n return(ret)",
"def rand(self, x):\r\n return np.random.random(1)[0]",
"def simple_genotype_matrix(n, p):\n genotypes = np.zeros(shape=(n, p))\n for item in range(0, p):\n genotypes[:, item] = np.random.binomial(1, np.random.uniform(0.1, 0.5, 1), n)\n\n return genotypes",
"def random(cls):\n return cls(np.random.randn(3)).normalized()",
"def get_rnd_simplex(dimension, random_state):\n t = random_state.uniform(0, 1, dimension - 1)\n t = np.append(t, [0, 1])\n t.sort()\n\n return np.array([(t[i + 1] - t[i]) for i in range(len(t) - 1)])",
"def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )",
"def generate_s_matrix(number: int):\n matrix_zero = np.ones((number, number))\n matrix_zero[1:-1, 1:-1] = 0\n return matrix_zero",
"def random_density_matrix(nqubits: int, dtype=np.complex128) -> np.ndarray:\n rho = random_numpy_hermitian(nqubits, dtype=dtype)\n # Normalize\n ids = np.arange(2 ** nqubits)\n rho[ids, ids] = rho[ids, ids] / np.trace(rho)\n return rho.astype(dtype)",
"def random_table(self):\n rule_set = []\n for i in range(self.k ** (2 * self.r + 1) - 1):\n g = np.random.rand()\n if g > self.lambda_param:\n g = 0\n else:\n g = np.random.randint(1, self.k)\n rule_set.append(g)\n rule_set.append(0)\n return rule_set"
] | [
"0.6796665",
"0.6433331",
"0.6395273",
"0.63390714",
"0.63257",
"0.6278706",
"0.61659557",
"0.61443967",
"0.60865873",
"0.5927011",
"0.5907153",
"0.5863584",
"0.5841046",
"0.58366776",
"0.57918566",
"0.5788798",
"0.5776318",
"0.57745236",
"0.57488704",
"0.5734677",
"0.56929296",
"0.56653774",
"0.56517637",
"0.56503206",
"0.5616845",
"0.5604512",
"0.5602481",
"0.5602272",
"0.55816734",
"0.55757606"
] | 0.74994147 | 0 |
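A quick sanity check for the triangular sampler above — a sketch assuming only NumPy; the size and the [0, 1) bound check mirror the docstring:

import numpy as np

def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:
    a = np.random.uniform(0, 1, (size, size))
    # zero the strictly-upper (lower=True) or strictly-lower (lower=False) triangle
    ind = np.triu_indices(size, 1) if lower else np.tril_indices(size, -1)
    a[ind] = 0
    return a

L = random_triangular_matrix(4)
assert np.array_equal(L, np.tril(L))  # lower triangular
assert ((0 <= L) & (L < 1)).all()     # entries stay in [0, 1)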
Performs a batched calculation of the `v^T A v` transform; a special case of the bilinear form `x^T A y`. | def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:
""" Faster than
Av = np.matmul(A, v[...,:,None]) # [B, X, 1]
return np.matmul(v[...,None,:], Av).squeeze((-2, -1)) # [B]
"""
return np.einsum("...k,...kl,...l->...", v, A, v) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]",
"def __call__(self, x, y):\n #- TODO: compare speed to solution at\n #- http://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python\n \n #- Find where we are in grid\n #- clip to 1 because we will use i and i-1\n #- clip to len(x)-1 to allow extrapolation beyond grid boundary\n ix = np.searchsorted(self.x, x).clip(1, len(self.x)-1)\n iy = np.searchsorted(self.y, y).clip(1, len(self.y)-1)\n \n #- Interpolation distances from points\n dx = (x - self.x[ix-1]) / (self.x[ix] - self.x[ix-1])\n dy = (y - self.y[iy-1]) / (self.y[iy] - self.y[iy-1])\n\n #- Interpolate, allowing x and/or y to be multi-dimensional\n #- NOTE: these are the slow steps, about equal time each\n \n #- Original code with what appears to be vestigial transposes\n # data1 = (self.data[ix-1,iy-1].T*(1-dx) + self.data[ix,iy-1].T*dx).T\n # data2 = (self.data[ix-1,iy].T*(1-dx) + self.data[ix,iy].T*dx).T\n # dataxy = (data1.T*(1-dy) + data2.T*dy).T\n\n #- Updated without transposes\n data1 = (self.data[ix-1,iy-1]*(1-dx) + self.data[ix,iy-1]*dx)\n data2 = (self.data[ix-1,iy]*(1-dx) + self.data[ix,iy]*dx)\n dataxy = (data1*(1-dy) + data2*dy)\n\n return dataxy",
"def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res",
"def __call__(self, x):\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias",
"def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v",
"def apply(self,v):\n return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation",
"def _gv_bilinear(self, r, t):\n mv1t = torch.matmul(self.mv1.weight, t.T) # [k, b]\n mv2r = torch.matmul(self.mv2.weight, r.T) # [k, b]\n return (mv1t * mv2r + self.bv.weight).T # [b, k]",
"def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache",
"def ulab_bilinear_interpolation():\n GRID_DATA[1::2, ::2] = SENSOR_DATA[:-1, :]\n GRID_DATA[1::2, ::2] += SENSOR_DATA[1:, :]\n GRID_DATA[1::2, ::2] /= 2\n GRID_DATA[::, 1::2] = GRID_DATA[::, :-1:2]\n GRID_DATA[::, 1::2] += GRID_DATA[::, 2::2]\n GRID_DATA[::, 1::2] /= 2",
"def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])",
"def apply(self,i,x):\n #applies the ith map to the point x\n y = self.A[i,:,:] @ x + self.b[i,:]\n return y",
"def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n oldpos = coord (i,j) - dt * np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v",
"def forward(x, pi, A, B):\n # TODO: Write this function.\n #x = x[1]\n B_col = B[:, x[0]] # [N_z, 1]\n alpha = np.multiply(pi, B_col)\n ret = np.zeros((x.shape[0], pi.shape[0]))\n ret[0] = alpha\n for i in range(1, x.shape[0]):\n B_col = B[:, x[i]]\n sum_term = np.dot(A, alpha) #before: alpha, A\n alpha = np.multiply(B_col, sum_term) #before: sum_term before\n ret[i] = alpha\n return ret",
"def call(self, x):\n return tf.tile(x, self._mult)",
"def update_params(self, x_a, r_t, a_t):\n self.A_a[a_t] = self.A_a[a_t] + x_a[:, a_t].reshape(-1, 1).dot(x_a[:, a_t].reshape(-1, 1).T)\n self.A_a_inv[a_t] = inv(self.A_a[a_t])\n self.b_a[a_t] = self.b_a[a_t] + x_a[:, a_t].reshape(-1, 1) * r_t",
"def skydiving_iterate(v, t, dt, X, Y):\n return (v + dt*X(t))/(1 + dt*Y(t)*abs(v))",
"def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out",
"def inplace(block_size=20000):\n y = np.empty(len(x))\n for k in range(len(x) // block_size + 1):\n b, e = k * block_size, (k+1) * block_size\n y[b:e] = x[b:e]\n y[b:e] *= .25\n y[b:e] += .75\n y[b:e] *= x[b:e]\n y[b:e] -= 1.5\n y[b:e] *= x[b:e]\n y[b:e] -= 2\n\n return y",
"def _apply_cost_to_vec(\n self, vec: jnp.ndarray, axis: int = 0, fn=None\n ) -> jnp.ndarray:\n vec = jnp.reshape(vec, self.grid_size)\n accum_vec = jnp.zeros_like(vec)\n indices = list(range(1, self.grid_dimension))\n for dimension, geom in enumerate(self.geometries):\n cost = geom.cost_matrix\n ind = indices.copy()\n ind.insert(dimension, 0)\n if axis == 0:\n cost = cost.T\n accum_vec += jnp.sum(\n jnp.tensordot(cost, vec, axes=([0], [dimension])),\n axis=indices,\n keepdims=True\n ).transpose(ind)\n return accum_vec.ravel()",
"def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3",
"def affine(params, x):\n return np.dot(params['w'], x) + params['b']",
"def bilinearInterpolation(self,point,indexes,fieldTranspose):\n x,y = point\n x1,x2 = self.coordinates[0][indexes[0]],self.coordinates[0][indexes[1]]\n y1,y2 = self.coordinates[1][indexes[1]],self.coordinates[1][indexes[2]]\n f11 = fieldTranspose[indexes[0]]\n f21 = fieldTranspose[indexes[1]]\n f12 = fieldTranspose[indexes[2]]\n f22 = fieldTranspose[indexes[3]]\n return 1/((x2-x1)*(y2-y1))*(f11*(x2-x)*(y2-y)+f21*(x-x1)*(y2-y)+f12*(x2-x)*(y-y1)+f22*(x-x1)*(y-y1))",
"def beta_A_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n beta_A = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.beta_A_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n beta_A[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n return beta_A",
"def create_A(matches, tilespecs, mesh, **kwargs):\n # let's assume translation halfsize\n dof_per_tile = 1\n dof_per_vertex = 1\n vertex_per_patch = 3\n nnz_per_row = 2*(dof_per_tile + vertex_per_patch * dof_per_vertex)\n nrows = sum([len(m['matches']['p'][0]) for m in matches])\n nd = nnz_per_row*nrows\n lens_dof_start = dof_per_tile*len(tilespecs)\n\n data = np.zeros(nd).astype('float64')\n b = np.zeros((nrows, 2)).astype('float64')\n indices = np.zeros(nd).astype('int64')\n indptr = np.zeros(nrows+1).astype('int64')\n indptr[1:] = np.arange(1, nrows+1)*nnz_per_row\n weights = np.ones(nrows).astype('float64')\n\n unique_ids = np.array(\n [t.tileId for t in tilespecs])\n\n # nothing fancy here, row-by-row\n offset = 0\n rows = 0\n\n for mi in range(len(matches)):\n m = matches[mi]\n pindex = np.argwhere(unique_ids == m['pId'])\n qindex = np.argwhere(unique_ids == m['qId'])\n\n npoint_pairs = len(m['matches']['q'][0])\n # get barycentric coordinates ready\n pcoords = np.transpose(\n np.vstack(\n (m['matches']['p'][0],\n m['matches']['p'][1])\n )).astype('float64')\n qcoords = np.transpose(\n np.vstack(\n (m['matches']['q'][0],\n m['matches']['q'][1])\n )).astype('float64')\n\n b[rows: (rows + pcoords.shape[0])] = qcoords - pcoords\n rows += pcoords.shape[0]\n pbary = compute_barycentrics(pcoords, mesh, **kwargs)\n qbary = compute_barycentrics(qcoords, mesh, **kwargs)\n\n mstep = np.arange(npoint_pairs) * nnz_per_row + offset\n\n data[mstep + 0] = 1.0\n data[mstep + 1] = -1.0\n data[mstep + 2] = pbary[0][:, 0]\n data[mstep + 3] = pbary[0][:, 1]\n data[mstep + 4] = pbary[0][:, 2]\n data[mstep + 5] = -qbary[0][:, 0]\n data[mstep + 6] = -qbary[0][:, 1]\n data[mstep + 7] = -qbary[0][:, 2]\n\n indices[mstep + 0] = pindex\n indices[mstep + 1] = qindex\n indices[mstep + 2] = (lens_dof_start +\n mesh.simplices[pbary[1][:]][:, 0])\n indices[mstep + 3] = (lens_dof_start +\n mesh.simplices[pbary[1][:]][:, 1])\n indices[mstep + 4] = (lens_dof_start +\n mesh.simplices[pbary[1][:]][:, 2])\n indices[mstep + 5] = (lens_dof_start +\n mesh.simplices[qbary[1][:]][:, 0])\n indices[mstep + 6] = (lens_dof_start +\n mesh.simplices[qbary[1][:]][:, 1])\n indices[mstep + 7] = (lens_dof_start +\n mesh.simplices[qbary[1][:]][:, 2])\n\n offset += npoint_pairs*nnz_per_row\n\n A = csr_matrix((data, indices, indptr), dtype='float64')\n\n wts = sparse.eye(weights.size, format='csr', dtype='float64')\n wts.data = weights\n return A, wts, b, lens_dof_start",
"def linear_forward(A, W, b):\n \n ### START CODE HERE ### (≈ 1 line of code)\n Z = np.dot( W, A ) + b\n ### END CODE HERE ###\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache",
"def apply_matrix(self, A):\n assert self.is_vector(), 'Can only apply matrices to vector-valued functions'\n C = np.matmul(A, self.coeffs[..., None])\n assert C.shape[-1] == 1 # this should have created a new singleton axis\n return BSplineFunc(self.kvs, np.squeeze(C, axis=-1))",
"def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state",
"def _inner_product_a0(self, tangent_vec_a, tangent_vec_b, vertex_areas_bp):\n return self.a0 * gs.sum(\n vertex_areas_bp\n * gs.einsum(\"...bi,...bi->...b\", tangent_vec_a, tangent_vec_b),\n axis=-1,\n )",
"def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _",
"def vectorized_loops(self, data):\n\n # TODO: finish this.\n return np.add(np.multiply(data,data), data)"
] | [
"0.59027725",
"0.58240545",
"0.57738847",
"0.5771491",
"0.56887174",
"0.5658218",
"0.56540334",
"0.56478906",
"0.5588295",
"0.55453885",
"0.5531856",
"0.55062973",
"0.548991",
"0.5472191",
"0.54176724",
"0.541392",
"0.5401743",
"0.5387512",
"0.53842276",
"0.53838414",
"0.53745127",
"0.5367906",
"0.5361873",
"0.53534406",
"0.5347286",
"0.5338383",
"0.5333888",
"0.5319858",
"0.53193945",
"0.5316163"
] | 0.6895596 | 0 |
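A minimal check that the einsum form of `v^T A v` above agrees with the explicit matmul chain it replaces — a sketch; the batch and matrix sizes are arbitrary:

import numpy as np

def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:
    return np.einsum("...k,...kl,...l->...", v, A, v)

rng = np.random.default_rng(0)
A = rng.standard_normal((8, 5, 5))  # batch of matrices
v = rng.standard_normal((8, 5))     # batch of vectors
ref = np.matmul(v[..., None, :], np.matmul(A, v[..., :, None])).squeeze((-2, -1))
assert np.allclose(batch_vTAv(A, v), ref)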
Performs a batched inner product over the last dimension. Replacement for deprecated `from numpy.core.umath_tests import inner1d`. | def batch_inner(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
if verify and a.shape != b.shape:
raise ValueError("All dimensions have to be equal")
if a.shape[-1] == 0:
return np.zeros(a.shape[:-1], dtype=np.result_type(a, b))  # empty inner product is 0; result drops the last axis
return np.einsum("...i,...i->...", a, b) # faster than np.sum(a * b, axis=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]",
"def outer_product(input_sets, axis=0):\n out = cartesian_product(input_sets)\n return np.prod(out, axis=axis)\n\n # try:\n # from pyapprox.cython.utilities import outer_product_pyx\n # # fused type does not work for np.in32, np.float32, np.int64\n # # so envoke cython cast\n # if np.issubdtype(input_sets[0][0], np.signedinteger):\n # return outer_product_pyx(input_sets, 1)\n # if np.issubdtype(input_sets[0][0], np.floating):\n # return outer_product_pyx(input_sets, 1.)\n # else:\n # return outer_product_pyx(input_sets, input_sets[0][0])\n # except ImportError:\n # print('outer_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = len(input_sets[ii])\n # num_elems *= sizes[ii]\n\n # # try:\n # # from pyapprox.weave import c_outer_product\n # # return c_outer_product(input_sets)\n # # except:\n # # print ('outer_product extension failed')\n\n # result = np.empty((num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # result[ii] = 1.0\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # result[ii] *= input_sets[jj][multi_index[jj]]\n\n # return result",
"def batch_outer(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:\n\n if verify and a.shape[:-1] != b.shape[:-1]:\n raise ValueError(\"All except the last dimension have to be equal\")\n\n return np.einsum(\"...i,...j->...ij\", a, b) # slightly faster than np.multiply(a[...,:,None], b[...,None,:])",
"def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]",
"def outer_product(a, b, batch=False):\n if batch:\n return batch_outer_product(a, b)\n a, b = normalize_and_check_ndim([a, b], 1)\n # The outer product is equivalent to matrix multiplication a * b\n # where the vector a is interpreted as a column matrix and the\n # vector b as a row matrix. The following reshaping and\n # multiplication accomplishes this.\n return a[:, np.newaxis] * b[np.newaxis, :]",
"def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder",
"def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))",
"def batch_outer_sum(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # Due to broadcasting, this sum works analogously to batch matrix\n # multiplication. See also comments in batch_outer_product().\n return a[:, :, np.newaxis] + b[:, np.newaxis, :]",
"def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)",
"def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]",
"def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))",
"def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul",
"def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)",
"def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z",
"def outer(a, b):\n m, n = a.size, b.size\n\n if m * n < 2**14:\n return mul_dense(a.reshape(m, 1), b.reshape(1, n))\n\n out = np.empty((m, n), dtype=common_type(a, b))\n _outer_par(a.ravel(), b.ravel(), out, m, n)\n\n return out",
"def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)",
"def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")",
"def outer(a, b=None):\n if b is None:\n b = a\n size_a = tuple(a.size()) + (b.size()[-1],)\n size_b = tuple(b.size()) + (a.size()[-1],)\n a = a.unsqueeze(dim=-1).expand(*size_a)\n b = b.unsqueeze(dim=-2).expand(*size_b)\n return a, b",
"def outer(x, y):\r\n if x.ndim != 1:\r\n x = x.flatten()\r\n if y.ndim != 1:\r\n y = y.flatten()\r\n return dot(\r\n x.dimshuffle(0, 'x'),\r\n y.dimshuffle('x', 0))",
"def matmul(self, x, work_buffer):\n\n x = asarray(x)\n space = work_buffer.flat\n\n if (x.ndim == 0):\n ValueError(\n \"matmul: Input operand 1 does not have enough dimensions \"\n \"(has 0, gufunc core with signature (n?,k),(k,m?)->(n?,m?) \"\n \"requires 1\"\n )\n\n if x.ndim == 1 and self.ndim == 1:\n # Dot product\n if x.shape[0] == self._size:\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n out = matmul(self[self._begin:], x[:k]).view(ndarray)\n out += matmul(self[:self._end], x[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n out = matmul(part, x).view(ndarray)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self._size,\n m=x.shape[0]\n )\n )\n elif self.ndim == 1 and x.ndim > 1:\n if self._size == x.shape[-2]:\n out_shape = *x.shape[:-2], x.shape[-1]\n out = empty(out_shape)\n out2 = space[:reduce(operator.mul, out_shape)].reshape(\n out_shape\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(self[self._begin:], x[..., :k, :], out)\n out += matmul(self[:self._end], x[..., k:, :], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out).view(ndarray)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif self.ndim == 2:\n if (self.shape[-1] == x.shape[-2]):\n out = empty(\n (*x.shape[:-2], self.shape[-1], x.shape[-2])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(self[self._begin:], x, out[..., :k, :])\n matmul(self[:self._end], x, out[..., k:, :])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out)\n\n return(out.view(ndarray))\n\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) 
(size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n if (self.shape[-1] == x.shape[-2]):\n self_shape = (self._size, *self.shape[1:-2])\n\n starexpr = tuple(\n zip_longest(self_shape, x.shape[:-2], fillvalue=1)\n )\n if star_can_broadcast(starexpr):\n broadcast_shape = tuple(\n starmap(\n lambda a, b: max(a, b),\n starexpr\n )\n )\n\n out = empty(\n (*broadcast_shape, self.shape[-2], x.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n if x.ndim > 2:\n matmul(self[self._begin:], x[:k], out[:k])\n matmul(self[:self._end], x[k:], out[k:])\n else:\n matmul(self[self._begin:], x, out[:k])\n matmul(self[:self._end], x, out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n (\n \"operands could not be broadcast together with\"\n \"remapped shapes [original->remapped]: \"\n \"{shape_b}->({shape_bn}, newaxis,newaxis) \"\n \"{shape_a}->({shape_an}, newaxis,newaxis) \"\n \"and requested shape ({n},{m})\"\n ).format(\n shape_a=self_shape,\n shape_b=x.shape,\n shape_an=self.shape[:-2].__str__()[:-1],\n shape_bn=x.shape[:-2].__str__()[:-1],\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) (size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )",
"def outer_sum(a, b, batch=False):\n if batch:\n return batch_outer_sum(a, b)\n # TODO: naming. Surely this has to be called something sensible?\n a, b = normalize_and_check_ndim([a, b], 1)\n # Due to broadcasting, this sum works analogously to matrix\n # multiplication. See also comments in outer_product().\n return a[:, np.newaxis] + b[np.newaxis, :]",
"def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z",
"def test_multidimensional_operation(self):\n # start with something (1, 2, 3)\n data = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]])\n\n # split 1st dim (2, 2, 3)\n coefficients = np.ones((1, 2)) / 2\n expected = np.array(\n [[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]], [[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]]\n )\n actual = Adaptor.convert_with_coefficients(data, coefficients, 0)\n np.testing.assert_allclose(actual, expected)\n\n # sum 3rd dim (2, 2, 1)\n coefficients = np.ones((3, 1))\n expected = np.array([[[1.5], [6.0]], [[1.5], [6.0]]])\n actual = Adaptor.convert_with_coefficients(actual, coefficients, 2)\n np.testing.assert_allclose(actual, expected)",
"def _flatten_batch(self, matrix_tups):\n out_vecs = []\n for t in matrix_tups:\n for v in t:\n new_shape = (v.shape[0],)\n if len(v.shape) > 1:\n new_shape = new_shape + (np.prod(v.shape[1:]),)\n out_vecs.append(v.reshape(new_shape))\n return jnp.concatenate(out_vecs, axis=1)",
"def inner_products(t_S, t_Var, t_XS, t_YS, t_XE, t_YE, t_XR, t_YR):\n\n # Note in this computation, we do the indices in this form:\n # b, i, j, t\n # batch, pixel, neuron, time step\n\n # indices: b, i1, j, t\n t_dX = (t_XS.dimshuffle('x', 0, 'x', 'x') -\n t_XE.dimshuffle('x', 'x', 0, 'x') -\n t_XR.dimshuffle(0, 'x', 'x', 1))\n t_dX.name = 'dX'\n # indices: b, i2, j, t\n t_dY = (t_YS.dimshuffle('x', 0, 'x', 'x') -\n t_YE.dimshuffle('x', 'x', 0, 'x') -\n t_YR.dimshuffle(0, 'x', 'x', 1))\n t_dY.name = 'dY'\n\n # Use outer product trick to dot product image with point filters\n t_PixRFCouplingX = T.exp(-0.5 * t_dX ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingY = T.exp(-0.5 * t_dY ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingX.name = 'PixRFCouplingX'\n t_PixRFCouplingY.name = 'PixRFCouplingY'\n\n # Matrix of inner products between the images and the retinal RFs\n # indices: b, j, t\n # Sum_i2 T(i2, i1) * T(b, i2, j, t) = T(b, i1, j, t)\n t_IpsY = T.sum(t_S.dimshuffle('x', 0, 1, 'x', 'x') *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3),\n axis=1)\n # Sum_i1 T(b, i1, j, t) * T(b, i2, j, t) = T(b, j, t)\n t_Ips = T.sum(t_IpsY * t_PixRFCouplingX, axis=1)\n t_Ips.name = 'Ips'\n\n # For the gradient, we also prepare d Ips / dS\n # This is in the form b, i2, i1, j, t\n t_PixRFCoupling = (t_PixRFCouplingX.dimshuffle(0, 'x', 1, 2, 3) *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3))\n\n return t_Ips, t_PixRFCoupling",
"def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t",
"def calc_batch_dot_product_3d2d(Tbs, zc, use_gpu):\n\n if (use_gpu):\n return _calc_batch_dot_product_3d2d_gpu(Tbs, zc)\n\n #Get array dims to reshape model array to 2d\n nz = zc.shape[0]\n nrows = Tbs[0].shape[0]\n model = (Tbs@zc[:, :, None]).reshape((nz, nrows))\n return model",
"def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )",
"def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )",
"def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim"
] | [
"0.7076053",
"0.6733254",
"0.6566906",
"0.6538848",
"0.6200467",
"0.5971671",
"0.5898091",
"0.5882279",
"0.586811",
"0.58539116",
"0.5828389",
"0.58158463",
"0.57993835",
"0.5773256",
"0.57691",
"0.570448",
"0.5675385",
"0.5650381",
"0.5588937",
"0.55867285",
"0.5513858",
"0.55136836",
"0.5509871",
"0.54723763",
"0.5454234",
"0.5443507",
"0.5403955",
"0.538054",
"0.5368168",
"0.53315485"
] | 0.6739568 | 1 |
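A short equivalence check for the batched inner product above — a sketch; `np.sum(a * b, axis=-1)` is the slower reference the inline comment alludes to:

import numpy as np

def batch_inner(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    return np.einsum("...i,...i->...", a, b)

rng = np.random.default_rng(1)
a = rng.standard_normal((4, 3, 7))
b = rng.standard_normal((4, 3, 7))
assert np.allclose(batch_inner(a, b), np.sum(a * b, axis=-1))  # result shape (4, 3)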
`probs`: values ndarray. `k`: take the smallest `k` elements if `reverse` is False, the largest `k` if `reverse` is True. `axis`: sorting and selection axis. | def batchtopk(
probs: np.ndarray, k: Optional[int] = None, axis: int = -1, reverse: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
if k is not None and k <= 0:
raise ValueError("k must be larger than 0. Use None to chose all elements.")
if axis != -1:
raise ValueError("Only last axis supported atm")
if len(probs.shape) <= 1:
raise ValueError("probs must be at least 2-dimensional")
if reverse:
sign = -1
else:
sign = 1
indices = np.argsort(sign * probs, axis=-1) # use argpartition?
probs = np.take_along_axis(probs, indices[..., :k], axis=-1)
return indices, probs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tflite_top_k_probs(probs, k):\n\n if k > 0:\n return np.flip(probs[0].argsort()[-k:])\n else:\n return np.flip(probs[0].argsort())",
"def tf_top_k_probs(probs, k):\n\n if k > 0:\n return probs.argsort()[-k:][::-1]\n else:\n return probs.argsort()[:][::-1]",
"def indices_of_top_k(arr, k):\n return np.sort(np.argpartition(np.array(arr), -k)[-k:])",
"def permute_via_sort(val, keys, inverse_keys, axis=0):\n # It is *not* safe to use jax.custom_vjp here (see permute_via_gather).\n keys = jax.lax.stop_gradient(keys)\n inverse_keys = jax.lax.stop_gradient(inverse_keys)\n def permute_impl(val):\n # On TPU, sorting scalars by key is faster than a gather.\n _, permuted = jax.lax.sort_key_val(keys, val, dimension=axis)\n return permuted\n def permute_vjp(val):\n permuted = permute_impl(jax.lax.stop_gradient(val))\n def vjpfun(permuted_grad):\n _, val_grad = jax.lax.sort_key_val(\n inverse_keys, permuted_grad, dimension=axis)\n return (val_grad,)\n return permuted, vjpfun\n permute = jax.custom_transforms(permute_impl)\n jax.defvjp_all(permute, permute_vjp)\n return permute(val)",
"def reveal_sort(k, D, reverse=False):\n assert len(k) == len(D)\n library.break_point()\n shuffle = types.sint.get_secure_shuffle(len(k))\n k_prime = k.get_vector().secure_permute(shuffle).reveal()\n idx = types.Array.create_from(k_prime)\n if reverse:\n D.assign_vector(D.get_slice_vector(idx))\n library.break_point()\n D.secure_permute(shuffle, reverse=True)\n else:\n D.secure_permute(shuffle)\n library.break_point()\n v = D.get_vector()\n D.assign_slice_vector(idx, v)\n library.break_point()\n instructions.delshuffle(shuffle)",
"def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops",
"def fetch_top_k(vect, mat, k):\n resultant = np.dot(mat, vect)\n arglist = np.argsort(resultant)\n arglist = arglist[-1:(-1 - k):-1]\n return arglist, resultant",
"def sort_k_messed_array(arr, k):\n\n if k == 0:\n return arr\n\n for i in range(len(arr)):\n min_index = find_min_index(arr, i, i + k)\n arr[i], arr[min_index] = arr[min_index], arr[i]\n\n return arr",
"def order_preserving_k_max(input_tensor, k):\n ndims = input_tensor.shape.ndims\n \n # get indices of topk elements\n indices = tf.nn.top_k(input_tensor, k, sorted=False).indices#shape [d1,d2..,dn-1,k]\n # sort indices of topk elements\n indices = tf.nn.top_k(indices, k, sorted=True).values#shape [d1,d2..,dn-1,k]\n indices = tf.expand_dims(indices, axis=ndims)#shape [d1,d2..,dn-1,1,k]\n\n # build supporting indices for first n-1 dims\n support = tf.meshgrid(*[tf.range(tf.shape(input_tensor)[d])\n for d in xrange(ndims-1)], indexing='ij')#see numpy.meshgrid\n support = tf.stack(support, axis=ndims-1)#shape [d1,d2..,dn-1,ndims-1]\n support = tf.expand_dims(support, axis=ndims-1)#shape [d1,d2..,dn-1,1,ndims-1]\n support = tf.tile(support, [1]*(ndims-1)+[k, 1])#shape [d1,d2..,dn-1,k,ndims-1]\n\n full_indices = tf.concat([support, indices], axis=ndims)#shape [d1,d2..,dn-1,k,ndims]\n output = tf.gather_nd(input_tensor, full_indices)\n \n return output",
"def k_smallest_sorted(a, k):\r\n k_smallest_idxs = np.argpartition(a, k)[:k]\r\n return k_smallest_idxs[np.argsort(a[k_smallest_idxs])]",
"def find_top_k(predictions, boxes, k):\r\n\r\n if predictions.shape[0] == 0:\r\n predictions2 = torch.Tensor([]).to(device)\r\n labels2 = torch.Tensor([]).to(device)\r\n boxes2 = torch.Tensor([]).to(device)\r\n scores2 = torch.Tensor([]).to(device)\r\n\r\n else:\r\n predictions0 = predictions\r\n scores0 = torch.max(predictions0, dim=1)[0]\r\n labels0 = torch.argmax(predictions0, dim=1)\r\n boxes0 = boxes\r\n\r\n sort = torch.argsort(scores0, descending=True)\r\n boxes1, labels1, scores1, predictions1 = boxes0[sort], labels0[sort], scores0[sort], predictions0[sort]\r\n\r\n boxes2, labels2, scores2, predictions2 = boxes1[:k], labels1[:k] + 1, scores1[:k], predictions1[:k]\r\n\r\n return predictions2, boxes2, labels2, scores2",
"def bboxes_sort(classes, scores, bboxes, top_k = 400):\n# if priority_inside:\n# inside = (bboxes[:, 0] > margin) & (bboxes[:, 1] > margin) & \\\n# (bboxes[:, 2] < 1-margin) & (bboxes[:, 3] < 1-margin)\n# idxes = np.argsort(-scores)\n# inside = inside[idxes]\n# idxes = np.concatenate([idxes[inside], idxes[~inside]])\n idxes = np.argsort(-scores)\n classes = classes[idxes][:top_k]\n scores = scores[idxes][:top_k]\n bboxes = bboxes[idxes][:top_k]\n return classes, scores, bboxes",
"def sorted_top_k(item_counts, k):\n # Partitioning runs in O(d) time.\n top_k_unsorted = np.argpartition(-item_counts, k - 1)[:k]\n # Sorting highest k counts runs in O(k * log(k)) time.\n sorting_order = np.argsort(item_counts[top_k_unsorted])[::-1]\n return top_k_unsorted[sorting_order]",
"def topk(vec, k):\n vec = torch.topk(vec, k)\n return vec.view(-1).data.tolist()",
"def _topk(vec, k):\n # on a gpu, sorting is faster than pytorch's topk method\n #topkIndices = torch.sort(vec**2)[1][-k:]\n # however, torch.topk is more space efficient\n\n # topk on cuda returns what looks like uninitialized memory if\n # vals has nan values in it\n # saving to a zero-initialized output array instead of using the\n # output of topk appears to solve this problem\n topkVals = torch.zeros(k, device=vec.device)\n topkIndices = torch.zeros(k, device=vec.device).long()\n torch.topk(vec**2, k, sorted=False, out=(topkVals, topkIndices))\n\n ret = torch.zeros_like(vec)\n if len(vec.size()) == 1:\n ret[topkIndices] = vec[topkIndices]\n elif len(vec.size()) == 2:\n rows = torch.arange(vec.size()[0]).view(-1,1)\n ret[rows, topkIndices] = vec[rows, topkIndices]\n return ret",
"def bboxes_sort(classes, scores, bboxes, top_k=400):\n # if priority_inside:\n # inside = (bboxes[:, 0] > margin) & (bboxes[:, 1] > margin) & \\\n # (bboxes[:, 2] < 1-margin) & (bboxes[:, 3] < 1-margin)\n # idxes = np.argsort(-scores)\n # inside = inside[idxes]\n # idxes = np.concatenate([idxes[inside], idxes[~inside]])\n idxes = np.argsort(-scores)\n classes = classes[idxes][:top_k]\n scores = scores[idxes][:top_k]\n bboxes = bboxes[idxes][:top_k]\n return classes, scores, bboxes",
"def argsort_desc(scores):\n return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape))",
"def top_k(input, k=1, sorted=True, index_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin\n return gen_nn_ops.top_kv2(\n input, k=k, sorted=sorted, index_type=index_type, name=name\n )",
"def partition_arg_topK(matrix, K, axis=0):\n a_part = np.argpartition(matrix, K, axis=axis)\n if axis == 0:\n row_index = np.arange(matrix.shape[1 - axis])\n a_sec_argsort_K = np.argsort(matrix[a_part[0:K, :], row_index], axis=axis)\n return a_part[0:K, :][a_sec_argsort_K, row_index]\n else:\n column_index = np.arange(matrix.shape[1 - axis])[:, None]\n a_sec_argsort_K = np.argsort(matrix[column_index, a_part[:, 0:K]], axis=axis)\n return a_part[:, 0:K][column_index, a_sec_argsort_K]",
"def _prob_in_top_k(\n clean_values, noisy_values, noise_stddev, noisy_top_values, k):\n batch = tf.shape(clean_values)[0]\n m = tf.shape(noisy_top_values)[1]\n top_values_flat = tf.reshape(noisy_top_values, [-1])\n # we want to compute the threshold that a particular value would have to\n # exceed in order to make the top k. This computation differs depending\n # on whether the value is already in the top k.\n threshold_positions_if_in = tf.range(batch) * m + k\n threshold_if_in = tf.expand_dims(\n tf.gather(top_values_flat, threshold_positions_if_in), 1)\n is_in = tf.greater(noisy_values, threshold_if_in)\n if noise_stddev is None:\n return tf.to_float(is_in)\n threshold_positions_if_out = threshold_if_in - 1\n threshold_if_out = tf.expand_dims(\n tf.gather(top_values_flat, threshold_positions_if_out), 1)\n # is each value currently in the top k.\n prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in,\n noise_stddev)\n prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out,\n noise_stddev)\n prob = tf.where(is_in, prob_if_in, prob_if_out)\n return prob",
"def get_top_values(weights, top_k=4):\n top_idx = np.argsort(weights)[-top_k:]\n top_idx = np.flip(top_idx)\n top_values = [weights[i] for i in top_idx]\n return top_idx, top_values",
"def _cmplx_sort(p):\n indx = cupy.argsort(cupy.abs(p))\n return cupy.take(p, indx, 0), indx",
"def argsort(tensor, axis):\n raise NotImplementedError",
"def top_k(m, k):\n ml = m.tolil()\n ms = [_top_k(d, r, k) for d, r in zip(ml.data, ml.rows)]\n return zip(*ms)",
"def top_indices(preds, num):\n sort_preds = np.sort(preds, 1)\n sort_preds = np.flip(sort_preds)\n sort_index = np.argsort(preds, 1)\n sort_index = np.flip(sort_index)\n\n print(f\"Top {num} results:\")\n for i in range(num):\n print(sort_index[0][i], sort_preds[0][i])\n\n return 0",
"def generate_order(arr, descending=True):\n sorted_indices = torch.argsort(arr, 0, descending=descending)\n return sorted_indices.reshape((len(arr), ))",
"def reorder_after_dim_reduction(order):\n arr = sorted(range(len(order)), key=lambda x: order[x])\n return tuple(arr)",
"def _my_top_k(x, k):\n if k > 10:\n return tf.nn.top_k(x, k)\n values = []\n indices = []\n depth = tf.shape(x)[1]\n for i in range(k):\n values.append(tf.reduce_max(x, 1))\n argmax = tf.argmax(x, 1)\n indices.append(argmax)\n if i + 1 < k:\n x += tf.one_hot(argmax, depth, -1e9)\n return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))",
"def top_k_accuracy(y_true : np.ndarray, probs: np.ndarray, k: int) -> float:\r\n \r\n # Top k sorted preds\r\n sorted_probs = probs.argsort()[:,-k:]\r\n\r\n # Does the truth intersect with any of the top k predictions?\r\n matches = np.max(sorted_probs == y_true.reshape(-1, 1), axis=1)\r\n return matches.mean()",
"def decode(self, start: np.ndarray, end: np.ndarray, topk: int,\n max_answer_len: int, sort_with_prob: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n # Ensure we have batch axis\n if start.ndim == 1:\n start = start[None]\n\n if end.ndim == 1:\n end = end[None]\n\n # Compute the score of each tuple(start, end) to be the real answer\n if sort_with_prob:\n candidates = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))\n else:\n candidates = np.expand_dims(start, -1) + np.expand_dims(end, 1)\n\n # Remove candidates with end < start\n candidates[..., np.tri(*candidates.shape[-2:], k=-1, dtype=bool)] = candidates.min() # noqa\n # Remove candidates with end - start > max_answer_len\n candidates[..., ~np.tri(*candidates.shape[-2:], k=max_answer_len - 1, dtype=bool)] = candidates.min() # noqa\n\n # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)\n scores_flat = candidates.flatten()\n if topk == 1:\n idx_sort = [np.argmax(scores_flat)]\n elif len(scores_flat) < topk:\n idx_sort = np.argsort(-scores_flat)\n else:\n idx = np.argpartition(-scores_flat, topk)[0:topk]\n idx_sort = idx[np.argsort(-scores_flat[idx])]\n\n return np.unravel_index(idx_sort, candidates.shape)"
] | [
"0.7283214",
"0.6759031",
"0.58316225",
"0.58071005",
"0.5797564",
"0.57924837",
"0.5782279",
"0.5650926",
"0.55970734",
"0.5593298",
"0.558886",
"0.5553999",
"0.5523884",
"0.548393",
"0.5477123",
"0.5454491",
"0.5419435",
"0.5367509",
"0.5322573",
"0.5320744",
"0.5307545",
"0.5302082",
"0.52796197",
"0.5278191",
"0.5250317",
"0.5230142",
"0.516807",
"0.5133175",
"0.5130909",
"0.5104751"
] | 0.7433971 | 0 |
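The core of `batchtopk` above is argsort plus take_along_axis; a self-contained sketch of both directions (the values are chosen only for illustration):

import numpy as np

probs = np.array([[0.1, 0.5, 0.2, 0.2],
                  [0.7, 0.1, 0.1, 0.1]])
asc = np.argsort(probs, axis=-1)                              # reverse=False: smallest first
smallest2 = np.take_along_axis(probs, asc[..., :2], axis=-1)  # [[0.1, 0.2], [0.1, 0.1]]
desc = np.argsort(-probs, axis=-1)                            # reverse=True: largest first
largest2 = np.take_along_axis(probs, desc[..., :2], axis=-1)  # [[0.5, 0.2], [0.7, 0.1]]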
Calculates the sum of the logs of the diagonal elements (batchwise if necessary) | def logtrace(m: np.ndarray) -> np.ndarray:
""" note: performance cannot easily be improve by numba.
`np.diagonal` not supported by numba 0.52.0
"""
return np.sum(np.log(np.diagonal(m, axis1=-2, axis2=-1)), axis=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trace(X):\r\n return extract_diag(X).sum()",
"def trace(X):\n return extract_diag(X).sum()",
"def ln_sum_i_neq_j(x):\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1, batch_size)\n\tx_masked = x.view(1, iw_size, batch_size) - inv_mask*1000000.0\n\treturn logsumexp(x_masked, dim=1)",
"def logsum_safe(prob_ll, atl):\n logpdf = prob_ll + K.log(atl + K.epsilon())\n alpha = tf.reduce_max(logpdf, -1, keepdims=True)\n return alpha + tf.log(tf.reduce_sum(K.exp(logpdf-alpha), -1, keepdims=True) + K.epsilon())",
"def _compute_log_xi_sum(n_samples, n_components, fwdlattice, \\\n log_transmat, bwdlattice, batch_framelogprob, \\\n log_xi_sum, logprob, mask):\n\n batch_size=batch_framelogprob.shape[0]\n work_buffer = torch.zeros((batch_size, \\\n log_transmat.shape[0], \\\n log_transmat.shape[1]), \\\n device=mask.device)\n log_transmat = log_transmat.reshape(1,n_components,n_components).repeat(batch_size,1,1)\n \n \n for t in range(n_samples - 1):\n for i in range(n_components):\n work_buffer[:, i,:] = fwdlattice[:, t, i].reshape(-1, 1) + \\\n log_transmat[:, i, :] + \\\n batch_framelogprob[:, t+1, :] + \\\n bwdlattice[:, t+1, :] \\\n - logprob.reshape(-1,1)\n\n log_xi_sum = _logaddexp(log_xi_sum, work_buffer, mask[:,t+1]) \n\n return log_xi_sum",
"def U(xs):\n ret = 0\n for x in xs:\n ret += log(x)\n return ret",
"def log_cum_sum(A, output_sum=False):\n C = [A[0]]\n for a in A[1:]:\n C.append(log_add(C[-1], a))\n C_norm = np.array(C) - C[-1]\n if output_sum:\n return C_norm, C[-1]\n else:\n return C_norm",
"def sum_log(*args):\n # if all(a == LOG_ZERO for a in args):\n # return LOG_ZERO\n a_max = np.max(args, 0)\n lsp = np.log(np.sum([np.exp(a - a_max) for a in args], 0))\n return a_max + lsp",
"def _trace_sparse(op):\n return np.sum(op.diagonal())",
"def nll_diagonal(self, target, mu, logvar):\n precision = torch.exp(-logvar)\n # Loss kernel\n loss = precision * (target - mu)**2.0 + logvar\n # Restore prefactors\n loss += np.log(2.0*np.pi)\n loss *= 0.5\n return torch.mean(torch.sum(loss, dim=1), dim=0)",
"def sum_diag(max_lines):\r\n dsum = 1 # sum of diagonals\r\n cpt = 1 # number of lines processed\r\n val = 1 # value of the current place in the square\r\n inc = 0 # the increment between number for one line\r\n \r\n while cpt < max_lines:\r\n cpt += 2\r\n inc += 2\r\n \r\n for corner in range(4):\r\n val += inc\r\n dsum += val\r\n\r\n return dsum",
"def log_sum_exp(x):\n # TF ordering\n axis = len(x.shape) - 1\n m = paddle.max(x, axis=axis)\n m2 = paddle.max(x, axis=axis, keepdim=True)\n return m + paddle.log(paddle.sum(paddle.exp(x - m2), axis=axis))",
"def logsumexp(logv):\n res = logzero()\n for val in logv:\n res = logsum_pair(res, val)\n return res",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag_shape=self.diag_shape)",
"def loglloop(store):\n suml=0.0\n for i in xrange(store['yvec'].shape[0]):\n xbeta=dot(store['xmat'][i,:],store['beta'])\n suml=suml+store['yvec'][i] * xbeta - exp(xbeta)\n return suml",
"def trace(self):\n # TODO 异常 非常规输入处理\n if not self.is_square():\n raise(\n ValueError, \"Cannot calculate the trace of a non-square matrix.\")\n # TODO Calculates the main diagonal num's sum\n sum = 0\n for i in range(self.h):\n sum += self[i][i]\n\n return sum",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())",
"def log_sum_exp(vec):\r\n\r\n\r\n max_score, idx = torch.max(vec, -1, keepdim = True) # ( B, to_target, 1)\r\n # max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n # max_score.expand_as(vec)\r\n # to_target = vec.size(1)\r\n\r\n return max_score.squeeze(-1) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), -1)) # B * to_target\r",
"def log_sum_exp_pytorch(vec: torch.Tensor) -> torch.Tensor:\n maxScores, idx = torch.max(vec, 1)\n maxScores[maxScores == -float(\"Inf\")] = 0\n maxScoresExpanded = maxScores.view(vec.shape[0] ,1 , vec.shape[2]).expand(vec.shape[0], vec.shape[1], vec.shape[2])\n return maxScores + torch.log(torch.sum(torch.exp(vec - maxScoresExpanded), 1))",
"def logrels(rets):\n return np.log(rets + 1)",
"def logsumexp_trick(sum_term):\n max_term = np.max(sum_term)\n return max_term + np.log(np.sum(np.exp(sum_term-max_term)))",
"def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()",
"def diagonalSum(M):\n diogonalSum = []\n print(M)\n for i in xrange(0, M.shape[0]):\n diogonalSum += M[i][i]\n print(i)\n \n #print(diogonalSum)\n return diogonalSum",
"def _horizontal_log(self, X: np.ndarray) -> (np.ndarray, np.ndarray):\n ret_p = np.zeros_like(X)\n ret_n = np.zeros_like(X)\n log_p = self.manifold.log(X[:, :-1], X[:, 1:])\n log_n = self.manifold.log(X[:, 1:], X[:, :-1])\n ret_p[:, :-1] = log_p\n ret_n[:, 1:] = log_n\n return ret_p, ret_n",
"def trace(matrix):\n\n if len(matrix[0]) == 0:\n return 0.0\n \n return float(sum(matrix[i][i] for i in range(len(matrix))))",
"def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)",
"def _log_sum_exp(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x-m2), axis))",
"def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])",
"def logsumexp(input_matrix, reduction_indices=1, keep_dims=False):\r\n\r\n max_input_matrix1 = input_matrix.max(reduction_indices, keepdims=keep_dims)\r\n max_input_matrix2 = max_input_matrix1\r\n if not keep_dims:\r\n max_input_matrix2 = np.expand_dims(max_input_matrix2, reduction_indices)\r\n return np.log(\r\n np.sum(\r\n np.exp(input_matrix - max_input_matrix2),\r\n reduction_indices,\r\n keepdims=keep_dims)) + max_input_matrix1",
"def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max"
] | [
"0.6600348",
"0.6512393",
"0.63457274",
"0.62601304",
"0.62387985",
"0.6219907",
"0.6185488",
"0.6124841",
"0.6087076",
"0.606045",
"0.6032963",
"0.6027041",
"0.6024959",
"0.60206836",
"0.60127777",
"0.6009793",
"0.598619",
"0.5973478",
"0.5966066",
"0.58722997",
"0.58696294",
"0.5851559",
"0.5845538",
"0.5815091",
"0.5811031",
"0.5809058",
"0.57991326",
"0.5771243",
"0.5762435",
"0.57612073"
] | 0.6544133 | 1 |
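One place `logtrace` above shows up is log-determinants via Cholesky: for an SPD matrix A with factor L, logdet(A) = 2 * sum(log(diag(L))). A batched sketch, assuming only NumPy:

import numpy as np

def logtrace(m: np.ndarray) -> np.ndarray:
    return np.sum(np.log(np.diagonal(m, axis1=-2, axis2=-1)), axis=-1)

rng = np.random.default_rng(2)
x = rng.standard_normal((3, 4, 4))
spd = x @ np.swapaxes(x, -1, -2) + 4 * np.eye(4)  # batch of SPD matrices
chol = np.linalg.cholesky(spd)
assert np.allclose(2 * logtrace(chol), np.linalg.slogdet(spd)[1])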
Shifts `pvals` by the largest value in the last dimension before the exp is calculated to prevent overflow (batchwise if necessary). Can be used if probabilities are normalized again later. | def shiftedexp(pvals: np.ndarray) -> np.ndarray:
if pvals.shape[-1] == 0:
return np.empty_like(pvals)
return np.exp(pvals - np.amax(pvals, axis=-1)[..., None]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def benjamini_hochberg_step_down(pvals):\r\n tmp = fdr_correction(pvals)\r\n corrected_vals = empty(len(pvals))\r\n max_pval = 1.\r\n for i in argsort(pvals)[::-1]:\r\n if tmp[i] < max_pval:\r\n corrected_vals[i] = tmp[i]\r\n max_pval = tmp[i]\r\n else:\r\n corrected_vals[i] = max_pval\r\n return corrected_vals",
"def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n corrected_p_vals.append(p_val)\r\n return corrected_p_vals",
"def softmax(values):\n exps = np.exp(values)\n return exps / sum(exps)",
"def fdr_correction(pvals):\r\n tmp = array(pvals).astype(float) # this converts Nones to nans\r\n return tmp * tmp.size / (1. + argsort(argsort(tmp)).astype(float))",
"def softmax(p):\n p_exp = np.exp(p)\n return p_exp / np.sum(p_exp)",
"def test_correct_p_values(self):\r\n exp = [0.003, 0.006, 0.003]\r\n obs = self.mc._correct_p_values([0.001, 0.002, 0.001])\r\n assert_almost_equal(obs, exp)",
"def compute_power(pvals, SNPs):\n\tnsnps = len(pvals)\n\tall_snps = np.arange(0, nsnps)\n\tpos = SNPs\n\tnegs = list(set(all_snps) - set(SNPs))\n\n\tpvals_rank = rank_array(pvals)\n\n\trocr = np.zeros((nsnps, 2))\n\tfor i in all_snps:\n\t\tv = pvals_rank[0:i] # test positives\n\t\tz = list(set(all_snps) - set(v)) # test negatives\n\n\t\tTP = len(set(v) & set(pos))\n\t\tFP = len(set(v) & set(negs))\n\t\tTN = len(set(z) & set(negs))\n\t\tFN = len(set(z) & set(pos))\n\n\t\tTPR = 1.0*TP/(TP+FN); FPR = 1.0*FP/(FP+TN); #FDR = 1.0*FP/(FP+TP)\n\n\t\trocr[i, :] = [FPR, TPR]\n\n\treturn rocr",
"def test_correct_p_values_large_correction(self):\r\n exp = [1, None, 0.03, 0.03]\r\n obs = self.mc._correct_p_values([0.5, None, 0.01, 0.01])\r\n self.compare_multiple_level_array(obs, exp)",
"def adjustPValues(p_values, method=\"fdr\"):\n\tadjusted_p_values = p_values[:]\n\tn = len(p_values)\n\tif method.lower() == \"bh\" or method.lower() == 'fdr':\n\t\tni = range(n,0,-1) # from n to 1\n\t\t# Sort the P values and keep track of the indices\n\t\tindexed_pv = sorted(zip(p_values, range(n)), reverse=True)\n\t\t(pvals,inds) = zip(*indexed_pv)\n\t\t# adjust\n\t\tnewp = [(float(n)/ni[xi])*pvals[xi] for xi in range(n)]\n\t\tcum_min_p = [min(newp[0:xi]) for xi in range(1,n+1)]\n\t\tadjp_sorted = [min(p,1.0) for p in cum_min_p]\n\t\t# re-sort\n\t\tadjusted_p_values = [-1]*n\n\t\tfor xi in range(n):\n\t\t\tadjusted_p_values[inds[xi]] = adjp_sorted[xi]\n\telif method.lower() == 'bonferroni':\n\t\tadjusted_p_values = [min(n*p,1.0) for p in p_values]\n\treturn adjusted_p_values",
"def softmax(val, axis=-1):\n exp = np.exp(val - np.amax(val, axis=axis, keepdims=True))\n return exp / np.sum(exp, axis=axis, keepdims=True)",
"def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))",
"def cumprobs(self, values):\n values = np.asarray(values)\n index = np.searchsorted(self.xs, values, side='right')\n ps = self.ps[index-1]\n ps[values < self.xs[0]] = 0.0\n return ps",
"def bonferroni_correction(pvals):\r\n return (\r\n array(pvals, dtype=float) * len(pvals) # float conversion: Nones->nans\r\n )",
"def genvals():\n vals = np.empty(200)\n vals[:50] = np.arange(50) / 50\n vals[50:100] = (50 - np.arange(50)) / 50\n vals[100:] = -vals[:100]\n return vals",
"def save_expval_post_meas_values():\n targets = []\n for statevec in save_expval_final_statevecs():\n values = {}\n for label, (mat, qubits) in save_expval_params().items():\n inner_dict = {}\n for j in [\"00\", \"01\", \"10\", \"11\"]:\n # Check if non-zero measurement probability for given\n # measurement outcome for final statevector\n vec = Statevector.from_label(j)\n if not np.isclose(vec.data.dot(statevec.data), 0):\n # If outcome is non-zero compute expectation value\n # with post-selected outcome state\n inner_dict[hex(int(j, 2))] = vec.data.conj().dot(vec.evolve(mat, qubits).data)\n values[label] = inner_dict\n targets.append(values)\n return targets",
"def lpflip(P):\n if len(P) == 1:\n return 0\n\n Z = logsumexp(P)\n P -= Z\n\n NP = np.exp(np.copy(P))\n\n assert math.fabs(1.0-sum(NP)) < 10.0**(-10.0)\n\n return pflip(NP)",
"def _test_stack(values, pops=0):\n stack = StackWithMax()\n for val in values:\n stack.push(val)\n for _ in range(pops):\n stack.pop()\n\n return stack.max()",
"def softmax(input):\n list_value = []\n len_compute = input.shape[-1]\n shape_input = input.shape\n for x in input.reshape(-1, len_compute):\n # print(x)\n e_x = np.exp(x - np.max(x))\n res = e_x / e_x.sum(axis=0)\n list_value.append(res)\n\n return np.array(list_value).reshape(shape_input)",
"def __ExpMovingAverage(self, values, window):\n weights = np.exp(np.linspace(-1., 0., window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode='full')[:len(values)]\n a[:window] = a[window]\n return a",
"def epsilongreedy_policy(Qvalues_oa):\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X",
"def policy_eval():\r\n \r\n action_prob = [0.125, 0.625, 0.125, 0.125]# actions with probabilities\r\n data = grid_world()\r\n state_axis = np.zeros((9, 9))#initialize states\r\n threshold = .1\r\n prior_state = np.ones((9, 9))\r\n \r\n while np.abs(state_axis - prior_state).max() > threshold:\r\n for x, y in product(range(9), repeat=2):\r\n prior_state = state_axis.copy()\r\n if data.array[x, y] == 'X':\r\n continue\r\n updated_values = [data.next_direction(np.array([x, y]), next_move)\r\n for next_move in data.directions]#Updating states with directions\r\n Sum_Expectation = np.dot(action_prob,\r\n [points_val + 0.9 * state_axis[position[0], position[1]]\r\n for position, points_val in updated_values])\r\n state_axis[x, y] = Sum_Expectation\r\n print(\"\\nExercise 3.1 Shows Value functions for the policy\\n\")\r\n print(state_axis)\r\n build_grid(state_axis, \"Shows Value functions for the policy\")",
"def HC_update(p_values, alpha):\n p_values = np.sort(p_values) # Make sure p-values are sorted in ascending order\n n = len(p_values) # Number of data points\n ivalues = np.arange(1, n + 1)\n #p_values = p_values[0:int(round(n/2))] # Cut-off half of the values\n HC_vec = np.sqrt(n)*(ivalues/(n+1) - p_values)/np.sqrt(p_values - p_values**2) # Calculate scores for all datapoints\n HC_vec_reduced = HC_vec[0:int(alpha*(len(HC_vec)-1))]\n max_idx = np.argmax(HC_vec_reduced)\n return HC_vec_reduced[max_idx], max_idx, HC_vec_reduced",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues",
"def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy",
"def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p",
"def compute_vals(\n self, vals: List[int], modes: int, instruction: MachineInstruction\n ) -> List[int]:\n # Use an intermediate string to obtain any missing leading zeros, since our opcode is\n # already an int\n # Reverse because parameter modes go from right to left\n modes = [int(mode) for mode in reversed(str(modes).zfill(len(vals)))]\n out_vals = [self.parameter_modes[mode](val) for val, mode in zip(vals, modes)]\n\n # Correct for write instructions always being in position mode\n if instruction.writes:\n out_vals[-1] = vals[-1]\n\n return out_vals",
"def softmax(scores):\n exp_score = np.exp(scores)\n return exp_score / np.sum(exp_score)",
"def softmax(X):\n _X = X - np.max(X, axis=1).reshape(-1, 1)\n ep = np.exp(_X)\n return ep / np.sum(ep, axis=1).reshape(-1, 1)",
"def fixval(arr, repval, retarr=False):\n # 2009-09-02 14:07 IJC: Created\n # 2012-12-23 11:49 IJMC: Halved run time.\n\n if retarr:\n arr2 = arr.ravel().copy()\n else:\n arr2 = arr.ravel()\n\n finiteIndex = np.isfinite(arr2)\n if not finiteIndex.any():\n badIndex = find((1-finiteIndex))\n arr2[badIndex] = repval\n\n if retarr:\n return arr2.reshape(arr.shape)\n else:\n return",
"def convert_to_one_tailed(longpvals):\n higher_in_dis = longpvals[longpvals['p'] > 0].index\n longpvals.loc[higher_in_dis, 'p-dis'] = longpvals.loc[higher_in_dis, 'p']/2\n\n higher_in_h = longpvals[longpvals['p'] <= 0].index\n longpvals.loc[higher_in_h, 'p-h'] = abs(longpvals.loc[higher_in_h, 'p']/2)\n\n def p_for_other_side(row, side, otherside):\n if np.isnan(row[side]):\n return 1-row[otherside]\n else:\n return row[side]\n longpvals['p-dis'] = longpvals.apply(p_for_other_side,\n args=('p-dis', 'p-h'),\n axis=1)\n longpvals['p-h'] = longpvals.apply(p_for_other_side,\n args=('p-h', 'p-dis'),\n axis=1)\n return longpvals"
] | [
"0.62695336",
"0.6066202",
"0.58225965",
"0.57363963",
"0.5489803",
"0.5446892",
"0.5247545",
"0.52059096",
"0.5191071",
"0.51882726",
"0.5157705",
"0.5066351",
"0.5033633",
"0.4979771",
"0.4901542",
"0.4892328",
"0.48860258",
"0.48807552",
"0.48794442",
"0.48614326",
"0.48505324",
"0.48391014",
"0.4834803",
"0.48337024",
"0.48271474",
"0.4798808",
"0.4785805",
"0.47762957",
"0.4758349",
"0.4753474"
] | 0.7399692 | 0 |
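Several of the negatives in the row above are variants of the numerically stable softmax (subtract the per-row maximum before exponentiating so that exp() cannot overflow). A minimal self-contained sketch of that pattern, with a shift-invariance check:

import numpy as np

def stable_softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    # Subtracting the max leaves the result unchanged but keeps exp() finite.
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=axis, keepdims=True)

logits = np.array([[1000.0, 1001.0], [0.0, 1.0]])
probs = stable_softmax(logits)
assert np.allclose(probs[0], probs[1])       # shift invariance: huge logits are safe
assert np.allclose(probs.sum(axis=-1), 1.0)  # rows are valid distributions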
Sample from a list of probabilities `pvals` with replacement. The probabilities don't need to be normalized. | def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:
return Sampler(np.cumsum(pvals)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n corrected_p_vals.append(p_val)\r\n return corrected_p_vals",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r",
"def mutateList(values, numMutate, vmin, vmax, rabs=True):\n\tmutations = set()\n\tcount = 0\n\twhile count < numMutate:\n\t\tj = randint(0, len(values)-1)\n\t\tif j not in mutations:\n\t\t\ts = np.random.uniform(vmin, vmax)\n\t\t\tvalues[j] = s if rabs else values[j] * s\n\t\t\tcount += 1\n\t\t\tmutations.add(j)\n\treturn values",
"def sample_distribution(numbers, probabilities, num_samples):\n intervals = []\n intervals.append(probabilities[0])\n new_interval = probabilities[0]\n\n for i in range(1, len(probabilities)):\n new_interval += probabilities[i]\n intervals.append(new_interval)\n\n counter = 0\n new_numbers = []\n while counter <= num_samples:\n for i in range(len(intervals)):\n # Generate a random num between 0 - 1\n # i.e. flip a coin.\n rand_prob = np.random.random_sample((1,))\n if rand_prob <= [intervals[i]]:\n new_numbers.append(numbers[i])\n counter += 1\n\n return new_numbers",
"def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))",
"def samplingWithReplacement(m):\n return [ random.randrange(m) for i in range(m) ]",
"def __setitem__(self, values, p):\n if isinstance(values, dict):\n values = [values[var] for var in self.variables]\n self.prob[values] = p\n for var,val in zip(self.variables, values):\n if val not in self.vals[var]:\n self.vals[var].append(val)",
"def initPmf(self, values):\n for value, prob in values.items():\n self.set(value, prob)",
"def weighted_values(values, probabilities, size):\n bins = np.add.accumulate(probabilities)\n indices = np.digitize(random_sample(size), bins)\n sample = []\n for ind in indices:\n sample.append(deepcopy(values[ind]))\n return sample",
"def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]",
"def prob_choice(p):\n \n return np.random.random_sample() < p",
"def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)",
"def sample_from_list(l, probs, max_n=None):\n assert len(l) == len(probs), 'given list l and probs must have same length'\n if max_n is None:\n max_n = len(l)\n sum_probs = sum(probs)\n if sum_probs == 0:\n return []\n probs_ = np.array(probs) / sum_probs\n # we draw max n or |probs_ > 0|\n # noinspection PyTypeChecker\n n = min(max_n, np.sum(probs_ > 0))\n # use idx approach as direct passing to np.random.choice would convert\n # items of l into str\n # noinspection PyUnresolvedReferences\n res = [\n l[idx] for idx in np.random.choice(len(l), n, replace=False, p=probs_)\n ]\n return res",
"def resample_particles(self):\n # make sure the distribution is normalized\n self.normalize_particles()\n\n newParticles = []\n for i in range(len(self.particle_cloud)):\n # resample the same # of particles\n choice = random_sample()\n # all the particle weights sum to 1\n csum = 0 # cumulative sum\n for particle in self.particle_cloud:\n csum += particle.w\n if csum >= choice:\n # if the random choice fell within the particle's weight\n newParticles.append(deepcopy(particle))\n break\n self.particle_cloud = newParticles",
"def bonferroni_correction(pvals):\r\n return (\r\n array(pvals, dtype=float) * len(pvals) # float conversion: Nones->nans\r\n )",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues",
"def fdr_correction(pvals):\r\n tmp = array(pvals).astype(float) # this converts Nones to nans\r\n return tmp * tmp.size / (1. + argsort(argsort(tmp)).astype(float))",
"def test_correct_p_values(self):\r\n exp = [0.003, 0.006, 0.003]\r\n obs = self.mc._correct_p_values([0.001, 0.002, 0.001])\r\n assert_almost_equal(obs, exp)",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\r\n from numpy import array, empty\r\n pvalues = array(pvalues)\r\n n = float(pvalues.shape[0])\r\n new_pvalues = empty(n)\r\n if correction_type == \"Bonferroni\":\r\n new_pvalues = n * pvalues\r\n elif correction_type == \"Bonferroni-Holm\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n for rank, vals in enumerate(values):\r\n pvalue, i = vals\r\n new_pvalues[i] = (n - rank) * pvalue\r\n elif correction_type == \"Benjamini-Hochberg\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n values.reverse()\r\n new_values = []\r\n for i, vals in enumerate(values):\r\n rank = n - i\r\n pvalue, index = vals\r\n new_values.append((n / rank) * pvalue)\r\n for i in range(0, int(n) - 1):\r\n if new_values[i] < new_values[i + 1]:\r\n new_values[i + 1] = new_values[i]\r\n for i, vals in enumerate(values):\r\n pvalue, index = vals\r\n new_pvalues[index] = new_values[i]\r\n return new_pvalues",
"def categorical(pvals: np.ndarray) -> int:\n\n return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))",
"def swapBetweenLists(values1, values2):\n\tp1 = randint(0, len(values1)-1)\n\tp2 = randint(0, len(values2)-1)\n\ttmp = values1[p1]\t\n\tvalues1[p1] = values2[p2]\n\tvalues2[p2] = tmp",
"def sampleWithReplacement(population, choiceSize):\n\n n = len(population)\n _random, _int = random.random, int # speed hack\n return [_int(_random()*n) for _ in itertools.repeat(None, choiceSize)]",
"def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1",
"def sample_gp_pred(self, nsamp, input_list, lv=None):\n x_pred = np.stack(input_list)\n if lv is None:\n if (\n self.params.model_str == 'optfixedsig'\n or self.params.model_str == 'opt'\n or self.params.model_str == 'fixedparam'\n ):\n lv = self.sample_list[0]\n elif (\n self.params.model_str == 'samp'\n or self.params.model_str == 'sampfixedsig'\n ):\n lv = self.sample_list[np.random.randint(len(self.sample_list))]\n postmu, postcov = gp_post(\n self.data.x,\n self.data.y,\n x_pred,\n lv.ls,\n lv.alpha,\n lv.sigma,\n self.params.kernel,\n )\n single_post_sample = sample_mvn(postmu, postcov, 1).reshape(-1)\n pred_list = [\n single_post_sample for _ in range(nsamp)\n ] #### TODO: instead of duplicating this TS, sample nsamp times from generative process (given/conditioned-on this TS)\n return list(np.stack(pred_list).T)",
"def sample_from_probabilities(probabilities, topn=ALPHASIZE):\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0\n p = p / np.sum(p)\n return np.random.choice(ALPHASIZE, 1, p=p)[0]",
"def _mutate(self,arr,p_mut):\n mut = np.random.random_sample(arr.shape)<p_mut\n no_mut = ~mut\n mut_val = np.random.uniform(low=self.minval,high=self.maxval,size=arr.shape)\n return (no_mut*arr) + (mut*mut_val)",
"def random_value(self, selected_vals):\n pass",
"def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)",
"def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)"
] | [
"0.62992084",
"0.5945291",
"0.57297397",
"0.55846256",
"0.5577901",
"0.5562105",
"0.54901296",
"0.539993",
"0.5361088",
"0.5338176",
"0.5308725",
"0.53033537",
"0.5288212",
"0.52389336",
"0.52308893",
"0.52133656",
"0.520034",
"0.5190075",
"0.5185366",
"0.5180876",
"0.51693255",
"0.5142584",
"0.5121982",
"0.5107681",
"0.50763744",
"0.50434035",
"0.5041478",
"0.50366807",
"0.5031274",
"0.5031274"
] | 0.6719965 | 0 |
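The `Sampler` class that `sample_probabilities` returns is not shown in this row. A plausible implementation, consistent with it being constructed from `np.cumsum(pvals)` (the class body below is an assumption, not the original), is inverse-CDF sampling via `np.searchsorted`:

import numpy as np

class Sampler:
    # cum_weights need not end at 1.0; the last entry is the total mass,
    # which is why the docstring says pvals need not be normalized.
    def __init__(self, cum_weights: np.ndarray):
        self.cum_weights = cum_weights
        self.total = cum_weights[-1]

    def __call__(self) -> int:
        # Draw u uniformly in [0, total) and return the first bin whose
        # cumulative weight exceeds u (roulette-wheel selection).
        u = np.random.random() * self.total
        return int(np.searchsorted(self.cum_weights, u, side="right"))

draw = Sampler(np.cumsum([2.0, 1.0, 1.0]))   # unnormalized weights
counts = np.bincount([draw() for _ in range(10_000)], minlength=3)
# counts should come out in roughly a 2:1:1 ratio.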
Sample from the categorical distribution using `pvals`. | def categorical(pvals: np.ndarray) -> int:
return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))",
"def sample_categorical(distribution):\n sample = random.random()\n for event, prob in distribution.items():\n if sample < prob:\n return event\n sample -= prob\n raise ValueError('sum of distribution less than one')",
"def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r",
"def __call__(self, *args):\n r = np.random.rand(*args)\n if type(r) is float:\n samples = self.values[(r < self.p).nonzero()[0][0]]\n elif type(r) is np.ndarray:\n samples = np.array(\n [self.values[np.nonzero(x < self.p)[0][0]] \n for x in r.flat]).reshape(r.shape)\n return samples",
"def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()",
"def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def sample(self):\n\n # pick sample type according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()",
"def categorical_sample(prob_n, np_random = None):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np.random.rand()).argmax()",
"def random_choice(p, size):\n k = p.shape[-1]\n\n if p.ndim > 1:\n # If p is an nd-array, the last axis is interpreted as the class\n # probability. We must iterate over the elements of all the other\n # dimensions.\n # We first ensure that p is broadcasted to the output's shape\n size = to_tuple(size) + (1,)\n p = np.broadcast_arrays(p, np.empty(size))[0]\n out_shape = p.shape[:-1]\n # np.random.choice accepts 1D p arrays, so we semiflatten p to\n # iterate calls using the last axis as the category probabilities\n p = np.reshape(p, (-1, p.shape[-1]))\n samples = np.array([np.random.choice(k, p=p_) for p_ in p])\n # We reshape to the desired output shape\n samples = np.reshape(samples, out_shape)\n else:\n samples = np.random.choice(k, p=p, size=size)\n return samples",
"def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p",
"def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p",
"def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p",
"def sample(self, policy_params, **kwargs):\n return self.head.sample(policy_params, **kwargs)",
"def discrete_sample(p, n_samples=None, rng=np.random):\n\n # check distribution\n # assert isdistribution(p), 'Probabilities must be non-negative and sum to one.'\n\n one_sample = n_samples is None\n\n # cumulative distribution\n c = np.cumsum(p[:-1])[np.newaxis, :]\n\n # get the samples\n r = rng.rand(1 if one_sample else n_samples, 1)\n samples = np.sum((r > c).astype(int), axis=1)\n\n return samples[0] if one_sample else samples",
"def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1",
"def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)",
"def sample(pi, sigma, mu):\n # print(\"sample: pi:\", pi.size(), pi)\n categorical = Categorical(pi)\n pis = list(categorical.sample().data)\n sample = Variable(sigma.data.new(sigma.size(0), sigma.size(2)).normal_())\n for i, idx in enumerate(pis):\n sample[i] = sample[i].mul(sigma[i,idx]).add(mu[i,idx])\n return sample",
"def select_five_categories(prob_dist_dict):\n # For clarity, save keys as labels and values as probabilities.\n labels = list( prob_dist_dict.keys() )\n probs = list( prob_dist_dict.values() )\n\n # Use numpy's .choice() to return a label based on the given weight.\n return list( np.random.choice(labels, 5, p=probs) )",
"def prob_choice(p):\n \n return np.random.random_sample() < p",
"def sample_data(_,\n val,\n sampling_strategy=spec.SamplingStrategy.UNDERSAMPLE,\n side=0):\n\n if sampling_strategy == spec.SamplingStrategy.UNDERSAMPLE:\n random_sample_data = random.sample(val, side)\n elif sampling_strategy == spec.SamplingStrategy.OVERSAMPLE:\n random_sample_data = random.choices(val, k=side)\n else:\n raise ValueError(\"Invalid value for sampling_strategy variable!\")\n\n for item in random_sample_data:\n yield item",
"def _sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)",
"def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j",
"def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())",
"def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)",
"def sample(self, s):\n rng = np.random.default_rng()\n return rng.choice(np.arange(self.n_actions), p=self.eval(s))",
"def _get_random_sample(self):\n p=np.zeros(len(self.dim_ranges))\n for i in range(len(self.dim_ranges)):\n temp=np.linspace(self.dim_ranges[i][0],self.dim_ranges[i][1],1000)\n p[i]=np.random.choice(temp,1,True,None)\n\n return p",
"def sample(x, p=None):\n s = np.random.random_sample()\n if p is None:\n return x[int(s*len(x))]\n else:\n p = np.cumsum(p)\n p = p / float(p[-1])\n return x[sum(s >= p)]",
"def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)",
"def sample_transitions(self, val) -> None:\n\n # get values\n states = val.states\n pi_conc = val.transitions_conc\n num_states = val.num_states\n\n # count the number of each transition that occurs\n counts = np.zeros((num_states + 1, num_states))\n for i in range(num_states):\n counts[-1, i] = np.sum(states[:, :, 0] == i)\n for j in range(num_states):\n counts[i, j] = np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j))\n counts[-1, -1] = 0 # fluorophores starting photobleached are interpretted as load off only\n\n # sample from dirichlet distribution\n val.transitions = Dirichlet.sample(counts + pi_conc)\n val.P = self.posterior(val)\n\n return"
] | [
"0.6776044",
"0.650533",
"0.64588654",
"0.6282057",
"0.6240135",
"0.6240135",
"0.62277555",
"0.6160603",
"0.6149961",
"0.60994315",
"0.60755867",
"0.60755867",
"0.60755867",
"0.6050657",
"0.6038272",
"0.6014659",
"0.5976859",
"0.5965328",
"0.59274614",
"0.5909927",
"0.58288866",
"0.5760419",
"0.57574266",
"0.5740209",
"0.5732299",
"0.5724458",
"0.5709076",
"0.56702936",
"0.56483847",
"0.5647799"
] | 0.7592371 | 0 |
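As the inline comment in `categorical` notes, the cumulative-sum sampler is faster than `np.argmax(np.random.multinomial(1, normalize(pvals)))`. The two draws are identically distributed; a sketch comparing them (assuming normalized `pvals` for the multinomial path):

import numpy as np

pvals = np.array([0.1, 0.6, 0.3])

# Multinomial path from the comment: one trial, argmax of the count vector.
slow = int(np.argmax(np.random.multinomial(1, pvals)))

# Cumulative-sum path: invert the CDF with a single uniform draw.
fast = int(np.searchsorted(np.cumsum(pvals), np.random.random(), side="right"))

# Both are indices in {0, 1, 2} with P(index == i) == pvals[i].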
Convert a population (list of observations) to a CDF. | def population2cdf(population: np.ndarray) -> np.ndarray:
population = np.sort(population)
return np.searchsorted(population, population, side="right") / len(population) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def cdf(self,x):\n return self.categoricalDist.cdf(x)",
"def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue",
"def cdf(self, X, Y):\n assert self.fitted, \"model must be fitted to compute likelihood score\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=False)\n p = self.sess.run(self.cdf_, feed_dict={self.X_ph: X, self.Y_ph: Y})\n assert p.ndim == 1 and p.shape[0] == X.shape[0]\n return p",
"def cdf(self, data, survival=False, **kwargs):\n\n data = np.array(data)\n if not data.any():\n return array([np.nan]), array([np.nan])\n\n #data = trim_to_range(data, xmin=xmin, xmax=xmax)\n\n n = float(len(data))\n\n data = np.sort(data)\n all_unique = not( any( data[:-1]==data[1:] ) )\n\n if all_unique:\n CDF = np.arange(n)/n\n else:\n #This clever bit is a way of using searchsorted to rapidly calculate the\n #CDF of data with repeated values comes from Adam Ginsburg's plfit code,\n #specifically https://github.com/keflavich/plfit/commit/453edc36e4eb35f35a34b6c792a6d8c7e848d3b5#plfit/plfit.py\n CDF = np.searchsorted(data, data,side='left') / n\n unique_data, unique_indices = np.unique(data, return_index=True)\n data=unique_data\n CDF = CDF[unique_indices]\n\n if survival:\n CDF = 1-CDF\n return data, CDF",
"def cdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.cdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )",
"def cdf(weights):\r\n\treturn np.cumsum(weights) / sum(weights)",
"def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)",
"def cdf(self, value):\n return self._normal.cdf(value)",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def test_cdf(log_prob_coo):\n\n offset_dict = log_prob_coo['offsets']\n\n # the input\n print(log_prob_coo)\n print('input log probs')\n print(log_prob_sparse_to_dense(log_prob_coo['coo']))\n\n # with this shape converter, we get one row, where each value is one m\n converter = IndexConverter(total_n_cells=1,\n total_n_genes=log_prob_coo['coo'].shape[0])\n\n # set up and estimate\n estimator = ThresholdCDF(index_converter=converter)\n noise_csr = estimator.estimate_noise(noise_log_prob_coo=log_prob_coo['coo'],\n noise_offsets=offset_dict,\n q=0.5)\n\n # output\n print('dense noise count estimate, per m')\n out_per_m = np.array(noise_csr.todense()).squeeze()\n print(out_per_m)\n print('truth')\n print(log_prob_coo['cdfs'])\n\n # test\n np.testing.assert_array_equal(out_per_m, log_prob_coo['cdfs'])",
"def icdf(self, value):\n return self._normal.icdf(value)",
"def ecdf(data):\n x = np.sort(data)\n cdf = np.linspace(0, 1, len(x))\n return cdf, x",
"def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] -= pdf[:-1].copy()\n return pdf",
"def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)",
"def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf",
"def ca_to_coils_second_df(agent_df):",
"def getCDF(self):\n return self.cdfSample",
"def csv_to_cdf(metadata):\n\n basefile = metadata[\"basefile\"]\n\n try:\n ds = read_exo(basefile + \".csv\", skiprows=metadata[\"skiprows\"])\n except UnicodeDecodeError:\n # try reading as Mac OS Western for old versions of Mac Excel\n ds = read_exo(\n basefile + \".csv\", skiprows=metadata[\"skiprows\"], encoding=\"mac-roman\"\n )\n\n metadata.pop(\"skiprows\")\n\n # write out metadata first, then deal exclusively with xarray attrs\n ds = utils.write_metadata(ds, metadata)\n\n del metadata\n\n ds = utils.ensure_cf(ds)\n\n ds = utils.shift_time(ds, 0)\n\n # configure file\n cdf_filename = ds.attrs[\"filename\"] + \"-raw.cdf\"\n\n ds.to_netcdf(cdf_filename, unlimited_dims=[\"time\"])\n\n print(\"Finished writing data to %s\" % cdf_filename)\n\n return ds",
"def cdf(data, args):\n return Plot._dist(data, args)",
"def cdf(self, points):\n if self._y_cdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )",
"def cdf(x, point):\n raise NotImplementedError(\"The cdf method has not yet been implemented.\")",
"def pmf2cdf(pdf: np.ndarray) -> np.ndarray:\n\n cdf = np.cumsum(pdf)\n return cdf / cdf[-1]",
"def build_cdf(self,\n label: str, \n weights : str = None) -> np.array :\n\n if not weights == None:\n table = self.sample.sort_values(label)\n w = table[weights].values\n return np.array([table[label].values, np.cumsum(w) / np.sum(w)])\n else:\n return np.array([self.sample.sort_values(label)[label].values, \n (np.arange(len(self.sample)) + 1) / len(self.sample)])",
"def get_counties(popdf: pd.DataFrame, jhucov: str, regionf: str):\n\n\tdfcov = pd.read_csv(jhucov, usecols=JHUC_COLNUM, dtype=JHUC_DTYPE, parse_dates=[3],\n\t\tdayfirst=False, infer_datetime_format=True)\n\tdfcov['Last_Update']= pd.to_datetime(dfcov['Last_Update'], format='%m/%d/%y', exact=True)\n\tdfcov = dfcov.rename(JHUC_RENAM)\n\t# deal with blank county FIPS, primarily in UT, do_regions handles these:\n\tdfcov = do_regions(dfcov, regionf)\n\tdfcov.dropna(inplace=True, subset=['FIPS'])\n\tdfcov.set_index('FIPS', drop=False, inplace=True, verify_integrity=True)\n\tdfcov.sort_index(inplace=True)\n\n\tdf = dfcov.combine_first(popdf)\n\tdf['DeathstoPop'] = 100*(df['Deaths'] / df['Pop'])\n\tdf['CasestoPop'] = 100*(df['Confirmed'] / df['Pop'])\n\t# cleanup on aisle 'floats with NaN's'\n\tdf['DeathstoPop'].fillna(value=0, inplace=True)\n\tdf['CasestoPop'].fillna(value=0, inplace=True)\n\tdf['DeathstoPop'] = pd.to_numeric(df['DeathstoPop'])\n\tdf['CasestoPop'] = pd.to_numeric(df['CasestoPop'])\n\treturn df",
"def test_cumulative_distribution_fit_df_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()",
"def cdf(array, figsize, color, label, xlabel, ylabel, title, textsize, xsize, ysize, loc):\r\n fig, ax = plt.subplots(figsize=figsize)\r\n x = np.sort(array)\r\n y = np.array(range(len(array)))/float(len(array))*100 \r\n ax.plot(x, y, color = color, label = label) # plot the CDF\r\n ax.set_title(title, weight = 'bold', size = textsize)\r\n ax.set_xlabel(xlabel, weight = 'bold', size = textsize)\r\n ax.set_ylabel(ylabel, weight = 'bold', size = textsize)\r\n plt.xticks(fontsize = xsize)\r\n plt.yticks(fontsize = ysize)\r\n plt.legend(loc = loc)",
"def gen_population(self):\n df = self.get_df()\n idd_list = list(set(df['idd'].to_list()))\n date_list = df['ft_data_dt'].to_list()\n st_data_dt = min(date_list)\n end_data_dt = max(date_list)\n self.set_st_data_dt(st_data_dt)\n self.set_end_data_dt(end_data_dt)\n date_series = pd.date_range(*(pd.to_datetime([st_data_dt, end_data_dt]) + pd.offsets.MonthEnd()), freq='M', name='ft_data_dt')\n date_frame = date_series.to_frame()\n idd_series = pd.Series(data=idd_list, name='idd')\n idd_frame = idd_series.to_frame()\n date_frame['key'] = 0\n idd_frame['key'] = 0\n population = idd_frame.merge(date_frame, on='key', how='outer').drop(columns=['key'])\n self.set_population(population)"
] | [
"0.6000258",
"0.56762284",
"0.56762284",
"0.55965334",
"0.54399467",
"0.54305595",
"0.5391821",
"0.5380843",
"0.53685206",
"0.5361804",
"0.53427804",
"0.5326272",
"0.52933216",
"0.5286579",
"0.5269808",
"0.5198982",
"0.5191929",
"0.5165977",
"0.5149848",
"0.51354104",
"0.5111973",
"0.5103631",
"0.5088575",
"0.5079822",
"0.5057541",
"0.5044703",
"0.503962",
"0.5026708",
"0.50149685",
"0.5001102"
] | 0.62257254 | 0 |
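`population2cdf` evaluates the empirical CDF at each observation: `side="right"` makes ties count, so equal observations share the value P(X <= x). A quick check of that tie handling:

import numpy as np

population = np.sort(np.array([3.0, 1.0, 2.0, 2.0]))      # [1, 2, 2, 3]
cdf = np.searchsorted(population, population, side="right") / len(population)
# Both 2s get 0.75 because P(X <= 2) counts both of them.
assert np.allclose(cdf, [0.25, 0.75, 0.75, 1.0])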
Convert a discrete PDF (a PMF) into a discrete CDF. | def pmf2cdf(pdf: np.ndarray) -> np.ndarray:
cdf = np.cumsum(pdf)
return cdf / cdf[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] -= pdf[:-1].copy()\n return pdf",
"def pdf(self,x):\n return self.categoricalDist.pdf(x)",
"def pdf(self,x):\n if x in self.values:\n pdfValue = self.mapping[x]\n else:\n if self.isFloat:\n vals = sorted(list(self.values))\n idx = bisect.bisect(vals, x)\n pdfValue = self.mapping[list(vals)[idx]]\n else:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate pdf for ' + str(x))\n return pdfValue",
"def contrast_pdf(contdc, contdc_sigma, dc_tru, dc_sigma, contrast_axis, npts=8000, display=False):\n\n dc_axis = np.linspace(dc_tru - 8 * dc_sigma, dc_tru + 8 * dc_sigma, npts)\n dc_mesh, contrast_mesh = np.meshgrid(dc_axis, contrast_axis)\n contdc_mesh = dc_mesh * contrast_mesh\n\n pdf_contdc = scipy.stats.rice.pdf(contdc_mesh, contdc / contdc_sigma, scale=contdc_sigma, loc=0.)\n pdf_dc, _ = norm_pdf(dc_tru, dc_sigma, x=dc_mesh)\n joint_pdf = pdf_contdc * pdf_dc\n\n # normalise joint PDF\n area = np.trapz(np.trapz(joint_pdf, contdc_mesh, axis=0), dc_axis)\n joint_pdf /= area\n\n # calculate the ratio pdf\n integrand = abs(dc_mesh) * joint_pdf\n contrast_pdf = np.trapz(integrand, dc_mesh, axis=1)\n\n if display:\n plt.figure()\n plt.imshow(pdf_contdc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(pdf_dc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(joint_pdf)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(integrand)\n plt.colorbar()\n\n plt.figure()\n plt.plot(contrast_axis, contrast_pdf)\n\n plt.show()\n\n return contrast_pdf",
"def pdf_from_cdf(data, idx, what):\n\n cdf = data[what + '_sum'].cumsum() / data[what + '_sum'].sum()\n cdfi = scipy.interpolate.interp1d(cdf.index, cdf, 'linear', bounds_error=False)(idx)\n pdfi = np.hstack((cdfi[0], np.diff(cdfi) / np.diff(idx)))\n return pdfi",
"def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def cdf(self,x):\n return self.categoricalDist.cdf(x)",
"def c_pdf(self, x):\n\n assert x > 0\n\n # shortcut\n shape = self.shape\n loc = self.loc\n scale = self.scale\n xn = np.subtract(x, loc) / scale\n\n # update x\n ft = shape * xn ** (shape - 1) * np.exp(-xn ** shape)\n return ft / scale",
"def cdf(x, point):\n raise NotImplementedError(\"The cdf method has not yet been implemented.\")",
"def CDFconvertToDistr(self,pts):\n return self._convertCdfPointsToDistr(self._convertStdPointsToCdf(pts))",
"def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')",
"def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. - cumprob)[::-1]",
"def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')",
"def pdf_comp(self, x, cid, log = False):\n if self.mode == 'diag':\n va = self.va[cid]\n elif self.mode == 'full':\n va = self.va[cid*self.d:(cid+1)*self.d]\n else:\n raise GmParamError(\"\"\"var mode %s not supported\"\"\" % self.mode)\n\n if log:\n return D.gauss_den(x, self.mu[cid], va, log = True) \\\n + N.log(self.w[cid])\n else:\n return D.multiple_gauss_den(x, self.mu[cid], va) * self.w[cid]",
"def pdf(self,x):\n if self.transformation:\n pdfValue = self.pdfInTransformedSpace(x)\n else:\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def pdf(self, x):\n raise NotImplementedError",
"def icdf(self, value):\n return self._normal.icdf(value)",
"def _convertCdfPointsToDistr(self,pts):\n try:\n return self.ppf(pts.real)\n except TypeError:\n return list(self.ppf(x) for x in pts)",
"def _compute_single_pdf(self, **kwargs):\n raise NotImplementedError",
"def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue",
"def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue",
"def pdf(self,x):\n returnPdf = self._distribution.pdf(x)\n return returnPdf",
"def _dncb_pdf(x, a1, a2, mu1, mu2):\n out = st.beta.pdf(x, a1, a2, loc=0)\n out *= np.exp(-mu1-mu2)\n out *= hchg(x, a1, a2, mu1, mu2)\n return out",
"def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf",
"def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue",
"def _convert(self):\n logger.info(\"Converting conformers to density\")\n logger.debug(\"Masking\")\n self._transformer.reset(full=True)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self._transformer.mask(self._rmask)\n mask = self._transformer.xmap.array > 0\n self._transformer.reset(full=True)\n\n nvalues = mask.sum()\n self._target = self.xmap.array[mask]\n logger.debug(\"Density\")\n nmodels = len(self._coor_set)\n self._models = np.zeros((nmodels, nvalues), float)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self.conformer.b = self._bs[n]\n self._transformer.density()\n model = self._models[n]\n model[:] = self._transformer.xmap.array[mask]\n np.maximum(model, self.options.bulk_solvent_level, out=model)\n self._transformer.reset(full=True)",
"def pdf(self):\n\n pdf = PDF(self.valuesArray)\n return pdf.axes[0], pdf.pdf"
] | [
"0.69857043",
"0.61699057",
"0.59049094",
"0.5864199",
"0.58318394",
"0.57629466",
"0.573494",
"0.5734838",
"0.56632376",
"0.5655967",
"0.56498164",
"0.56412953",
"0.558819",
"0.55852294",
"0.5578906",
"0.5552537",
"0.5533782",
"0.5533782",
"0.551519",
"0.5459205",
"0.5448148",
"0.5439202",
"0.54346496",
"0.54346496",
"0.5407957",
"0.5399415",
"0.5383903",
"0.5379233",
"0.5377733",
"0.53326327"
] | 0.6718552 | 1 |
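`pmf2cdf` divides by the final cumulative value, so the input weights need not sum to one; taking first differences of the result recovers the normalized PMF (the inverse operation, compare `cdf_to_pdf` among the negatives above):

import numpy as np

weights = np.array([2.0, 1.0, 1.0])           # unnormalized PMF
cdf = np.cumsum(weights)
cdf = cdf / cdf[-1]
assert np.allclose(cdf, [0.5, 0.75, 1.0])

pmf = np.diff(cdf, prepend=0.0)               # round trip back to the PMF
assert np.allclose(pmf, weights / weights.sum())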
Calculate stochastic matrix `pm` to the power of infinity, by finding the eigenvector which corresponds to the eigenvalue 1. | def inf_matrix_power(pm: np.ndarray, dtype=np.float64) -> np.ndarray:
w, v = np.linalg.eig(
pm
) # scipy.linalg.eig would probably be faster as it can return the left and right eigenvectors
if not np.isclose(w[0], 1.0):
raise ValueError("The first eigenvalue is not none. Is this a right stochastic matrix?")
vi = np.linalg.inv(v)
d = np.zeros(pm.shape[0], dtype=dtype)
d[0] = 1.0
return np.matmul(v, np.matmul(np.diag(d), vi)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0",
"def test_eigenvalues_of_too_few_points_results_in_0():\n a = np.array([5])\n pc = create_point_cloud(a, a, a)\n\n compute_features(pc, [[0]], pc, [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n eigen_val_123 = np.array([pc[keys.point]['eigenv_{}'.format(i)]['data'] for i in [1, 2, 3]])\n assert not np.any(np.isnan(eigen_val_123))\n assert not np.any(np.isinf(eigen_val_123))",
"def eigensystem(mat):\n e, v = numpy.linalg.eig(mat)\n\n # `eig` returns complex results but we know all of the\n # eigenstates have real energy.\n e = numpy.real(e)\n\n items = zip(e, v.T)\n items = sorted(items, key = operator.itemgetter(0))\n e, v = zip(*items)\n\n return (e, v)",
"def eigensolve(self, epsilon=0.85):\n raise NotImplementedError(\"eigensolve Incomplete\")",
"def P(self):\n self.eigenmatrix()",
"def posdef_eig_self_adjoint(mat):\n evals, evecs = tf.self_adjoint_eig(mat)\n evals = tf.abs(evals) # Should be equivalent to svd approach.\n\n return evals, evecs",
"def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]",
"def posdef_eig(mat):\n return posdef_eig_functions[POSDEF_EIG_METHOD](mat)",
"def initial_energy(spin_matrix, n_spins):\n\n E = 0\n M = 0\n\n for i in range(n_spins):\n for j in range(n_spins):\n\n left = spin_matrix[i-1, j] if i>0 else spin_matrix[n_spins - 1, j]\n above = spin_matrix[i, j-1] if j>0 else spin_matrix[i, n_spins - 1]\n\n E -= spin_matrix[i,j]*(left+above)\n M += spin_matrix[i,j]\n\n return E, M",
"def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors",
"def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E",
"def eigenvects(mat):\n # Check if symbols are present\n if hasSymbols(mat):\n return mat.eigenvects()\n # Purely numeric matrix\n newMat = recursiveEvaluate(mat.as_mutable())\n return newMat.eigenvects()",
"def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None",
"def Problem4(n):\n A = Problem2(n)\n eig = min(sl.eigs(A.asfptype(), which='SM')[0])\n \n print \"lamba*n^2 approaches pi^2 as n goes to infinity\"\n return eig*n**2",
"def eigCent(A):\n lam,V = np.linalg.eig(A)\n v = V[:,np.argmax(lam)]\n v = v*(1./v[0])\n return v",
"def solve(mat, y):\n reduced = gaussian_elim(mat)\n sol = np.zeros(shape=(mat.shape[0]))\n S = 0\n for i in reversed(range(len(sol))):\n sol[i] = (y[i]-S) / reduced[i][i]\n S += y[i] - S\n return sol",
"def solve_for_eigenvectors(matrix, num, mode=\"general\"):\n\n # Construct a sparse matrix\n if mode == \"general\":\n return linalg.eigs(matrix, num)\n\n if mode == \"symmetric\":\n return linalg.eigsh(matrix, num)",
"def calculate_biggest_eigenvalue(cls, covariance_matrix):\n timer = TimerHandler()\n timer.start(\"eigen2\")\n eigvals = scipy.linalg.eigh(covariance_matrix, \n eigvals_only = True, \n eigvals = (covariance_matrix.shape[0] -1,covariance_matrix.shape[0]-1), \n overwrite_a = True)\n return eigvals[0]",
"def heavy_fixCM_eigvals(NP, b, c, params):\n l = params['l']\n k = params['k']\n I3 = params['I3']\n # Here, omega_3 is just the MAGNITUDE, not signed\n w3 = np.abs(params['w3'][0])\n gn = params['Mm'] * params['g']\n\n # Check output if small system\n print 'gn = ', gn\n print 'b = ', b\n print 'c = ', c\n\n if NP == 1:\n pass\n elif NP == 2:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n -1j * l * gn / (I3 * w3),\n l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n -l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3)\n ])\n print 'exact_eigvals are =', eigvals\n return eigvals\n elif NP == 3:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0., 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0., 0., 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn - 2. * (-1) ** (b) * l ** 2 * k) / (I3 * w3), 0., \\\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [0., 0., (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n # -1j*l*gn/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - 3. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j-3.*l*k*(-1)**(b) - gn)/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j - l*k*(-1)**(b) - gn)/(I3*w3)\n ])\n return eigvals\n else:\n return np.array([])",
"def power_iteration(X):\n #X, languages=prepare_data_matrix()\n M=X\n M=M-np.mean(M, axis=0)\n M=np.cov(M, rowvar=False) #the covariance matrix, size 100x100\n x=np.ones(len(M)) #a random starting vector composed of 100 ones, it only cant be of all zeros\n difference=np.ones(len(x))\n\n #print(np.linalg.norm(difference))\n while np.linalg.norm(difference) >= 10**-5: #we iterate until the difference between the previous and the new x is really small, lets say 10^-5\n #print(x.T.shape)\n oldx=x\n z=M.dot((x.T))\n x=z.T\n x=x/np.linalg.norm(x)\n difference=np.linalg.norm(oldx-x)\n #the x that we get at the end of this loop is our eigenvector\n\n #print(x.dot(M).shape)\n #print(x.shape)\n y=(x.dot(M)).dot(x.T) #y is the corresponding eigenvalue to the eigenvector x\n \n return x, y",
"def current(edges, transition_matrix):\n ### Calculate the state frequecies ###\n # Eigenvalues and Eigenvectors of transition matrix\n vals, vl, vr = sp.linalg.eig(transition_matrix, left=True)\n # Find the eigenvalue that == 1\n index = list(vals).index(1)\n state_freq = vl[:,index]\n\n committor_plus = np.linalg.eig\n\n\n ### Calculate the flux matrix ###\n flux_matrix = np.multiply(transition_matrix, state_freq)\n return flux_matrix / flux_matrix.sum(axis=1)",
"def get_eigen_value(A, v):\n Av = np.dot(A, v)\n print(\"Mag v, should be 1:\", mag(v))\n lmb = mag(Av) / mag(v)\n return lmb",
"def el_ph(om,eig,q,zs,mass,eps,rG,nmodes,nqpt,nat):\n\n # Initiate\n g = np.zeros((nqpt,nmodes),dtype=complex)\n\n # Initiate q+G\n qpG = np.zeros((nqpt,3))\n\n q_c = q[:,0:3] \n q2 = np.zeros(nqpt)\n N = 5 # Initial size of G-point grid used for sum\n\n alpha = 5.0 # Convergence parameter\n\n for nn in range(-N,N+1):\n for mm in range(-N,N+1):\n for ll in range(-N,N+1):\n #\n for ic in range(3):\n qpG[:,ic] = q_c[:,ic] + nn*rG[0,ic] + mm*rG[1,ic] + ll*rG[2,ic]\n # IMPORTANT : Put a check here that qpG is nonzero! (break the loop if so)\n # Denominator\n q2[:] = 0.0\n for ia in range(3): \n for ib in range(3):\n q2[:] += qpG[:,ia]*eps[ia,ib]*qpG[:,ib]\n # \n inv_q2 = 1.0 / (q2 + 1e-10)\n arg = np.exp(-0.25 * np.sum(qpG**2, axis=1) / alpha) * inv_q2 # exp((q+G)^2/4a)\n \n for imod in range(nmodes):\n for ia in range(3):\n for ib in range(3):\n for iat in range(nat):\n g[:,imod] += arg[:]*qpG[:,ia]*zs[iat,ia,ib]*eig[imod,:,iat,ib] \\\n / np.sqrt(2.0*mass[iat]*np.abs(om[imod,:])+1e-10)\n\n return g",
"def compute_steady_state_pi(adj_mat):\n\n return 1. * np.sum(adj_mat, axis=0) / np.sum(adj_mat) # d_j / 2|E|",
"def test_em_nonlinear(self):\n z_matrix = np.array(\n [[0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.16666667],\n [0.03333333, 0.08333333, 0.00000000],\n [0.03333333, 0.08333333, 0.16666667],\n [0.06666667, 0.16666667, 0.00000000],\n [0.06666667, 0.16666667, 0.16666667],\n [0.10000000, 0.16666667, 0.00000000],\n [0.10000000, 0.16666667, 0.16666667],\n [0.13333333, 0.08333333, 0.00000000],\n [0.13333333, 0.08333333, 0.16666667],\n [0.16666667, 0.00000000, 0.00000000],\n [0.16666667, 0.00000000, 0.16666667]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.20724531, 0.31710188, 0.47565280],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)",
"def gaussianElimKer(M, zero, one):\n # V satisfies the invariant\n # M = V M_0\n V = [Polynomial([zero] * i + [one]) for i in range(len(M))]\n pivots = [None] * (len(M) + 1)\n for l in range(len(M)):\n while M[l].deg >= 0:\n idp = M[l].deg\n if pivots[idp] is None:\n pivots[idp] = l\n break\n else:\n c = M[l][idp] / M[pivots[idp]][idp]\n M[l] -= c * M[pivots[idp]]\n V[l] -= c * V[pivots[idp]]\n else:\n # If a line is null, we found an element of the kernel\n return V[l]\n return None",
"def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v",
"def evd_spd_decomposition(P):\n\t\n\t# Assert Matrix P is symetric\n\tassert check_symmetric(P)\t\n\n\t# singular value decomposition\n\tL, Q = np.linalg.eig(P)\n\n\t#if L and Q returned in incorrect order\n\t#L = np.sort(L)\n\t#Q = Q[:, L.argsort()]\n\n\t# Create matrix W = Vtsqrt(diagnol(D))\n\tM = np.dot(Q, np.sqrt(np.diag(L)))\n\n\treturn M",
"def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs",
"def eig(self,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num).toarray()\n eigvals, eigvecs = eigh(ham)\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(eigvals.size):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs"
] | [
"0.62551945",
"0.60196066",
"0.59203595",
"0.5908042",
"0.589194",
"0.5879424",
"0.5878952",
"0.5874368",
"0.5866997",
"0.5835588",
"0.58008105",
"0.5785766",
"0.5769242",
"0.576087",
"0.5740334",
"0.5668045",
"0.55842084",
"0.5565213",
"0.5563019",
"0.55508184",
"0.55072254",
"0.5490288",
"0.5480005",
"0.5477882",
"0.5471057",
"0.5466267",
"0.5446539",
"0.5446466",
"0.5414399",
"0.54142153"
] | 0.763889 | 0 |
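Note that `np.linalg.eig` does not sort its eigenvalues, which is why `inf_matrix_power` raises unless the eigenvalue 1 happens to come first. For an irreducible, aperiodic right stochastic matrix the limit can also be approximated by a large explicit power, which gives a cheap cross-check:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.5, 0.5]])                    # rows sum to 1 (right stochastic)

limit = np.linalg.matrix_power(P, 200)        # brute-force approximation of P^inf
pi = limit[0]                                 # every row converges to pi
assert np.allclose(pi @ P, pi)                # pi is the stationary distribution
assert np.allclose(limit[0], limit[1])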
Replace colored pixels with a `neutral_color`. The `ratio` defines the 'colorfulness' above which a pixel is replaced. I.e. if the `ratio` is 1 nothing will be replaced; if `ratio` is 0 only strict greys are kept unmodified. | def remove_color(img: np.ndarray, ratio: float, neutral_color: Tuple[int, int, int] = RGB_WHITE) -> None:
channels = img.shape[-1]
assert channels == 3, "Not a 3 channel color image"
norm = np.std(np.array(RGB_YELLOW)) # this is the same for all pure colors
sd = np.std(img, axis=-1)
img[sd > ratio * norm] = neutral_color | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ratio_to_rgb(ratio):\n b = 0\n if round(ratio, 1) == 0.5:\n r = 255\n g = 255\n elif ratio < 0.5:\n r = int(ratio * 2 * 255.0)\n g = 255\n else:\n r = 255\n g = int((1.0 - ratio) * 2 * 255.0)\n rgb = (r, g, b)\n\n return rgb",
"def set_neutral(self):\n\t\tself._head.set_pan(0.0)",
"def to_aspect_ratio_add_and_remove(image, target_ratio):\n height = image.shape[0]\n width = image.shape[1]\n ratio = width / height\n\n remove_top = 0\n remove_right = 0\n remove_bottom = 0\n remove_left = 0\n pad_top = 0\n pad_bottom = 0\n pad_left = 0\n pad_right = 0\n\n # loops here are inefficient, but easy to read\n i = 0\n if ratio < target_ratio:\n # vertical image, height > width\n while ratio < target_ratio:\n if i % 4 == 0:\n remove_top += 1\n height -= 1\n elif i % 4 == 2:\n remove_bottom += 1\n height -= 1\n elif i % 4 == 1:\n pad_right += 1\n width += 1\n else: # i % 4 == 3\n pad_left += 1\n width += 1\n ratio = width / height\n i += 1\n elif ratio > target_ratio:\n # horizontal image, width > height\n while ratio > target_ratio:\n if i % 4 == 0:\n remove_right += 1\n width -= 1\n elif i % 4 == 2:\n remove_left += 1\n width -= 1\n elif i % 4 == 1:\n pad_top += 1\n height += 1\n else: # i % 4 == 3\n pad_bottom += 1\n height += 1\n ratio = width / height\n i += 1\n\n # remove cols/rows\n if any([val > 0 for val in [remove_top, remove_right, remove_bottom, remove_left]]):\n image = image[remove_top:(height - remove_bottom), remove_left:(width - remove_right), ...]\n\n # add cols/rows (black)\n if any([val > 0 for val in [pad_top, pad_bottom, pad_left, pad_right]]):\n image = np.pad(image, ((pad_top, pad_bottom), \\\n (pad_left, pad_right), \\\n (0, 0)), \\\n mode=\"constant\")\n\n return image",
"def set_slide_neutral(self):\n print(\"Moving to neutral pose...\")\n joint_positions = deepcopy(self.neutral_joint_positions)\n\n joint_positions['right_j5'] = joint_positions['right_j5'] - np.pi / 2.\n self._right_arm.move_to_joint_positions(joint_positions)",
"def set_ratio(self, ratio: tuple) -> None:\r\n self.ratio = ratio",
"def src_set_ratio(state, new_ratio):\n return _lib.src_set_ratio(state, new_ratio) if state else None",
"def _update_classification_localization_weight_ratio(configs, ratio):\n meta_architecture = configs[\"model\"].WhichOneof(\"model\")\n if meta_architecture == \"faster_rcnn\":\n model = configs[\"model\"].faster_rcnn\n model.first_stage_localization_loss_weight = 1.0\n model.first_stage_objectness_loss_weight = ratio\n model.second_stage_localization_loss_weight = 1.0\n model.second_stage_classification_loss_weight = ratio\n if meta_architecture == \"ssd\":\n model = configs[\"model\"].ssd\n model.loss.localization_weight = 1.0\n model.loss.classification_weight = ratio",
"def undersample_majority(df, ratio=1.0, random_state=3):\n count_class_0, count_class_1 = df[\"Status\"].value_counts()\n df_class_0 = df[df[\"Status\"] == \"paid\"]\n df_class_1 = df[df[\"Status\"] == \"defaulted\"]\n # print(count_class_0)\n # print(count_class_1)\n df_class_0_under = df_class_0.sample(\n int(ratio * count_class_1), random_state=random_state\n )\n df_train_under = pd.concat([df_class_0_under, df_class_1], axis=0)\n # print(df_train_under['Status'].value_counts)\n return df_train_under",
"def generate_noise_image(self, content_image, noise_ratio=0.6):\n noise_image = np.random.uniform(-20, 20,\n (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype('float32')\n # White noise image from the content representation. Take a weighted average\n # of the values\n img = noise_image * noise_ratio + content_image * (1 - noise_ratio)\n return img",
"def _neutral(self) -> np.ndarray:\n # Get the neutral configuration of the actual model\n qpos = neutral(self.robot.pinocchio_model)\n\n # Make sure it is not out-of-bounds\n position_limit_lower = self.robot.position_limit_lower\n position_limit_upper = self.robot.position_limit_upper\n for idx, val in enumerate(qpos):\n lo, hi = position_limit_lower[idx], position_limit_upper[idx]\n if hi < val or val < lo:\n qpos[idx] = 0.5 * (lo + hi)\n\n # Return rigid/flexible configuration\n if self.simulator.use_theoretical_model:\n return qpos[self.robot.rigid_joints_position_idx]\n return qpos",
"def correct_rhohv(radar, rhohv_name=\"RHOHV\", snr_name=\"SNR\"):\n rhohv = radar.fields[rhohv_name][\"data\"].copy()\n snr = radar.fields[snr_name][\"data\"].copy()\n\n natural_snr = 10 ** (0.1 * snr)\n natural_snr = natural_snr.filled(-9999)\n rho_corr = rhohv * (1 + 1 / natural_snr)\n\n # Not allowing the corrected RHOHV to be lower than the raw rhohv\n rho_corr[np.isnan(rho_corr) | (rho_corr < 0) | (rho_corr > 1)] = 1\n try:\n rho_corr = rho_corr.filled(1)\n except Exception:\n pass\n\n return rho_corr",
"def skinPercent(*args, ignoreBelow: Union[float, bool]=0.0, normalize: bool=True, pruneWeights:\n float=0.0, relative: bool=True, resetToDefault: bool=True, transform:\n Union[AnyStr, bool]=\"\", transformMoveWeights: Union[AnyStr, List[AnyStr]]=\"\",\n transformValue: Union[List[AnyStr, float], List[List[AnyStr, float]]]=None,\n value: bool=True, zeroRemainingInfluences: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def clean_ratio(sub, total, default=0.0):\n return sub / total if total else default",
"def balance_all(self,\n majority_minority_ratio: int = 1,\n random_state: int or None = RANDOM_STATE\n ):\n\n self.balance_training(majority_minority_ratio, random_state=random_state)\n self.balance_testing(majority_minority_ratio, random_state=random_state)",
"def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph",
"def randomly_negate_level(value: Any) -> Any:\n return -value if ImageTransformationBase._toss_fair_coin() else value",
"def test_colormap_as_colors_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0), ax=ax, colors=\"cool\"\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n tol = (\n 3.2 if sys.platform == \"win32\" else 0.01\n ) # Fails on AppVeyor with RMS 3.143\n self.assert_images_similar(visualizer, remove_legend=True, tol=tol)",
"def remove_missing_values(train_data, test_data, ratio):\n missing_ratios = _get_missing_ratios(train_data)\n removed_attribute_indexes = list(\n map(\n lambda item: str(item[0] + 1),\n filter(lambda item: item[1] > ratio, enumerate(missing_ratios))))\n data_filter = Filter(\n classname=\"weka.filters.unsupervised.attribute.Remove\",\n options=[\"-R\", \",\".join(removed_attribute_indexes)])\n data_filter.inputformat(test_data)\n return data_filter.filter(train_data), data_filter.filter(test_data)",
"def test_colors_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0),\n ax=ax,\n colors=[\"red\", \"green\", \"blue\", \"indigo\", \"cyan\", \"lavender\"],\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)",
"def set_transparent(img):\n assert img.shape[-1] == 4\n white_pix = np.all(img == [255, 255, 255, 255], axis=-1)\n # print(white_pix)\n img[white_pix, -1] = 0\n # return img",
"def setup_ratio(args, ax, ax_ratio):\n main_ticks = ax.yaxis.get_major_ticks()\n main_ticks[0].label1.set_visible(False)\n ax.yaxis.set_label_coords(-0.12,1)\n ax_ratio.yaxis.set_label_coords(-0.12,.5)\n if args.logx:\n ax_ratio.set_xscale('log')\n if args.xlabel:\n ax_ratio.set_xlabel(tex_escape(args.xlabel), x=1, ha='right')\n if args.rlabel:\n ax_ratio.set_ylabel(args.rlabel)\n if args.limits:\n ax_ratio.set_xlim(args.limits[0],args.limits[1])\n if args.rmin is not None:\n ax_ratio.set_ylim(bottom=args.rmin)\n if args.rmax is not None:\n ax_ratio.set_ylim(top=args.rmax)\n ax_ratio.yaxis.grid(True)\n xmin, xmax, ymin, ymax = ax_ratio.axis()\n ax_ratio.yaxis.set_major_locator(ticker.MaxNLocator(3))\n ax_ratio.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n if not args.logx:\n ax_ratio.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n return",
"def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)",
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph",
"def update_classify(self, img, rho=0.01, threshold=2.5):\n self.update(img, rho)\n return self.classify(img, threshold)",
"def generate_noise_image(content_image, noise_ratio = CONFIG.NOISE_RATIO):\n\tnoise_img = np.random.uniform(-20,20,(1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\n\t# Setting the resulting image to be the weighted average of the content image and noise_image\n\tresult_img = noise_img * noise_ratio + content_image * (1 - noise_ratio)\n\n\treturn result_img",
"def random_img_to_gray(self, img, p = 0.5):\n if self.decision(p):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.zeros_like(img)\n img[:, :, 0] = gray\n img[:, :, 1] = gray\n img[:, :, 2] = gray\n return img",
"def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)",
"def line_ratio(ratio_name,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n if ratio_name == 'NII':\n line1,line2 = '[NII]122','[NII]205'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ratio = ratio[ratio != 0]\n label = '%s / %s' % (line1,line2)\n\n if ratio_name == 'OICII':\n line1,line2 = '[OI]63','[CII]'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ratio = ratio[ratio > 1e-2]\n ratio = np.log10(ratio[ratio != 0])\n label = 'log %s / %s' % (line1,line2)\n\n fig,ax = plt.subplots(figsize=(10,8))\n h = ax.hist(ratio,bins=10,color='orange')\n\n ax.set_xlabel(label,fontsize=15)\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/ratio_%s' % ratio_name,dpi=300)",
"def oversample_minority(df, ratio=1.0, random_state=3):\n count_class_0, count_class_1 = df[\"Status\"].value_counts()\n df_class_0 = df[df[\"Status\"] == \"paid\"]\n df_class_1 = df[df[\"Status\"] == \"defaulted\"]\n # print(count_class_0)\n # print(count_class_1)\n df_class_1_over = df_class_1.sample(\n int(ratio * count_class_0), replace=True, random_state=random_state\n )\n df_train_over = pd.concat([df_class_0, df_class_1_over], axis=0)\n # print(df_train_over['Status'].value_counts())\n return df_train_over"
] | [
"0.4983211",
"0.48789826",
"0.48116347",
"0.4737011",
"0.47265878",
"0.46818957",
"0.4571823",
"0.45462266",
"0.44600105",
"0.4397929",
"0.42731524",
"0.427132",
"0.4269558",
"0.42693788",
"0.42575642",
"0.42499575",
"0.42406985",
"0.42318156",
"0.4226033",
"0.4211065",
"0.41977745",
"0.41921994",
"0.41916838",
"0.41903993",
"0.41874236",
"0.41733915",
"0.41585425",
"0.41452932",
"0.41436285",
"0.41376895"
] | 0.7399279 | 0 |
np.broadcast_shapes requires `numpy==1.20.0`, which is not available for `python < 3.7`. | def broadcast_shapes(*shapes: Tuple[int, ...]) -> Tuple[int, ...]:
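# Fallback for NumPy < 1.20: materialize empty arrays and let np.broadcast derive the combined shape.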
arrays = [np.empty(shape) for shape in shapes]
return np.broadcast(*arrays).shape | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_broadcast_dims():\r\n test((1, 2, 3))\r\n test((2, 1, 3))\r\n test((2, 3, 1))\r\n test2((1, 2, 3))\r\n test2((2, 1, 3))\r\n test2((2, 3, 1))",
"def broadcast_shape(*shapes, **kwargs):\n strict = kwargs.pop(\"strict\", False)\n reversed_shape = []\n for shape in shapes:\n for i, size in enumerate(reversed(shape)):\n if i >= len(reversed_shape):\n reversed_shape.append(size)\n elif reversed_shape[i] == 1 and not strict:\n reversed_shape[i] = size\n elif reversed_shape[i] != size and (size != 1 or strict):\n raise ValueError(\n \"shape mismatch: objects cannot be broadcast to a single shape: {}\".format(\n \" vs \".join(map(str, shapes))\n )\n )\n return tuple(reversed(reversed_shape))",
"def broadcast_shapes(*args):\n x = list(np.atleast_1d(args[0])) if args else ()\n for arg in args[1:]:\n y = list(np.atleast_1d(arg))\n if len(x) < len(y):\n x, y = y, x\n x[-len(y):] = [j if i == 1 else i if j == 1 else i if i == j else 0\n for i, j in zip(x[-len(y):], y)]\n if not all(x):\n return None\n return tuple(x)",
"def _infer_ndim_bcast(ndim, shape, *args):\r\n\r\n # Find the minimum value of ndim required by the *args\r\n if args:\r\n args_ndim = max(arg.ndim for arg in args)\r\n else:\r\n args_ndim = 0\r\n\r\n if isinstance(shape, (tuple, list)):\r\n # there is a convention that -1 means the corresponding shape of a\r\n # potentially-broadcasted symbolic arg\r\n #\r\n # This case combines together symbolic and non-symbolic shape\r\n # information\r\n shape_ndim = len(shape)\r\n if ndim is None:\r\n ndim = shape_ndim\r\n else:\r\n if shape_ndim != ndim:\r\n raise ValueError('ndim should be equal to len(shape), but\\n',\r\n 'ndim = %s, len(shape) = %s, shape = %s'\r\n % (ndim, shape_ndim, shape))\r\n\r\n bcast = []\r\n pre_v_shape = []\r\n for i, s in enumerate(shape):\r\n if hasattr(s, 'type'): # s is symbolic\r\n bcast.append(False) # todo - introspect further\r\n pre_v_shape.append(s)\r\n else:\r\n if s >= 0:\r\n pre_v_shape.append(tensor.as_tensor_variable(s))\r\n bcast.append((s == 1))\r\n elif s == -1:\r\n n_a_i = 0\r\n for a in args:\r\n # ndim: _ _ _ _ _ _\r\n # ashp: s0 s1 s2 s3\r\n # i\r\n if i >= ndim - a.ndim:\r\n n_a_i += 1\r\n a_i = i + a.ndim - ndim\r\n if not a.broadcastable[a_i]:\r\n pre_v_shape.append(a.shape[a_i])\r\n bcast.append(False)\r\n break\r\n else:\r\n if n_a_i == 0:\r\n raise ValueError(('Auto-shape of -1 must overlap'\r\n 'with the shape of one of the broadcastable'\r\n 'inputs'))\r\n else:\r\n pre_v_shape.append(tensor.as_tensor_variable(1))\r\n bcast.append(True)\r\n else:\r\n ValueError('negative shape', s)\r\n # post-condition: shape may still contain both symbolic and\r\n # non-symbolic things\r\n if len(pre_v_shape) == 0:\r\n v_shape = tensor.constant([], dtype='int32')\r\n else:\r\n v_shape = tensor.stack(*pre_v_shape)\r\n\r\n elif shape is None:\r\n # The number of drawn samples will be determined automatically,\r\n # but we need to know ndim\r\n if not args:\r\n raise TypeError(('_infer_ndim_bcast cannot infer shape without'\r\n ' either shape or args'))\r\n template = reduce(lambda a, b: a + b, args)\r\n v_shape = template.shape\r\n bcast = template.broadcastable\r\n ndim = template.ndim\r\n else:\r\n v_shape = tensor.as_tensor_variable(shape)\r\n if ndim is None:\r\n ndim = tensor.get_vector_length(v_shape)\r\n bcast = [False] * ndim\r\n\r\n if (not (v_shape.dtype.startswith('int') or\r\n v_shape.dtype.startswith('uint'))):\r\n raise TypeError('shape must be an integer vector or list',\r\n v_shape.dtype)\r\n\r\n if args_ndim > ndim:\r\n raise ValueError(\r\n 'ndim should be at least as big as required by args value',\r\n (ndim, args_ndim), args)\r\n\r\n assert ndim == len(bcast)\r\n return ndim, tensor.cast(v_shape, 'int32'), tuple(bcast)",
"def broadcast_arrays(*args):\n args = [np.asarray(_m) for _m in args]\n shapes = [x.shape for x in args]\n if len(set(shapes)) == 1:\n # Common case where nothing needs to be broadcasted.\n return args\n shapes = [list(s) for s in shapes]\n strides = [list(x.strides) for x in args]\n nds = [len(s) for s in shapes]\n biggest = max(nds)\n # Go through each array and prepend dimensions of length 1 to each of\n # the shapes in order to make the number of dimensions equal.\n for i in range(len(args)):\n diff = biggest - nds[i]\n if diff > 0:\n shapes[i] = [1] * diff + shapes[i]\n strides[i] = [0] * diff + strides[i]\n # Chech each dimension for compatibility. A dimension length of 1 is\n # accepted as compatible with any other length.\n common_shape = []\n for axis in range(biggest):\n lengths = [s[axis] for s in shapes]\n unique = set(lengths + [1])\n if len(unique) > 2:\n # There must be at least two non-1 lengths for this axis.\n raise ValueError(\"shape mismatch: two or more arrays have \"\n \"incompatible dimensions on axis %r.\" % (axis,))\n elif len(unique) == 2:\n # There is exactly one non-1 length. The common shape will take\n # this value.\n unique.remove(1)\n new_length = unique.pop()\n common_shape.append(new_length)\n # For each array, if this axis is being broadcasted from a\n # length of 1, then set its stride to 0 so that it repeats its\n # data.\n for i in range(len(args)):\n if shapes[i][axis] == 1:\n shapes[i][axis] = new_length\n strides[i][axis] = 0\n else:\n # Every array has a length of 1 on this axis. Strides can be\n # left alone as nothing is broadcasted.\n common_shape.append(1)\n\n # Construct the new arrays.\n broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in\n zip(args, shapes, strides)]\n return broadcasted",
"def shape_to_broadcast(shape):\n return tuple(n==1 for n in shape)",
"def generalized_broadcast(arrays):\n arrays1 = np.broadcast_arrays(*[A[..., 0] for A in arrays])\n shapes_b = [A1.shape + (A.shape[-1],) for A1, A in zip(arrays1, arrays)]\n strides_b = [A1.strides + (A.strides[-1],) for A1, A in zip(arrays1, arrays)]\n arrays_b = [as_strided(A, shape=shape_Ab, strides=strides_Ab)\n for A, shape_Ab, strides_Ab in zip(arrays, shapes_b, strides_b)]\n return arrays_b",
"def testBroadcastDimension(self, axis, row_length, original_dim_sizes,\n broadcast_dim_sizes):\n original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)\n bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)\n self.assertEqual(original_shape.rank, bcast_shape.rank)\n # shape[axis].value == 1 and row_length > 1:\n bcast1 = original_shape.broadcast_dimension(axis, row_length)\n # shape[axis].value > 1 and row_length == shape[axis].value:\n bcast2 = bcast_shape.broadcast_dimension(axis, row_length)\n # shape[axis].value > 1 and row_length == 1:\n bcast3 = bcast_shape.broadcast_dimension(axis, 1)\n\n self.assertShapeEq(bcast1, bcast_shape)\n self.assertShapeEq(bcast2, bcast_shape)\n self.assertShapeEq(bcast3, bcast_shape)",
"def promote_shapes(*args):\n if len(args) < 2:\n return args\n else:\n shapes = [jnp.shape(arg) for arg in args]\n batch_shape = lax.broadcast_shapes(*shapes)\n num_dims = len(batch_shape)\n return [\n jnp.reshape(arg, (1,) * (num_dims - len(s)) + s)\n if len(s) < num_dims\n else arg\n for arg, s in zip(args, shapes)\n ]",
"def broadcast_to(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = BroadcastTo(shape).apply((x,))\n return y",
"def can_broadcast(shape1, shape2) -> bool:\n return(\n reduce(\n lambda a, b: a and b,\n starmap(\n lambda a, b: (a == b or (a == 1 or b == 1)),\n zip_longest(shape1, shape2, fillvalue=1)\n )\n )\n )",
"def test_broadcast(self):\n a = np.ones((3, 4, 1))\n ai = np.ones((1, 2, 5), dtype=np.intp)\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, (3, 2, 5))",
"def test_cross_multiply_shape():\n array_1 = np.ones((1, 3))\n array_out = utils.cross_multiply_array(array_1, axis=1)\n assert (1, 3, 3) == array_out.shape",
"def broadcast_rule(shape_a, shape_b):\n assert(isinstance(shape_a, tuple))\n assert(isinstance(shape_b, tuple))\n if len(shape_a) > len(shape_b):\n longer_shape, shorter_shape = shape_a, shape_b\n else:\n longer_shape, shorter_shape = shape_b, shape_a\n len_diff = len(longer_shape) - len(shorter_shape)\n for i in range(len_diff):\n # pad with leading 1s\n shorter_shape = (1,) + shorter_shape\n assert len(shorter_shape) == len(longer_shape)\n output_shape = list(longer_shape)\n for i in range(len(output_shape)):\n assert (shorter_shape[i] == longer_shape[i]) \\\n or (shorter_shape[i] == 1) \\\n or (longer_shape[i] == 1)\n output_shape[i] = max(shorter_shape[i], longer_shape[i])\n return tuple(output_shape)",
"def _broadcast_shape(\n data, rank, world_size, num_parts, is_feat_data, feat_name\n):\n assert len(data.shape) in [\n 1,\n 2,\n ], f\"Data is expected to be 1-D or 2-D but got {data.shape}.\"\n data_shape = list(data.shape)\n\n if len(data_shape) == 1:\n data_shape.append(1)\n\n if is_feat_data:\n data_shape.append(DATA_TYPE_ID[data.dtype])\n\n data_shape = torch.tensor(data_shape, dtype=torch.int64)\n data_shape_output = [\n torch.zeros_like(data_shape) for _ in range(world_size)\n ]\n dist.all_gather(data_shape_output, data_shape)\n logging.debug(\n f\"[Rank: {rank} Received shapes from all ranks: {data_shape_output}\"\n )\n shapes = [x.numpy() for x in data_shape_output if x[0] != 0]\n shapes = np.vstack(shapes)\n\n if is_feat_data:\n logging.debug(\n f\"shapes: {shapes}, condition: {all(shapes[0,2] == s for s in shapes[:,2])}\"\n )\n assert all(\n shapes[0, 2] == s for s in shapes[:, 2]\n ), f\"dtypes for {feat_name} does not match on all ranks\"\n\n # compute tids here.\n type_counts = list(shapes[:, 0])\n tid_start = np.cumsum([0] + type_counts[:-1])\n tid_end = np.cumsum(type_counts)\n tid_ranges = list(zip(tid_start, tid_end))\n logging.debug(f\"starts -> {tid_start} ... end -> {tid_end}\")\n\n return tid_ranges",
"def test_bootstrap_array_shape():\n test_array = np.zeros((3, 4))\n test_axis = 1\n nboot = 5\n new_array = utils.bootstrap_array(test_array, nboot=nboot, axis=test_axis)\n shape = (3, 4, 5)\n assert shape == new_array.shape",
"def test_preserve_broadcastable(self):\r\n x = tensor.matrix().dimshuffle('x', 0, 'x', 1, 'x')\r\n y = x.max(axis=1)\r\n assert y.type.broadcastable == (True, True, False, True)",
"def test_convolve_broadcast(self, fn, x_shape, y_shape):\n # 1. Test broadcast case\n x = torch.rand(x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(y_shape, dtype=self.dtype, device=self.device)\n out1 = getattr(F, fn)(x, y)\n # 2. Test without broadcast\n y_clone = y.expand(x_shape).clone()\n assert y is not y_clone\n assert y_clone.shape == x.shape\n out2 = getattr(F, fn)(x, y_clone)\n # check that they are same\n self.assertEqual(out1, out2)",
"def relay_distribute(c, array, shape):\n assert shape.is_constant(tuple)\n # Make sure shape is a tuple of builtin Python integers.\n relay_shape = tuple(int(dim) for dim in shape.value)\n return relay.op.broadcast_to(c.ref(array), relay_shape)",
"def test_broadcast(self):\n a = np.ones((3, 4, 1))\n ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4\n put_along_axis(a, ai, 20, axis=1)\n assert_equal(take_along_axis(a, ai, axis=1), 20)",
"def match_shapes(arrs):\n #temp = [(name, np.asarray(a), deg) for name, a, deg in arrs]\n #ndim = max([a.ndim - deg for _, a, deg in arrs])\n\n temp = [a for name, a, deg in arrs]\n for i in range(len(temp)):\n if np.isscalar(temp[i]):\n temp[i] = np.array(temp[i])\n ndim = max([a.ndim - deg for a, (_, _, deg) in zip(temp, arrs)])\n\n prep_arrs = []\n for name, a, deg in arrs:\n if np.isscalar(a):\n a = np.asarray(a)\n if a.ndim < deg:\n raise RuntimeError('%s.ndim must be at least %d' % (name, deg))\n if a.ndim < ndim + deg:\n #a = a.reshape((1,) * (ndim + deg - a.ndim) + a.shape)\n slc = (nax,) * (ndim + deg - a.ndim) + (Ellipsis,)\n a = a[slc]\n prep_arrs.append(a)\n\n return prep_arrs",
"def _fix_bias_shape(self, op_name, inputs, attrs):\n if (op_name == 'Add' or op_name == 'Mul') and \\\n ('broadcast' in attrs and attrs['broadcast'] == 1):\n assert len(list(inputs)) == 2\n bias_name = self._renames.get(inputs[1], inputs[1])\n bias = self._params[bias_name]\n assert len(bias.shape) == 1\n # reshape to (1, n)\n bias = mx.nd.array(bias.asnumpy().reshape((1, -1, 1, 1)))\n # broadcast_add expects shape with sym.variable\n self._nodes[bias_name] = mx.sym.Variable(name=bias_name, shape=bias.shape)\n self._params[bias_name] = bias",
"async def infer_shape_broadcast_shape(track, shpx, shpy):\n tx = await shpx['type']\n ty = await shpy['type']\n n = max(len(tx.elements), len(ty.elements))\n return TupleShape([NOSHAPE] * n)",
"def test_unbroadcast_addbroadcast(self):\r\n\r\n x = matrix()\r\n assert unbroadcast(x, 0) is x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is x\r\n assert unbroadcast(x, 0, 1) is x\r\n\r\n assert addbroadcast(x, 0) is not x\r\n assert addbroadcast(x, 1) is not x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 0), 0) is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is not x\r\n x = row()\r\n assert unbroadcast(x, 0) is not x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is not x\r\n assert unbroadcast(x, 0, 1) is not x\r\n\r\n assert addbroadcast(x, 0) is x\r\n assert addbroadcast(x, 1).owner.inputs[0] is x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n assert addbroadcast(x, 0, 1).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 1), 1) is x\r\n assert addbroadcast(unbroadcast(x, 1), 1) is not x\r\n\r\n # The first broadcast is remove the broadcast, so the second\r\n # should not make one\r\n assert unbroadcast(unbroadcast(x, 0), 0).owner.inputs[0] is x\r\n\r\n # Test that consecutive Rebroadcast op are fused\r\n x = TensorType(dtype='float64', broadcastable=(True, True))()\r\n assert unbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is x",
"def conv_broadcast(x, kernel_shape, padding, strides):\n if len(kernel_shape) == 2:\n return conv2d_broadcast(x, kernel_shape[0], kernel_shape[1],\n padding, strides)\n elif len(kernel_shape) == 1:\n return conv1d_broadcast(x, kernel_shape[0], padding, strides[0])\n else:\n raise ValueError()",
"def test_shape_error(self):\n raise unittest.SkipTest(\"Failing after fixing Poly unsoundness #4878\")\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n self.CheckShapePolymorphism(\n lambda x, y: x + y,\n input_signature=[tf.TensorSpec([None]), tf.TensorSpec([4])],\n in_shapes=[\"(v,)\", \"(4,)\"],\n expected_output_signature=tf.TensorSpec([None]))\n\n four_ones = np.ones((4,))\n # We get the error even if we use correct actual arguments\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n jax2tf.convert(lambda x, y: x + y,\n in_shapes=[\"(v,)\", \"(4,)\"])(four_ones, four_ones)\n\n with self.assertRaisesRegex(TypeError,\n re.escape(\"dot_general requires contracting dimensions to have the same shape, got [4] and [v].\")):\n jax2tf.convert(lambda x: jnp.matmul(x, x),\n in_shapes=[\"(v, 4)\"])(np.ones((4, 4)))\n\n # TODO: this is an opportunity to improve the translation, should not error\n with self.assertRaisesRegex(TypeError,\n \"Only integers, .* tensors are valid indices, got 0\"):\n jax2tf.convert(lambda x: jnp.split(x, 2),\n in_shapes=[\"(2*v,)\"])(four_ones)",
"def broadcast_array(array, axis_index, shape):\n\n if type(axis_index) in [float, int]:\n start_axis_index = end_axis_index = axis_index\n else:\n assert len(axis_index) == 2\n start_axis_index, end_axis_index = axis_index\n \n dim = start_axis_index - 1\n while dim >= 0:\n array = array[numpy.newaxis, ...]\n array = numpy.repeat(array, shape[dim], axis=0)\n dim = dim - 1\n \n dim = end_axis_index + 1\n while dim < len(shape): \n array = array[..., numpy.newaxis]\n array = numpy.repeat(array, shape[dim], axis=-1)\n dim = dim + 1\n\n return array",
"def _fix_shape(self, value):\n for k, v in self.variables.items():\n if len(v.shape) < len(value.shape):\n a, b = self._broadcast(value, v)\n self.variables[k] = np.zeros(a.shape, dtype=b.dtype) + b",
"def test_broadcastable_flag_assignment_mixed_thisaxes(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(2, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(False, False, True))\r\n b = self.shared(b_val, broadcastable=(True, False, True))\r\n c = self.join_op()(0, a, b)\r\n assert not c.type.broadcastable[0]\r\n\r\n f = function([], c, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [True for node in topo if isinstance(node.op, self.join_op)]\r\n\r\n f()\r\n utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)\r\n # Should raise an error if b_val.shape[0] is not 1\r\n # We can't set the value|\r\n self.assertRaises(TypeError, b.set_value,\r\n rng.rand(3, 4, 1).astype(self.floatX))\r\n a = TensorType(dtype=self.floatX, broadcastable=[0, 0, 1])()\r\n b = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1])()\r\n c = join(0, a, b)\r\n f = function([a, b], c, mode=self.mode)\r\n bad_b_val = rng.rand(3, 4, 1).astype(self.floatX)\r\n self.assertRaises(TypeError, f, a_val, bad_b_val)",
"def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((2, 4, 4))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))"
] | [
"0.63662064",
"0.631577",
"0.6181781",
"0.60380113",
"0.59342825",
"0.5925404",
"0.58053595",
"0.57856745",
"0.57498085",
"0.56896067",
"0.55596524",
"0.55482703",
"0.548472",
"0.548077",
"0.540709",
"0.5397413",
"0.53940207",
"0.5373903",
"0.53049004",
"0.526517",
"0.52371943",
"0.5168172",
"0.5129427",
"0.51159716",
"0.5099658",
"0.5093899",
"0.5056028",
"0.50246346",
"0.50067145",
"0.4981141"
] | 0.6529921 | 0 |
Batched center of mass calculation of 2d arrays | def center_of_mass_2d(arr: np.ndarray, dtype=np.float32) -> np.ndarray:
total = np.sum(arr, axis=(-1, -2))
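# Open index grids over the two trailing (spatial) axes; any leading axes act as batch dimensions.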
grids = np.ogrid[[slice(0, i) for i in arr.shape[-2:]]]
with np.errstate(invalid="ignore"):
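# Weighted mean of indices per axis; an all-zero image yields 0/0 = NaN (invalid-value warnings suppressed).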
results = np.array([np.sum(arr * grid.astype(dtype), axis=(-1, -2)) / total for grid in grids], dtype=dtype)
results = np.moveaxis(results, 0, -1)
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def centerOfMass(data):\r\n dd = []\r\n for d in data:\r\n dd.append(d.coordinate)\r\n\r\n data = dd\r\n data = np.array(data)\r\n n = len(data)\r\n x = sum(data[:,0])\r\n y = sum(data[:,1])\r\n z = sum(data[:,2])\r\n x/=n\r\n y/=n\r\n z/=n\r\n return x,y,z,n",
"def _center_of_mass(a, positions, shape, dtype):\n\n result = numpy.empty((1,), dtype=dtype)\n\n positions_nd = numpy.unravel_index(positions, shape)\n a_sum = numpy.sum(a)\n\n a_wt_i = numpy.empty(a.shape)\n for i, pos_nd_i in enumerate(positions_nd):\n a_wt_sum_i = numpy.multiply(a, pos_nd_i, out=a_wt_i).sum()\n result[\"com\"][0, i] = a_wt_sum_i / a_sum\n\n return result[0]",
"def center_of_mass(im_binary, x_offset=0, y_offset=0):\n n = np.sum(im_binary)\n\n x = np.arange(im_binary.shape[1]) + x_offset\n y = np.arange(im_binary.shape[0]) + y_offset\n xv, yv = np.meshgrid(x, y)\n cx = np.sum(xv[im_binary]) / n\n cy = np.sum(yv[im_binary]) / n\n\n return cx, cy",
"def get_center_of_mass_allies(self,obs):",
"def calculate_centers_of_mass(x_all, y_all):\n num_of_frames, num_of_rafts = x_all.shape\n\n x_centers = x_all[:, 0:num_of_rafts].mean(axis=1)\n y_centers = y_all[:, 0:num_of_rafts].mean(axis=1)\n\n x_relative_to_centers = x_all - x_centers[:, np.newaxis]\n y_relative_to_centers = y_all - y_centers[:, np.newaxis]\n\n distances_to_centers = np.sqrt(x_relative_to_centers ** 2 + y_relative_to_centers ** 2)\n\n orbiting_angles = np.arctan2(y_relative_to_centers, x_relative_to_centers) * 180 / np.pi\n\n return distances_to_centers, orbiting_angles, x_centers, y_centers",
"def center_of_mass(elements, coordinates):\n mass = molecular_weight(elements)\n mass_array = np.array([[atomic_mass[i.upper()]] * 3 for i in elements])\n mass_coordinates = coordinates * mass_array\n return (np.sum(mass_coordinates, axis=0) / np.array([mass, mass, mass]))",
"def centre_of_mass(image, black_blob=False):\r\n image = image.copy()\r\n shape = image.shape\r\n if black_blob:\r\n image = 255-image\r\n centre = np.array([0, 0]).astype(float)\r\n\r\n #------------------------------START YOUR CODE-----------------------------#\r\n s = np.sum(image)\r\n indices = np.mgrid[0:image.shape[0],0:image.shape[1]]\r\n ys = np.sum(indices[0]*image)\r\n xs = np.sum(indices[1]*image)\r\n\r\n # Equivalent, but slower\r\n #xs = 0.0\r\n #ys = 0.0\r\n #s = 0.0 \r\n #for y in range(shape[0]):\r\n # for x in range(shape[1]):\r\n # p = image[y, x]\r\n # xs += x*p\r\n # ys += y*p\r\n # s += p\r\n\r\n centre = np.array([ ys/s, xs/s ])\r\n #-------------------------------END YOUR CODE------------------------------#\r\n return centre.astype(int)",
"def center_of_mass(xy, masses):\n return np.sum(masses.reshape(len(xy), 1) * xy.astype(np.float), axis=0) / float(np.sum(masses))",
"def CenterOfMass(points):\n A = AreaOfPolygon(points)\n N = len(points)\n cx = 0\n cy = 0\n for i in xrange(0, N):\n x_i = points[i][0]\n y_i = points[i][1]\n x_ip1 = points[(i+1) % N][0]\n y_ip1 = points[(i+1) % N][1]\n part = (x_i * y_ip1 - x_ip1 * y_i)\n cx += ((x_i + x_ip1) * part)\n cy += ((y_i + y_ip1) * part)\n return (cx/(6*A), cy/(6*A), abs(A))",
"def cell_center_fast(seg_img: np.ndarray, labels: np.ndarray) -> np.ndarray:\n array_max_idx = max(labels)\n results = np.zeros((array_max_idx + 1, 3))\n results = compute_cell_center(seg_img, labels, results)\n\n return results",
"def center_of_mass(molecule):\n xcom=ycom=zcom=0\n totm = 0\n for atom in get_atoms(molecule):\n m = get_mass(atom)\n x,y,z = get_xyz(atom)\n xcom += m*x\n ycom += m*y\n zcom += m*z\n totm += m\n xcom /= totm\n ycom /= totm\n zcom /= totm\n return xcom,ycom,zcom",
"def centre(arrayin):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n cy = 0.0\r\n cx = 0.0\r\n for i in range(ny):\r\n for j in range(nx):\r\n cy += np.float64(arrayin[i,j]) * np.float64(i - ny/2 + 1)\r\n cx += np.float64(arrayin[i,j]) * np.float64(j - nx/2 + 1)\r\n cx = cx / np.sum(arrayin)\r\n cy = cy / np.sum(arrayin)\r\n arrayout = np.roll(arrayin ,-int(cy),0)\r\n arrayout = np.roll(arrayout,-int(cx),1)\r\n return [arrayout,cy,cx]",
"def center(X):\n \n n,m = X.shape\n if n != m:\n raise Exception('Matrix is not square.')\n \n colsum = X.sum(axis=0) / n\n rowsum = X.sum(axis=1) / n\n totalsum = X.sum() / (n**2)\n \n #center\n Y = array([[ X[i,j]-rowsum[i]-colsum[j]+totalsum for i in range(n) ] for j in range(n)])\n \n return Y",
"def calculate_center_of_mass(symbols, coordinates):\n\n total_mass = calculate_molecular_mass(symbols)\n\n mass_array = np.zeros([len(symbols),1])\n\n for i in range(len(symbols)):\n mass_array[i] = atomic_weights[symbols[i]]\n\n center_of_mass = sum(coordinates * mass_array) / total_mass\n\n return center_of_mass",
"def center_of_mass(points):\n # break into many triangles\n # each point is part of two triangles\n cor = [sum(points) / len(points)]\n mass_points = []\n area = 0\n for i in range(len(points) - 1):\n triangle = cor + points[i:i + 2]\n # print(triangle)\n mass_points.append(build_triangle_point_mass(triangle))\n area += shoelace_area(triangle)\n # print(triangle, area)\n mass_points.append(build_triangle_point_mass(cor + [points[-1], points[0]]))\n area += shoelace_area(cor + [points[-1], points[0]])\n return Vector2D(*find_com(*zip(*mass_points))), area",
"def compute_cell_center(seg_img: np.ndarray, labels: np.ndarray, results: np.ndarray) \\\n -> np.ndarray:\n for label in labels:\n if label != 0:\n all_points_z, all_points_x, all_points_y = np.where(seg_img == label)\n avg_z = np.round(np.mean(all_points_z))\n avg_x = np.round(np.mean(all_points_x))\n avg_y = np.round(np.mean(all_points_y))\n results[label] = [avg_z, avg_x, avg_y]\n\n return results",
"def CenterOfMassForShape(shape):\n polygons = SplitIntoPolygons(shape)\n total_A = 0\n total_cx = 0\n total_cy = 0\n\n for polygon in polygons:\n cx, cy, A = CenterOfMass(polygon)\n total_cx += A * cx\n total_cy += A * cy\n total_A += A\n\n return (total_cx / total_A, total_cy / total_A)",
"def test_get_center_of_mass(self):\n symbols = ['C', 'H', 'H', 'H', 'H']\n coords = np.array([[0.0000000, 0.0000000, 0.0000000],\n [0.6269510, 0.6269510, 0.6269510],\n [-0.6269510, -0.6269510, 0.6269510],\n [-0.6269510, 0.6269510, -0.6269510],\n [0.6269510, -0.6269510, -0.6269510]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n for cm_coord in center_of_mass:\n self.assertEqual(cm_coord, 0.0)\n\n symbols = ['O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']\n coords = np.array([[1.28706525, 0.52121353, 0.04219198],\n [0.39745682, -0.35265044, -0.63649234],\n [0.36441173, -1.68197093, 0.08682400],\n [-0.59818222, 0.10068325, -0.65235399],\n [0.74799641, -0.48357798, -1.66461710],\n [0.03647269, -1.54932006, 1.12314420],\n [-0.31340646, -2.38081353, -0.41122551],\n [1.36475837, -2.12581592, 0.12433596],\n [2.16336803, 0.09985803, 0.03295192]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n self.assertAlmostEqual(center_of_mass[0], 0.7201, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.4880, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.1603, 3)\n\n numbers = [6, 6, 8, 1, 1, 1, 1, 1, 1]\n coords = np.array([[1.1714680, -0.4048940, 0.0000000],\n [0.0000000, 0.5602500, 0.0000000],\n [-1.1945070, -0.2236470, 0.0000000],\n [-1.9428910, 0.3834580, 0.0000000],\n [2.1179810, 0.1394450, 0.0000000],\n [1.1311780, -1.0413680, 0.8846660],\n [1.1311780, -1.0413680, -0.8846660],\n [0.0448990, 1.2084390, 0.8852880],\n [0.0448990, 1.2084390, -0.8852880]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, numbers=numbers)\n self.assertAlmostEqual(center_of_mass[0], -0.0540, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.0184, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.0000, 3)",
"def center(self, center_mass=False):\n if center_mass:\n com = self.center_of_mass\n self.xyz -= com\n else:\n self.xyz -= self.xyz.mean(0)",
"def get_center_of_mass_enemies(self,obs):",
"def getCenterOfMass(self, filtered = True):\n n_time = len(self.pos)\n x_mean = [0.0,]*n_time\n y_mean = [0.0,]*n_time\n z_mean = [0.0,]*n_time \n for frame in range(n_time):\n # get all the positions of the filtered points\n x,y,z = self.getAllPositions(frame, filtered)\n x_mean[frame] = np.asarray(x).mean() if len(x) > 0 else None\n y_mean[frame] = np.asarray(y).mean() if len(y) > 0 else None\n z_mean[frame] = np.asarray(z).mean() if len(z) > 0 else None\n\n return x_mean, y_mean, z_mean",
"def centres_of_mass_2D(image):\n centroids = []\n bords = []\n areas = []\n radius = []\n \n for info in measure.regionprops(image, ['Centroid', 'BoundingBox', 'Area', 'equivalent_diameter', 'Label']): \n \n # Skip wrong regions\n index = np.where(image==info['Label'])\n if index[0].size==0 & index[1].size==0:\n continue\n \n # TODO: change this value\n if info['Area'] > image.shape[0] / 4.:\n \n \n centre = info['Centroid']\n D = info['equivalent_diameter']\n \n #min_row, min_col, max_row, max_col = info['BoundingBox']\n #a1 = int((max_row - min_row) / 2.)\n #a2 = int((max_col - min_col) / 2.)\n \n #box_cent = (a1 + min_row, a2 + min_col)\n \n radius.append(round(D / 2.0, 3))\n centroids.append( (round(centre[0], 3),round(centre[1], 3)) )\n #bords.append(box_cent)\n\n return [centroids, radius]",
"def calculate_center_of_mass(chainVecs: IMP.algebra.Vector3Ds):\n return IMP.algebra.get_centroid(chainVecs)",
"def get_center_of_masses(self) -> np.array:\n com = np.average(self.obj[:, :2], weights=self.obj[:, 2], axis=0)\n return com",
"def center(emg_data: np.ndarray, center_value: float = None) -> np.ndarray:\n center_value = center_value if center_value else emg_data.mean(axis=1)\n emg_centered = np.copy(emg_data)\n for i in range(emg_data.shape[0]):\n emg_centered[i, :] = emg_data[i, :] - center_value[i]\n return emg_centered",
"def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])",
"def computeCenters3d(self, data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]",
"def center_of_mass(mask):\n M = cv2.moments(mask)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return [int(cX), int(cY)]",
"def barycentre (liste_objets):\r\n x = 0\r\n y = 0\r\n summass = 0\r\n for i in liste_objets:\r\n x += i.mass * i.posx\r\n y += i.mass * i.posy\r\n summass += i.mass\r\n x /= summass\r\n y /= summass\r\n return x,y,summass",
"def center_of_mass(self, matrix):\n # Changing the positions of all objects relative to center of mass, in origo.\n x, y, z = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 1:4], axis=0)/(np.sum(matrix[:, 0], axis=0))\n print('Center of mass located at (%.4g, %.4g, %.4g)' %(x, y, z))\n # x-direction\n matrix[:, 1] = matrix[:, 1]-x\n # y-direction\n matrix[:, 2] = matrix[:, 2]-y\n # z-direction\n matrix[:, 3] = matrix[:, 3]-z\n # The Suns initial velocity which makes the total momentum of the system zero\n # velcity_sun = sum(mass_planet_i*veocity_planet_i)/(mass_sun)\n u, v, w = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 4:7], axis=0)/(matrix[0, 0])\n print('The initial velocity of the Sun (%.4g, %.4g, %.4g)' %(u, v, w))\n matrix[0, 4:7] = u, v, w\n # Returning the modified matrix\n return matrix"
] | [
"0.7113222",
"0.6913504",
"0.68147796",
"0.66269344",
"0.6623906",
"0.6605114",
"0.6573181",
"0.6555954",
"0.65233356",
"0.65076435",
"0.6476598",
"0.64425707",
"0.6414334",
"0.6406729",
"0.63674235",
"0.63496435",
"0.6226081",
"0.6160094",
"0.61026037",
"0.6084239",
"0.6066669",
"0.60612375",
"0.60513264",
"0.603614",
"0.60261333",
"0.6025939",
"0.60209906",
"0.595546",
"0.5950495",
"0.58809245"
] | 0.74505234 | 0 |
validate_target verifies that target is a valid MAC address, IP address or hostname | def validate_target(target, arp_table):
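# Resolution order: literal MAC address, then an IP known to the ARP table, then a hostname key in the table.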
try:
mac = mac_address(target)
return mac
except TypeError:
pass
try:
ip = ip_address(target)
if ip in arp_table.keys():
return arp_table[ip].mac
except TypeError:
pass
if target in arp_table:
return arp_table[target].mac
else:
raise TypeError('{} is not a valid target'.format(target)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True",
"def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()",
"def target_is_valid(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except:\n return False\n return target['state'] != \"invalid\"",
"def valid(self, target):",
"def verify_as_host(self, target, message_handler):\n\n # Check we can host the target.\n if not self.supported_target(target, message_handler):\n raise UserException(\n \"{0} is not a supported {1} development host\".format(\n self.name, target.name))",
"def _is_valid_target(hostname):\n if not hostname:\n return False\n\n # Check if it's a valid IP\n if _is_valid_ipv4_address(hostname) or _is_valid_ipv6_address(hostname):\n return True\n\n # Check if it's a valid DNS name\n\n if hostname[-1] == '.':\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n if len(hostname) < 1 or len(hostname) > 253: # Technically 255 octets but 2 are used for encoding\n return False\n\n labels = hostname.split(\".\")\n\n # the TLD must be not all-numeric\n if re.match(r\"[0-9]+$\", labels[-1]):\n return False\n\n allowed = re.compile(r\"(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(label) for label in labels)",
"def target_validation(target_name, action):\n json_data = read_file('presqt/specs/targets.json', True)\n for data in json_data:\n if data['name'] == target_name:\n if data[\"supported_actions\"][action] is False:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not support the action '{}'.\".format(target_name, action),\n status.HTTP_400_BAD_REQUEST)\n return True, data['infinite_depth']\n else:\n raise PresQTValidationError(\n \"PresQT Error: '{}' is not a valid Target name.\".format(target_name), status.HTTP_404_NOT_FOUND)",
"def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')",
"def _validator_target(self, field, value):\n if not REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))",
"def check(self, target, port):\n pass",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"def supported_target(self, target, message_handler):\n\n # iOS can never be a host.\n return False",
"def validate_target(data, handshake):\n\n if data['header'] != handshake:\n END_POINT({\n 'status': 'invalid-handshake',\n 'handshake': handshake,\n })\n comment('handshake: %r' % data['header'])\n\n # Import all requested modules\n for mod in data.get('imports', ()):\n importlib.import_module('boxed')\n try:\n importlib.import_module(mod)\n except ImportError:\n END_POINT({\n 'status': 'invalid-import',\n 'module': mod,\n })\n comment('all modules successfully imported')\n\n # If the target attribute is a callable, simply return it\n target = data['target']\n if callable(target):\n return target\n\n # If it is a path string, we load the proper target function in the given\n # location.\n mod, _, func = data['target'].rpartition('.')\n try:\n mod = importlib.import_module(mod)\n target = getattr(mod, func)\n except ImportError as ex:\n END_POINT({\n 'status': 'invalid-target',\n 'message':\n 'could not import module %r. Maybe it must be passed it to '\n 'the \"imports\" argument.' % mod,\n })\n except AttributeError:\n END_POINT({\n 'status': 'invalid-target',\n 'message':\n 'could not find function \"%s\" in module %s' % (func, mod),\n })\n comment('target function loaded as %s' % funcname(target))\n return target",
"def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False",
"def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"",
"def valid_mikettle_mac(mac, pat=re.compile(r\"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac",
"def transfer_target_validation(source_target, destination_target):\n json_data = read_file('presqt/specs/targets.json', True)\n\n for data in json_data:\n if data['name'] == source_target:\n if destination_target not in data['supported_transfer_partners']['transfer_out']:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not allow transfer to '{}'.\".format(\n source_target, destination_target),\n status.HTTP_400_BAD_REQUEST)\n\n elif data['name'] == destination_target:\n if source_target not in data['supported_transfer_partners']['transfer_in']:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not allow transfer from '{}'.\".format(\n destination_target, source_target),\n status.HTTP_400_BAD_REQUEST)\n\n return True",
"def validate_target(func, *args, **kwargs):\n def inner(self, *args, **kwargs):\n # find the target param\n target_id = None\n if 'target_id' in kwargs and kwargs['target_id'] != None:\n target_id = kwargs['target_id']\n else:\n target_id = 0\n\n # if there was a target specified, check that it's valid\n if not self.target_is_valid(target_id):\n raise NoSuchTargetException()\n\n # call the function\n return func(self, *args, **kwargs)\n return inner",
"def verify_as_target(self, message_handler):",
"def verify_as_target(self, message_handler):\n\n self.platform.verify_as_target(message_handler)",
"def _validate_rule_target_name(name: str) -> None:\n if not name:\n raise common_exceptions.RuleTargetValidationError(\n \"A `name` field must be supplied.\"\n )",
"def test_host_validation(runner: CliRunner) -> None:\n invalid_res = runner.invoke(cli.main, [\"-b\", \"1.2.3.4.5\"])\n assert invalid_res.exit_code == 2\n assert 'Invalid value for \"-b\" / \"--bind-address\"' in invalid_res.output\n assert \"'host' is invalid in configuration\" in invalid_res.output",
"def supported_target(self, target, message_handler):\n\n # This default implementation checks that the architectures are the\n # same.\n return target is self",
"def supported_target(self, target, message_handler):\n\n # Android can never be a host.\n return False",
"def identifyTargetType(self, target):\n ipAddress = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}')\n ipFind = re.findall(ipAddress, target)\n if ipFind is not None and len(ipFind) > 0:\n return \"ip\"\n\n md5 = re.compile('[a-fA-F0-9]{32}', re.IGNORECASE)\n md5Find = re.findall(md5,target)\n if md5Find is not None and len(md5Find) > 0:\n return \"md5\"\n\n return \"hostname\"",
"def test_debugger_delete_invalid_target(self):\n target = lldb.SBTarget()\n self.assertFalse(target.IsValid())\n self.dbg.DeleteTarget(target)",
"def _target_is_valid_filename(self):\n filename = self.target\n if not filename_is_valid(filename):\n raise BadFilenameError(f\"{repr(filename)} must be a valid filename.\")\n return True",
"def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None",
"def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )",
"def validate_host(host):\n # FIXME: not convinced that pinging the machine is a good choice. it's definitely not needed for localhost\n route = os.system(\"ping -t 2 -c 1 \" + host)\n\n if route != 0:\n raise errs.IpError(host, 'Could not ping host: %s' % (host))\n\n try:\n # FIXME: i don't think there is any point in converting to ip address. socket.connect seems to handle machine names just fine and this is preferable since it is more human readable\n host = socket.gethostbyname(host)\n # FIXME: i don't think this line is doing anything. the previous line will error on an invalid name or malformed ip\n socket.inet_aton(host)\n except socket.error:\n raise errs.IpError(host, 'Please specify a valid host: %s' % (host))"
] | [
"0.7149191",
"0.64991635",
"0.64354825",
"0.63836634",
"0.62301147",
"0.62007254",
"0.6052976",
"0.6006137",
"0.59964573",
"0.5994269",
"0.59239537",
"0.58881646",
"0.58818513",
"0.5870957",
"0.5835328",
"0.57881117",
"0.575523",
"0.5737461",
"0.57259214",
"0.5655272",
"0.5633665",
"0.56186795",
"0.5612421",
"0.5601692",
"0.5570505",
"0.5554997",
"0.5472994",
"0.5472263",
"0.54712343",
"0.54681593"
] | 0.76750195 | 0 |
[authorize and initialize spotify client] | def init_auth_client(self):
with open("config.yml", 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
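# prompt_for_user_token runs Spotify's OAuth flow (opening a browser if needed) and caches the token locally.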
token = util.prompt_for_user_token(
cfg['username'],
scope=cfg['scope'],
client_id=cfg['spotipy_client_id'],
client_secret=cfg['spotipy_client_secret'],
redirect_uri=cfg['spotipy_redirect_uri'])
sp = spotipy.Spotify(auth=token)
return sp, cfg['username'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))",
"def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):\r\n auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, \r\n client_secret=SPOTIPY_CLIENT_SECRET)\r\n \r\n return spotipy.Spotify(auth_manager=auth_manager)",
"def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()",
"def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))",
"def __init__(\n self,\n clientID,\n secretID,\n redirctURI,\n username\n ):\n\n print('SpotifClient starts...')\n \n self.client_id = clientID\n self.secret_id = secretID\n self.redirect_uri = redirctURI\n self.username = username\n self._isConnected = False\n\n #self.Connect()",
"def Connect(self,scope):\n\n \"\"\"\n Calling util.prompt_for_user_token will open Spotify’s application authorization\n page in your browser (and require you to log in if you are not already logged in\n to spotify.com), unless a locally cached access token exist from a previous authorization/authentication.\n \"\"\"\n try:\n token = util.prompt_for_user_token(\n self.username,\n scope,\n self.client_id,\n self.secret_id,\n self.redirect_uri)\n except ImportError:\n self._isConnected = False\n print(\" onnecting to Spotify failed\") \n\n\n if token:\n sp = spotipy.Spotify(auth=token)\n self._isConnected = True\n return sp\n else:\n print(\"Can't get token for\", self.username)\n self._isConnected = False",
"async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return",
"def authorize():\n scopes = 'playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative user-read-email user-read-private'\n\n spotify_authorize_url = 'https://accounts.spotify.com/authorize?'\n params = {\n 'response_type': 'code', \n 'client_id': SPOTIFY_CLIENT_ID,\n 'redirect_uri': 'http://0.0.0.0:5000/callback',\n 'scope': scopes, \n 'show_dialog': True\n }\n\n query_params = urllib.parse.urlencode(params)\n response = make_response(redirect(spotify_authorize_url + query_params))\n return response",
"def authenticate(redirect_uri, client_cred_manager, username, scope,client_id,client_secret):\r\n\r\n sp = spotipy.Spotify(client_credentials_manager = client_cred_manager)\r\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\r\n if token:\r\n sp = spotipy.Spotify(auth=token)\r\n else:\r\n print(\"Can't get token for\", username)\r\n return sp",
"def authorize():\n encoded_auth = base64.b64encode(\n (os.environ[\"SPOTIFY_CLIENT_ID\"] + ':' + os.environ[\"SPOTIFY_CLIENT_SECRET\"]).encode())\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_auth.decode(\"utf-8\"))\n }\n\n response = requests.post(os.environ['SPOTIFY_AUTH_URL'], data={'grant_type': 'client_credentials'},\n headers=headers).text\n return json.loads(response)",
"def async_setup(hass, config):\n import spotipy.oauth2\n import json\n global AIS_SPOTIFY_TOKEN\n\n try:\n ws_resp = aisCloud.key(\"spotify_oauth\")\n json_ws_resp = ws_resp.json()\n spotify_redirect_url = json_ws_resp[\"SPOTIFY_REDIRECT_URL\"]\n spotify_client_id = json_ws_resp[\"SPOTIFY_CLIENT_ID\"]\n spotify_client_secret = json_ws_resp[\"SPOTIFY_CLIENT_SECRET\"]\n spotify_scope = json_ws_resp[\"SPOTIFY_SCOPE\"]\n try:\n ws_resp = aisCloud.key(\"spotify_token\")\n key = ws_resp.json()[\"key\"]\n AIS_SPOTIFY_TOKEN = json.loads(key)\n except:\n AIS_SPOTIFY_TOKEN = None\n _LOGGER.info(\"No AIS_SPOTIFY_TOKEN\")\n except Exception as e:\n _LOGGER.error(\"No spotify oauth info: \" + str(e))\n return False\n\n cache = hass.config.path(DEFAULT_CACHE_PATH)\n gate_id = ais_global.get_sercure_android_id_dom()\n oauth = spotipy.oauth2.SpotifyOAuth(spotify_client_id, spotify_client_secret, spotify_redirect_url,\n scope=spotify_scope, cache_path=cache, state=gate_id)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token in cache;\")\n if AIS_SPOTIFY_TOKEN is not None:\n with open(cache, 'w') as outfile:\n json.dump(AIS_SPOTIFY_TOKEN, outfile)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token; run configurator\")\n async_request_configuration(hass, config, oauth)\n return True\n\n if hass.data.get(DOMAIN):\n configurator = hass.components.configurator\n configurator.request_done(hass.data.get(DOMAIN))\n del hass.data[DOMAIN]\n\n # register services\n data = hass.data[DOMAIN] = SpotifyData(hass, oauth)\n\n # service = configured_service(hass)\n\n @asyncio.coroutine\n def search(call):\n _LOGGER.info(\"search \" + str(call))\n yield from data.process_search_async(call)\n\n def select_track_name(call):\n _LOGGER.info(\"select_track_name\")\n data.process_select_track_name(call)\n\n def change_serive(call):\n _LOGGER.info(\"change_serive\")\n data.change_serive(call)\n\n hass.services.async_register(DOMAIN, 'search', search)\n hass.services.async_register(DOMAIN, 'select_track_name', select_track_name)\n hass.services.async_register(DOMAIN, 'change_serive', change_serive)\n\n return True",
"def authorize(self):\n\t\ttry:\n\t\t\tauth_url = 'https://accounts.spotify.com/api/token'\n\t\t\theaders={}\n\t\t\tdata={}\n\n\t\t\tdata_string = f\"{self.client_id}:{self.client_secret}\"\n\n\t\t\tdata_bytes = data_string.encode(\"ascii\")\n\t\t\tbase_bytes = base64.b64encode(data_bytes)\n\t\t\tbase_message = base_bytes.decode(\"ascii\")\n\n\t\t\theaders['Authorization'] = f\"Basic {base_message}\"\n\n\t\t\tdata = parse.urlencode({\"grant_type\": \"client_credentials\"})\n\t\t\tdata = data.encode('ascii')\n\n\t\t\treq = request.Request(auth_url,data=data, headers=headers)\n\t\t\tlogging.info(\"Successfully called Spotify token API!\")\n\t\texcept:\n\t\t\tlogging.error(\"Failed to create authorization request!\")\n\t\t\treturn False\n\t\t\t\n\t\tif req is not None:\n\t\t\ttry:\n\t\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\texcept error.URLError as e:\n\t\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\t\tlogging.error(response)\n\t\t\t\treturn False\n\t\t\n\t\ttry:\n\t\t\t_json = json.loads(response)\n\t\t\tself.token = _json[\"access_token\"]\n\t\t\tlogging.info(\"Successfully received token from Spotify!\")\n\t\texcept:\n\t\t\tlogging.error(\"Could not fetch token from response!\")\n\t\t\treturn False\n\t\t\t\n\t\treturn True",
"def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}",
"def init_api():\n global soundcloud\n import json\n \n SECRETS_VERSION = 1\n \n # Load secrets file\n if os.path.exists(config.token_cache):\n with open(config.token_cache, 'r', encoding='utf-8') as f:\n secrets = json.load(f)\n else:\n secrets = {}\n \n # Try to reuse the cached access token\n if secrets\\\n and secrets['version'] == SECRETS_VERSION\\\n and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\\\n and secrets['username'] == config.username:\n \n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n access_token=secrets['access_token']\n )\n return\n \n # Get a new access token\n logging.info('Getting a new access token') \n try:\n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n username=config.username,\n password=config.password\n )\n except HTTPError as e:\n if e.response.status_code == 401:\n logging.critical('Incorrect API key, login or password. Please, edit config.py.')\n sys.exit(1)\n else:\n raise\n \n # Save the token\n secrets = {\n 'version': SECRETS_VERSION,\n 'username': config.username,\n 'access_token': soundcloud.access_token,\n 'access_token_acquired_at': time(),\n 'access_token_expires_in': soundcloud.token.expires_in,\n }\n \n with open(config.token_cache, 'w', encoding='utf-8') as f:\n secrets = json.dump(secrets, f, indent='\\t', ensure_ascii=False)",
"def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])",
"def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret",
"def __init__(self, client_access_token, artist_name):\n self.client_access_token = client_access_token\n self.artist_name = artist_name\n self.base_url = 'https://api.genius.com/'\n self.headers = {'Authorization': 'Bearer ' + self.client_access_token}\n self.artist_songs = None",
"def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret",
"def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")",
"def _authorize(self):\n auth = tweepy.OAuthHandler(self.keys[\"consumer_key\"], self.keys[\"consumer_secret\"])\n auth.set_access_token(self.keys[\"access_token\"], self.keys[\"access_token_secret\"])\n return tweepy.API(auth)",
"def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)",
"def __init__(self, credentials):\n self.credentials = credentials\n http = httplib2.Http()\n http = self.credentials.authorize(http)\n self.service = build(\"drive\", \"v2\", http=http)",
"def do_setup(self, context):\n self.restclient = rest_client.RestClient(self.configuration)\n return self.restclient.login()",
"def init_api(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(self.gdrive_config.TOKEN_PICK_PATH):\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.gdrive_config.CREDENTIAL_PATH, self.gdrive_config.SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service",
"def __init__(__self__, *,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n metadata_url: Optional[pulumi.Input[str]] = None,\n scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if client_id is not None:\n pulumi.set(__self__, \"client_id\", client_id)\n if client_secret is not None:\n pulumi.set(__self__, \"client_secret\", client_secret)\n if metadata_url is not None:\n pulumi.set(__self__, \"metadata_url\", metadata_url)\n if scopes is not None:\n pulumi.set(__self__, \"scopes\", scopes)",
"def __init__(self, client_id, token, scope=[\"activity\", \"heartrate\", \"location\", \"nutrition\", \"profile\", \"settings\", \"sleep\", \"social\", \"weight\"]):\n\n\t\tif token['access_token'] == \"\":\n\t\t\t# We need to fetch a token for the user.\n\t\t\tprint(\"Note: looks like we don't have an access token yet. Let's fetch one.\")\n\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope)\n\n\t\t\tauthorization_base_url = \"https://www.fitbit.com/oauth2/authorize\"\n\n\t\t\tauthorization_url, state = self.fitbit.authorization_url(authorization_base_url)\n\n\t\t\tprint(\"Please go to the following authorization URL: {}\".format(authorization_url))\n\n\t\t\traw_callback_url = input(\"Paste callback URL you get back here: \")\n\n\t\t\tself.fitbit.token_from_fragment(raw_callback_url)\n\t\t\tself.token = self.fitbit.token['access_token']\n\n\t\t\tprint(self.fitbit.token)\n\n\t\telse:\n\t\t\t# We've got an access token, and we'll use it.\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope, token=token)\n\t\t\tself.token = token['access_token']",
"def _create_user_object(self) -> None:\n\n token = util.prompt_for_user_token(self._USERNAME, self.scope, self._CLIENT_ID, self._CLIENT_SECRET, self.redirect_uri)\n self.spotipyObject = spotipy.Spotify(auth=token)",
"async def async_setup(hass, config):\n conf = config[DOMAIN]\n\n username = conf[CONF_USERNAME]\n password = conf[CONF_PASSWORD]\n accounts = conf.get(CONF_ACCOUNTS)\n\n @callback\n def websocket_handle_playlists(hass, connection, msg):\n \"\"\"Handle get playlist\"\"\"\n import spotipy\n access_token, expires = get_spotify_token(username=username, password=password)\n client = spotipy.Spotify(auth=access_token)\n resp = client._get('views/made-for-x?content_limit=10&locale=en&platform=web&types=album%2Cplaylist%2Cartist%2Cshow%2Cstation', limit=10,\n offset=0)\n connection.send_message(\n websocket_api.result_message(msg[\"id\"], resp)\n )\n\n def get_spotify_token(username, password):\n import spotify_token as st\n data = st.start_session(username, password)\n access_token = data[0]\n # token_expires = data[1]\n expires = data[1] - int(time.time())\n return access_token, expires\n\n def play(client, spotify_device_id, uri, random_song, repeat):\n # import spotipy\n # import http.client as http_client\n # spotipy.trace = True\n # spotipy.trace_out = True\n # http_client.HTTPConnection.debuglevel = 1\n\n _LOGGER.debug('Version: %s, playing URI: %s on device-id: %s', _VERSION, uri, spotify_device_id)\n if uri.find('track') > 0:\n _LOGGER.debug('Playing track using uris= for uri: %s', uri)\n client.start_playback(device_id=spotify_device_id, uris=[uri])\n else:\n if uri == 'random':\n _LOGGER.debug('Cool, you found the easter egg with playing a random playlist')\n playlists = client.user_playlists('me', 50)\n no_playlists = len(playlists['items'])\n uri = playlists['items'][random.randint(0, no_playlists - 1)]['uri']\n kwargs = {'device_id': spotify_device_id, 'context_uri': uri}\n if random_song:\n results = client.user_playlist_tracks(\"me\", uri)\n position = random.randint(0, results['total'] - 1)\n _LOGGER.debug('Start playback at random position: %s', position)\n kwargs['offset'] = {'position': position}\n\n _LOGGER.debug('Playing context uri using context_uri for uri: \"%s\" (random_song: %s)', uri, random_song)\n client.start_playback(**kwargs)\n if repeat:\n _LOGGER.debug('Turning repeat on')\n time.sleep(5)\n client.repeat(state=repeat, device_id=spotify_device_id)\n\n def get_account_credentials(call):\n \"\"\" Get credentials for account \"\"\"\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd\n\n def shouldTransferPlayback(call, client):\n \"\"\" Check if something is playing \"\"\"\n uri = call.data.get(CONF_SPOTIFY_URI)\n if uri is None or uri.strip() == '' or call.data.get(CONF_TRANSFER_PLAYBACK):\n current_playback = client.current_playback()\n if current_playback is not None:\n _LOGGER.debug('current_playback from spotipy: %s', current_playback)\n return True\n return False\n\n async def start_casting(call):\n \"\"\"service called.\"\"\"\n import spotipy\n\n uri = call.data.get(CONF_SPOTIFY_URI)\n random_song = call.data.get(CONF_RANDOM, False)\n repeat = call.data.get(CONF_REPEAT)\n\n # Account\n user, pwd = get_account_credentials(call)\n\n # login as real browser to get powerful token\n access_token, expires = get_spotify_token(username=user, password=pwd)\n\n # get the spotify web api client\n client = spotipy.Spotify(auth=access_token)\n\n # launch the app on chromecast\n spotify_cast_device = SpotifyCastDevice(hass, 
call.data.get(CONF_DEVICE_NAME), call.data.get(CONF_ENTITY_ID))\n spotify_cast_device.startSpotifyController(access_token, expires)\n spotify_device_id = spotify_cast_device.getSpotifyDeviceId(client)\n\n transfer_playback = shouldTransferPlayback(call, client)\n if transfer_playback == True:\n _LOGGER.debug('Transfering playback')\n client.transfer_playback(\n device_id=spotify_device_id, force_play=True)\n else:\n play(client, spotify_device_id, uri, random_song, repeat)\n\n # Register websocket and service\n hass.components.websocket_api.async_register_command(\n WS_TYPE_SPOTCAST_PLAYLISTS, websocket_handle_playlists, SCHEMA_PLAYLISTS\n )\n\n hass.services.async_register(DOMAIN, 'start', start_casting,\n schema=SERVICE_START_COMMAND_SCHEMA)\n\n return True",
"def __init__(self, access_key, secret_key, **kwargs):\r\n pass",
"def authorize(self) -> None:\n\n if not self.login_secret:\n #TODO trigger error\n self.login()\n \n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.access_token = sObj.getAccessToken(\n self.oauth_token,\n self.login_secret,\n self.oauth_verifier\n )"
] | [
"0.7275522",
"0.7169035",
"0.7160154",
"0.7054514",
"0.70088327",
"0.6976066",
"0.6880722",
"0.6798923",
"0.6781801",
"0.67027813",
"0.66602165",
"0.6602743",
"0.64574254",
"0.6410066",
"0.6331362",
"0.63175875",
"0.62989295",
"0.6185303",
"0.6183346",
"0.6170306",
"0.6090339",
"0.60597277",
"0.60455346",
"0.60439485",
"0.60426766",
"0.6041466",
"0.6040368",
"0.60253686",
"0.6013",
"0.59943753"
] | 0.78885454 | 0 |
[creates a new playlist with given name and desc] | def create_new_playlist(self, name, desc=''):
pl_names, _, _ = self.list_playlists()
if name in pl_names:
self.logger.debug(
'playlist name already exists, please use another name')
else:
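# create the playlist as private first; Spotify only allows non-public playlists to be made collaborative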
pl = self.sp.user_playlist_create(
self.user, name, public=False, description=desc)
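# mark the newly created playlist collaborative so other users can add tracks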
self.sp.user_playlist_change_details(
self.user, pl['id'], collaborative=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")",
"def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')",
"def create_playlist(self, data):\n pass",
"def create_playlist(self, playlist_name):\n #self._video_playlist.name=playlist_name\n #self._video_playlist.caseless=playlist_name.lower()\n #print(f\"Successfully created new playlist: {self._video_playlist.name}\")\n if playlist_name.lower() not in self.playlists:\n self.playlists[playlist_name.lower()]=[]\n print(\"Successfully created new playlist: {0}\".format(playlist_name))\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")",
"def create_playlist(self, playlist_name):\n for playlist in self.playlists.keys():\n if playlist_name.upper() == playlist.upper():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n break\n else:\n self.playlists[playlist_name]=[]\n print(\"Successfully created new playlist: \" + playlist_name)\n # print(\"create_playlist needs implementation\")",
"def create_playlist(self, request):\n # TODO: Max amount of playlists at 20 for a user\n user = Account.find_by_id(request.userid)\n if user is None:\n print \"User not found\" \n return PlaylistResponse(errmsg=\"User ID not found\")\n new_pl = Playlist.add_new_playlist(user.key, request.name)\n return PlaylistResponse(pid=new_pl.key.id())",
"def create_playlist(self, playlist_name):\n if playlist_name.upper() in self.playlist.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist[playlist_name.upper()] = []\n self.playlist_list.append(playlist_name)\n print(f\"Successfully created new playlist: {playlist_name}\")",
"def create_playlist(self, playlist_name):\n if playlist_name.lower() in self.playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist_names[playlist_name.lower()] = playlist_name\n self.playlists[playlist_name.lower()] = []\n print(\"Successfully created new playlist:\", playlist_name)",
"def create_playlist(self, playlist_name):\n new_playlist_id = playlist_name.lower()\n if new_playlist_id in self.playlists.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n\n new_playlist = Playlist(playlist_name)\n self.playlists[new_playlist_id] = new_playlist\n print(f\"Successfully created new playlist: {playlist_name}\")",
"def playlist_create(self, user_id: str, name: str, public: bool = True,\n description: str = ''):\n payload = {\n 'name': name,\n 'public': public,\n 'description': description\n }\n return self._post(f'users/{user_id}/playlists', payload=payload)",
"def create_playlist(self, playlist_name):\n if playlist_name.lower() in self._playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n print(f\"Successfully created new playlist: {playlist_name}\")\n self._playlists[playlist_name.lower()] = Playlist(playlist_name)",
"def create_playlist(self, playlist_name):\n playlist_name = Playlist()\n if self != playlist_name:\n print(f\"successfully created new playlist: {playlist_name}\")\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")",
"def create_playlist(self, title, description=\"\"):\n if self.youtube is None:\n self.youtube = __get_client()\n # This code creates a new, private playlist in the authorized user's\n # channel.\n playlists_insert_response = self.youtube.playlists().insert(\n part=\"snippet,status\",\n body = {\n \"snippet\": {\n \"title\": title,\n \"description\": description\n },\n \"status\": {\n \"privacyStatus\": \"private\"\n }\n }\n ).execute()\n return playlists_insert_response",
"def spotify_create_playlist(\n playlist_name: str,\n access_token: str,\n user_spotify_id: str,\n public: bool = True,\n description: str = None,\n) -> str:\n headers = {\n \"Authorization\": \"Bearer {}\".format(access_token),\n \"Content-Type\": \"application/json\",\n }\n body = {\"name\": playlist_name, \"public\": public}\n if description is not None:\n body[\"description\"] = description\n response = requests.post(\n \"https://api.spotify.com/v1/users/{}/playlists\".format(user_spotify_id),\n headers=headers,\n json=body,\n )\n if response.status_code != 200 and response.status_code != 201:\n return \"Error {}\".format(response.text)\n return response.json()[\"id\"]",
"def create_playlist(self, name):\n\n user_id = self.get_current_user()\n endpoint = f\"/users/{user_id}/playlists\"\n headers = self.headers\n headers.update()\n response = self._send(\n endpoint,\n \"POST\",\n extra_headers={\"Content-Type\": \"application/json\"},\n data=json.dumps({\"name\": name, \"public\": False})\n )\n playlist_id = response.json()[\"id\"]\n return playlist_id",
"def create_playlist(user_id, sp, recommendations, name, description):\r\n \r\n # Get current user ID\r\n current_user = sp.current_user()\r\n current_user_id = current_user['id']\r\n \r\n # Get list of track ID's\r\n track_id_list = list(recommendations['id'].values)\r\n \r\n # Create Empty playlist\r\n sp.user_playlist_create(user = user_id, \r\n name = name, \r\n description = description)\r\n \r\n # Get playlist ID\r\n playlists = sp.current_user_playlists(limit=1)\r\n playlist_name = playlists['items'][0]['name']\r\n playlist_id = playlists['items'][0]['id']\r\n \r\n # Add tracks to playlist\r\n sp.user_playlist_add_tracks(user = current_user_id, \r\n playlist_id = playlist_id, \r\n tracks = track_id_list)\r\n \r\n # Check if playlist is succesfully created.\r\n if name == playlist_name:\r\n return '**Playlist was succesfully created on your Spotify account.**'\r\n else:\r\n return '**Playlist was not succesfully created.**'",
"def newpl(self, args):\n if not args:\n self.err_print('One argument required')\n return\n elif len(args) == 1:\n plname = args[0]\n\n if self.pl_exists(plname) >= 0:\n self.err_print('Playlist \"{}\" already exists'.format(plname))\n return\n\n playlist.Playlist.init_pl(plname, self.ui.db)\n newpl = menu.Music_menu(win=self.ui.rightwin.win,\n data=playlist.Playlist(name=plname, db=self.ui.db),\n form=config.SONG_DISP,\n palette=self.ui.palette[0], ui=self.ui)\n else:\n plname = args[0]\n plfile = args[1]\n if not os.path.isfile(plfile):\n self.err_print('File does not exist: {}.'.format(plfile))\n return\n\n if self.pl_exists(plname) >= 0:\n self.err_print('Playlist \"{}\" already exists'.format(plname))\n return\n\n playlist.init_pl(plname, self.ui.db)\n newpl = menu.Menu(win=self.ui.rightwin.win,\n data=playlist.Playlist(name=plname, db=self.ui.db),\n form=config.SONG_DISP,\n cursor_colour=config.CURSOR[0],\n highlight_colour=config.HIGHLIGHT_COLOUR[0],\n normal_colour=config.NORMAL[0])\n\n newpl.insert_from_file(plfile)\n\n self.ui.leftwin.insert(newpl)\n self.ui.leftwin.disp()",
"def user_playlist_create(self, user, name, public=True, description=\"\", **kwargs):\n # pylint: disable=no-member\n data = {\"name\": name, \"public\": public, \"description\": description}\n return self._post(\n API.PLAYLISTS.value.format(user_id=user), payload=data, **kwargs\n )",
"def create_playlist(self, playlist_name: str, song_ids: List[str]) -> str:\n user = self.init_user()\n user_id = user.me()['id']\n playlist_data = user.user_playlist_create(\n user=user_id, name=playlist_name, public=True)\n user.playlist_add_items(playlist_data['id'], song_ids)\n playlist_link = playlist_data['external_urls']['spotify']\n return playlist_link",
"def createspotifyplaylist(accesstoken, name, playlists, tracklist, userid):\n\n # find a unique name for the playlist\n playlistname = \"{} - flowed\".format(name)\n if playlistname in playlists:\n num = 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n while playlistname in playlists:\n num = num + 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n\n # create playlist\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"name\"] = playlistname\n\n url = \"https://api.spotify.com/v1/users/{}/playlists\".format(userid)\n\n r = requests.post(url, headers=headers, json=payload)\n\n response = r.json()\n\n\n if \"collaborative\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(url, headers=headers, json=payload)\n response = r.json()\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n elif \"collaborative\" in response:\n break\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n else: \n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n\n playlistid = response[\"id\"]\n playlisturl = response[\"external_urls\"][\"spotify\"]\n\n # add tracks to playlist\n while len(tracklist) > 100:\n\n # add first 100\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist[:100]\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print(\"error: problem adding songs to playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem adding songs to playlist\")\n print(\"no error response\")\n return(False)\n\n tracklist = tracklist[100:]\n\n if tracklist:\n\n # add the remainder of the tracks\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n response = r.json()\n if \"snapshot_id\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist 
request failed\")\n print(\"no error response\")\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n\n return(playlistname, playlisturl)",
"def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]",
"def create_dp_playlist(msg):\n print ''\n print '------'\n print '***Dynamic Programming method***'\n print 'Original message: ', msg\n # Normalize and tokenize message and use it to query songs\n words = normalize(msg).split(' ')\n songs = ngram_search(words)\n # Form playlist and print\n playlist = dp_parse(normalize(msg), songs=songs)\n print 'Playlist: '\n print '# | SONG TITLE | ARTIST | ALBUM'\n for i, p in enumerate(playlist[2]):\n song_info = '{0} | {1} | {2}'.format(p.Title, ', '.join(p.Artists),\n p.Album)\n print '{0}. | '.format(i + 1) + song_info",
"def create_playlist(self):\n playlist=self.sp.user_playlist_create(user=self.username,name=self.nameOfPlaylist,description=self.description)\n return playlist['id']",
"def __init__(self, pname, pmax, plist):\n\n #the player has to have... \n self.name = pname\n self.max_items = pmax\n self.items = plist",
"def playlist_add(nums, playlist):\n nums = _parse_multi(nums)\n\n if not g.userpl.get(playlist):\n playlist = playlist.replace(\" \", \"-\")\n g.userpl[playlist] = Playlist(playlist)\n\n for songnum in nums:\n g.userpl[playlist].songs.append(g.model.songs[songnum - 1])\n dur = g.userpl[playlist].duration\n f = (len(nums), playlist, g.userpl[playlist].size, dur)\n g.message = F('added to saved pl') % f\n\n if nums:\n save_to_file()\n\n g.content = generate_songlist_display()",
"def create_new_pl(self, params):\n name = params[ONE]\n user = params[ZERO]\n songs = params[2].split('&')\n msg = self.db.create_new_pl(songs, name, user)\n self.send_message(msg)",
"def add_playlist(self, names, printQueue=False):\n idtoadd = [self.listIDs[n] for n in names]\n self.spotify.add_playlist_to_queue(idtoadd)\n\n if printQueue:\n self.console.print('This is your current queue: ')\n self.console.print(self.spotify.queue.loc[:10, ['name', 'album', 'artist']])",
"def playlist(self, channel_list, limit, part='contentDetails', only_id=1):\n playlist_details = {}\n key = self.keylist[self.keyindex]\n url_pi = 'https://www.googleapis.com/youtube/v3/playlistItems/'\n\n if limit <= 50 and limit > 0:\n maxResults = limit\n else:\n maxResults = 50\n\n for chnlid in channel_list:\n vidcount = initial = 0\n nextPageToken = ''\n results = []\n # print('UU'+chnlid[2:])\n try:\n while nextPageToken or initial == 0:\n querystring = {\n 'playlistId': 'UU' + chnlid[2:],\n 'part': part,\n 'key': key,\n 'pageToken': nextPageToken,\n 'maxResults': maxResults\n }\n\n\n response = request_handler(self, url_pi, params=querystring, wait=5) #ids=chnlid)\n # print(\"#\"*5, response.json())\n # print(response.json())\n if response.get('error'):\n while response['error']['errors'][0]['reason'] == 'quotaExceeded' or \\\n response['error']['errors'][0]['reason'] == 'dailyLimitExceeded':\n key = keychange(self)\n querystring = {\n 'playlistId': 'UU' + chnlid[2:],\n 'part': part,\n 'key': key,\n 'pageToken': nextPageToken,\n 'maxResults': maxResults\n }\n\n response = request_handler(self, url_pi, params=querystring, wait=5, ids=chnlid)\n\n if response.get('error'):\n playlist_details.update({chnlid: 'error'})\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{chnlid:'error'}]\n break\n\n if response.get('Interneterror'):\n results.append(response)\n #print(playlist_details)\n break\n\n if limit == -1:\n limit = response['pageInfo']['totalResults']\n # print(response,response.text)\n \n if only_id == 1:\n for i in range(response['pageInfo']['resultsPerPage']):\n try:\n results.append(response['items'][i]['contentDetails']['videoId'])\n except:\n pass\n else:\n results.append(response['items'])\n nextPageToken = response.get('nextPageToken')\n vidcount += len(response['items'])\n if vidcount >= limit:\n break\n print(\"Video id found: \", chnlid, \" : \", vidcount)\n #{'error':[]}\n \n initial += 1\n \n playlist_details.update({chnlid:results})\n\n except Exception as e:\n print(\"Error: \", e, \" : \", traceback.print_exc())\n playlist_details[chnlid] = 'error'\n break\n\n return playlist_details",
"def NewListFromParameters(self, name:str, desc:str) -> AbstractItemList:\n ret = self.NewList()\n ret._name = name\n ret._desc = desc\n return ret",
"def new_queue(self, params, maxUsers=1):\n self.spotify.reset_queue()\n\n #Extract parameters\n mood = params[0]\n users = []\n for i in range(maxUsers):\n if len(params[i + 1]) > 0:\n users.append(params[i + 1])\n\n #Add default host if only one guest is present\n # if len(users) == 1:\n # users.append('Paolo')\n\n #Lists to load\n names = []\n for n in self.listIDs.keys():\n for u in users:\n if len(mood) > 0:\n if u + ':' + mood in n:\n names.append(n)\n else:\n if 'top:' + u in n:\n names.append(n)\n\n\n self.add_playlist(names)"
] | [
"0.7272547",
"0.68430287",
"0.673538",
"0.6641419",
"0.661609",
"0.65667766",
"0.6543431",
"0.6541754",
"0.6530266",
"0.6512793",
"0.6474164",
"0.64047396",
"0.63702667",
"0.6338417",
"0.6316106",
"0.6193181",
"0.617585",
"0.61417544",
"0.60676473",
"0.6046706",
"0.5912048",
"0.5755235",
"0.5707441",
"0.5701937",
"0.56886506",
"0.56847966",
"0.5667215",
"0.5581864",
"0.5569559",
"0.5500984"
] | 0.7137164 | 1 |
Method which calculates TS Percentage metric for a player | def set_ts_percentage(self):
bx = self.get_standard_stats()
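# true shooting attempts = FGA + 0.44 * FTA; TS% = points / (2 * TS attempts) * 100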
ptos = float(bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"])
tcInt = float(bx["t2p_int"] + bx["t3p_int"])
tsAttempts = float(tcInt + (0.44*float(bx["tl_int"])))
result = 0.00
if tsAttempts > 0.00:
result = (ptos/(2*tsAttempts))*100
self.ts_percentage = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pct(self):\n\t\treturn self.bottle.pct()",
"def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def getPercent(*args):",
"def getPercent(*args):",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)",
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"",
"def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None",
"def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp",
"def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def percentage(count, total):\n return count / total * 100",
"def get_percent(self):\n return self.percent",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress",
"def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress",
"def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)",
"def as_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)/'\n score_str += 'I: ' + str(self.i_score) + '(' + str(self.i_pct) + '%) - '\n score_str += 'N: ' + str(self.n_score) + '(' + str(self.n_pct) + '%)/'\n score_str += 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%) - '\n score_str += 'F: ' + str(self.f_score) + '(' + str(self.f_pct) + '%)/'\n score_str += 'T: ' + str(self.t_score) + '(' + str(self.t_pct) + '%) - '\n score_str += 'J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)/'\n score_str += 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)'\n return score_str",
"def update_percent(self):",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)"
] | [
"0.67912775",
"0.67874295",
"0.66820705",
"0.6615735",
"0.6549017",
"0.65139776",
"0.65139776",
"0.6465816",
"0.64634633",
"0.6446837",
"0.64173675",
"0.64117974",
"0.6390257",
"0.63858217",
"0.6356354",
"0.6316925",
"0.6298419",
"0.62818795",
"0.6281119",
"0.6268235",
"0.6221657",
"0.62200534",
"0.6174268",
"0.6174268",
"0.61564356",
"0.61564356",
"0.61437255",
"0.6129586",
"0.61163414",
"0.6111541"
] | 0.745896 | 0 |
Method which calculates USG% for each player from each team | def set_usg_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
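# USG% = 100 * ((FGA + 0.44*FTA + TOV) * (team minutes / 5)) / (player minutes * (team FGA + 0.44*team FTA + team TOV))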
tcInt = bx["t2p_int"] + bx["t3p_int"]
a = tcInt + (Decimal('0.44')*bx["tl_int"]) + bx["turnovers"]
b = team["minutes"]/5
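# five players share the team's minutes, so team["minutes"]/5 is one lineup slot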
c = (team["t2p_int"] + team["t3p_int"]) + (Decimal('0.44')*team["tl_int"]) + team["turnovers"]
result = 0.00
if bx["minutes"] > 0:
result = ((Decimal(a)*Decimal(b))/(bx["minutes"]*c))*100
self.usg_percentage = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)",
"def simulate(team, N=100):\n\n total_score = 0.0\n for player in team:\n simulation_score = []\n for i in range(N):\n simulation_score.append(get_player_score(player))\n total_score += np.mean(simulation_score)\n\n return total_score",
"def get_team_results(usrs, sched):\t\n\t\n\ttotal_consistency = 0\n\ttotal_completion = 0\n\tfor user in usrs:\n\t\tresult = get_consistency(user, sched)\n\t\t\n\t\ttotal_consistency += result[\"consistency\"]\n\t\ttotal_completion += result[\"completion\"]\n\t\n\tteam_consistency = 0\n\tteam_completion = 0\n\t\t\n\tif(len(usrs) != 0):\n\t\tteam_consistency = total_consistency / float(len(usrs))\n\t\tteam_completion = total_completion / float(len(usrs))\n\t\t\n\treturn { \"consistency\" : team_consistency, \"completion\" : team_completion }",
"def getStats(population, masterList):\n for team in population:\n for i in range(13): #13 are the number of roster spots?\n team.totHr += masterList[team.roster[i]].hr\n team.totAvg += masterList[team.roster[i]].avg\n team.totRuns += masterList[team.roster[i]].runs\n team.totSb += masterList[team.roster[i]].sb\n team.totRbi += masterList[team.roster[i]].rbi\n if i == 12:\n team.totAvg = team.totAvg / 13\n return population",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def main(simulations, userschoice):\n # The teams data are obtained from FIFA statistics\n # Team Name, Attack, Defence\n quarters = ['quarter1', 'quarter2', 'quarter3', 'quarter4', 'quarter5', 'quarter6', 'quarter7', 'quarter8']\n semifinalists = ['semifinalist1', 'semifinalist2', 'semifinalist3', 'semifinalist4']\n finalists = ['finalist1', 'finalist2']\n\n df = pd.read_csv('FifaRankings.csv', index_col=\"Ranking\")\n a_set = set()\n while True:\n a_set.add(randint(42, 85))\n if len(a_set) == 32:\n break\n lst1 = sorted(list(a_set), reverse=True)\n\n a_set = set()\n while True:\n a_set.add(randint(38, 83))\n if len(a_set) == 32:\n break\n lst2 = sorted(list(a_set), reverse=True)\n print(\"\\n\")\n df['Attack'] = lst1\n df['Defence'] = lst2\n a = list(df[\"Team\"])\n\n avgScored = 0\n avgConceded = 0\n avgScored = df['Attack'].sum()\n avgConceded = df['Defence'].sum()\n\n avgScored = avgScored / len(df)\n avgConceded = avgConceded / len(df)\n print(\"\\n\")\n avgattack = []\n avgdefense = []\n\n for i in range(1, 33):\n if df['Matches Played'][i] != 0:\n win_rate = (df['WorldCup Wins'][i] / df['Matches Played'][i])\n else:\n win_rate = 0\n avgattack.append((df['Attack'][i] / avgScored) + win_rate)\n avgdefense.append((df['Defence'][i] / avgConceded) + win_rate)\n\n df['Avg Attack'] = avgattack\n df['Avg Defense'] = avgdefense\n\n\n teamstats=[]\n for i in range(1,len(df)+1):\n teaminfo=[]\n teaminfo = (df[\"Team\"][i], df['Avg Attack'][i], df['Avg Defense'][i])\n teaminfo=list(teaminfo)\n teamstats.append(teaminfo)\n\n germany = WorldCupTeam(\"GERMANY\", teamstats)\n brazil = WorldCupTeam(\"BRAZIL\", teamstats)\n belgium = WorldCupTeam(\"BELGIUM\", teamstats)\n portugal = WorldCupTeam(\"PORTUGAL\", teamstats)\n argentina = WorldCupTeam(\"ARGENTINA\", teamstats)\n france = WorldCupTeam(\"FRANCE\", teamstats)\n switzerland = WorldCupTeam(\"SWITZERLAND\", teamstats)\n spain = WorldCupTeam(\"SPAIN\", teamstats)\n russia = WorldCupTeam(\"RUSSIA\", teamstats)\n japan = WorldCupTeam(\"JAPAN\", teamstats)\n polland=WorldCupTeam(\"POLLAND\", teamstats)\n korea_republic = WorldCupTeam(\"KOREA REPUBLIC\", teamstats)\n england = WorldCupTeam(\"ENGLAND\", teamstats)\n denmark= WorldCupTeam(\"DENMARK\", teamstats)\n peru= WorldCupTeam(\"PERU\", teamstats)\n tunisia=WorldCupTeam(\"TUNISIA\", teamstats)\n mexico = WorldCupTeam(\"MEXICO\", teamstats)\n colombia = WorldCupTeam(\"COLOMBIA\", teamstats)\n uruguay = WorldCupTeam(\"URUGUAY\", teamstats)\n croatia = WorldCupTeam(\"CROATIA\", teamstats)\n australia = WorldCupTeam(\"AUSTRALIA\", teamstats)\n iceland=WorldCupTeam(\"ICELAND\", teamstats)\n sweden=WorldCupTeam(\"SWEDEN\", teamstats)\n costa_rica = WorldCupTeam(\"COSTA RICA\", teamstats)\n senegal=WorldCupTeam(\"SENEGAL\", teamstats)\n serbia=WorldCupTeam(\"SERBIA\", teamstats)\n morrocco=WorldCupTeam(\"MORROCCO\", teamstats)\n egypt=WorldCupTeam(\"EGYPT\", teamstats)\n nigeria = WorldCupTeam(\"NIGERIA\", teamstats)\n saudi_arabia=WorldCupTeam(\"SAUDI ARABIA\", teamstats)\n panama=WorldCupTeam(\"PANAMA\", teamstats)\n iran = WorldCupTeam(\"IRAN\", teamstats)\n\n\n #INPUT USERS CHOICE FOR FIXED CHOICE\n choices= [\"random\", \"Random\", \"RANDOM\"]\n choicess = [\"fixed\", \"Fixed\", \"FIXED\"]\n if userschoice in choices:\n countries = [germany, brazil, belgium, portugal, argentina, france, switzerland, spain, russia, japan, polland,\n korea_republic, england, denmark, peru, tunisia, mexico, colombia, uruguay, croatia, australia,\n iceland, sweden, costa_rica, senegal, serbia, morrocco, egypt, 
nigeria, saudi_arabia, panama, iran]\n finalresults = {}\n\n GroupA, GroupB, GroupC, GroupD, GroupE, GroupF, GroupG, GroupH = ([] for i in range(8))\n\n Groups = [GroupA, GroupB, GroupC, GroupD, GroupE, GroupF, GroupG, GroupH]\n for i in Groups:\n for j in range(4):\n teamname = choice(countries)\n i.append(teamname)\n countries.remove(teamname)\n\n print(\"DRAWS for the WorldCup 2018 are:\")\n print(\"\\n\")\n for i in range(simulations):\n # Play first stage\n print(\"Result of\", i + 1, \"simulations\")\n print(\"--------------------------------------------\")\n print(\"This is GROUP STAGE\")\n print(\"\\n\")\n print(\"GROUP A RESULTS\")\n print(\"\\n\")\n groupA = TeamPool(Groups[0])\n print(\"\\n\")\n print(\"GROUP B RESULTS\")\n print(\"\\n\")\n groupB = TeamPool(Groups[1])\n print(\"\\n\")\n print(\"GROUP C RESULTS\")\n print(\"\\n\")\n groupC = TeamPool(Groups[2])\n print(\"\\n\")\n print(\"GROUP D RESULTS\")\n print(\"\\n\")\n groupD = TeamPool(Groups[3])\n print(\"\\n\")\n print(\"GROUP E RESULTS\")\n print(\"\\n\")\n groupE = TeamPool(Groups[4])\n print(\"\\n\")\n print(\"GROUP F RESULTS\")\n print(\"\\n\")\n groupF = TeamPool(Groups[5])\n print(\"\\n\")\n print(\"GROUP G RESULTS\")\n print(\"\\n\")\n groupG = TeamPool(Groups[6])\n print(\"\\n\")\n print(\"GROUP H RESULTS\")\n print(\"\\n\")\n groupH = TeamPool(Groups[7])\n\n # Play second stage\n print(\"\\n\")\n print(\"ROUND OF 16\")\n print(\"\\n\")\n r16 = [groupA.first_qualified, groupA.second_qualified, groupB.first_qualified, groupB.second_qualified,\n groupC.first_qualified, groupC.second_qualified, groupD.first_qualified, groupD.second_qualified,\n groupE.first_qualified, groupE.second_qualified, groupF.first_qualified, groupF.second_qualified,\n groupG.first_qualified, groupG.second_qualified, groupH.first_qualified, groupH.second_qualified]\n\n\n GroupP, GroupQ, GroupR, GroupS, GroupT, GroupU, GroupV, GroupW =([] for i in range(8))\n\n round16groups = [GroupP, GroupQ, GroupR, GroupS, GroupT, GroupU, GroupV, GroupW]\n\n for k in round16groups:\n for j in range(2):\n teamname = choice(r16)\n k.append(teamname)\n r16.remove(teamname)\n\n for i in range(8):\n quarters[i]=WorldCupMatch(round16groups[i][0], round16groups[i][1], False).winner\n\n # Quarters\n print(\"\\n\")\n print(\"QUARTER - FINALS\")\n print(\"\\n\")\n quarterfinal = [quarters[0], quarters[1], quarters[2], quarters[3], quarters[4], quarters[5], quarters[6],\n quarters[7]]\n GroupA1, GroupB1, GroupC1, GroupD1 = ([] for i in range(4))\n\n quarterfinalgroups = [GroupA1, GroupB1, GroupC1, GroupD1]\n\n i = 0\n for i in quarterfinalgroups:\n for j in range(2):\n teamname = choice(quarterfinal)\n i.append(teamname)\n quarterfinal.remove(teamname)\n\n for i in range(4):\n semifinalists[i] = WorldCupMatch(quarterfinalgroups[i][0], quarterfinalgroups[i][1], False).winner\n\n # Semifinals\n print(\"\\n\")\n print(\"SEMI - FINALS\")\n print(\"\\n\")\n\n semifinal = [semifinalists[0], semifinalists[1], semifinalists[2], semifinalists[3]]\n GroupP1, GroupQ1 = ([] for i in range(2))\n semifinalgroups = [GroupP1, GroupQ1]\n\n i = 0\n for i in semifinalgroups:\n for j in range(2):\n teamname = choice(semifinal)\n i.append(teamname)\n semifinal.remove(teamname)\n\n for i in range(2):\n finalists[i] = WorldCupMatch(semifinalgroups[i][0], semifinalgroups[i][1], False).winner\n # Finals\n print(\"\\n\")\n print(\"WORLD-CUP FINAL\")\n print(\"\\n\")\n winner = WorldCupMatch(finalists[0], finalists[1], False).winner\n print(\"\\n\")\n\n if winner.name in finalresults:\n 
finalresults[winner.name] += 1\n else:\n finalresults[winner.name] = 1\n\n for key in sorted(finalresults, key=finalresults.get, reverse=True):\n print(key + \": \" + str(finalresults[key] / simulations))\n ro=(finalresults[key] / simulations) * 100\n print(str(ro) + \"% chance of winning the worldcup\")\n print(\"\\n\")\n print(\"\\n\")\n\n\n elif userschoice in choicess:\n\n print(\"\\n\")\n finalresults = {}\n groupA1 = [russia , saudi_arabia,egypt, uruguay]\n groupB1 = [portugal, spain, morrocco, iran]\n groupC1 = [france, australia, peru, denmark]\n groupD1 = [argentina, iceland, croatia, nigeria]\n groupE1 = [brazil, switzerland, costa_rica, serbia]\n groupF1 = [germany, mexico, sweden, korea_republic]\n groupG1 = [belgium, panama, tunisia, england]\n groupH1 = [polland, senegal, colombia, japan]\n print(\"\\n\")\n for i in range(simulations):\n # Play first stage\n print(\"Result of\", i+1 ,\"simulations\")\n print(\"--------------------------------------------\")\n print(\"This is GROUP STAGE\")\n print(\"\\n\")\n print(\"GROUP A RESULTS\")\n print(\"\\n\")\n groupA = TeamPool(groupA1)\n print(\"\\n\")\n print(\"GROUP B RESULTS\")\n print(\"\\n\")\n groupB = TeamPool(groupB1)\n print(\"\\n\")\n print(\"GROUP C RESULTS\")\n print(\"\\n\")\n groupC = TeamPool(groupC1)\n print(\"\\n\")\n print(\"GROUP D RESULTS\")\n print(\"\\n\")\n groupD = TeamPool(groupD1)\n print(\"\\n\")\n print(\"GROUP E RESULTS\")\n print(\"\\n\")\n groupE = TeamPool(groupE1)\n print(\"\\n\")\n print(\"GROUP F RESULTS\")\n print(\"\\n\")\n groupF = TeamPool(groupF1)\n print(\"\\n\")\n print(\"GROUP G RESULTS\")\n print(\"\\n\")\n groupG = TeamPool(groupG1)\n print(\"\\n\")\n print(\"GROUP H RESULTS\")\n print(\"\\n\")\n groupH = TeamPool(groupH1)\n print(\"Qualifies teams:\", groupH.first_qualified.name)\n\n # Play second stage\n print(\"\\n\")\n print(\"ROUND OF 16\")\n print(\"\\n\")\n\n quarter1 = WorldCupMatch(groupA.first_qualified, groupA.second_qualified, False).winner\n quarter2 = WorldCupMatch(groupB.first_qualified, groupB.second_qualified, False).winner\n quarter3 = WorldCupMatch(groupC.first_qualified, groupC.second_qualified, False).winner\n quarter4 = WorldCupMatch(groupD.first_qualified, groupD.second_qualified, False).winner\n quarter5 = WorldCupMatch(groupE.first_qualified, groupE.second_qualified, False).winner\n quarter6 = WorldCupMatch(groupF.first_qualified, groupF.second_qualified, False).winner\n quarter7 = WorldCupMatch(groupG.first_qualified, groupG.second_qualified, False).winner\n quarter8 = WorldCupMatch(groupH.first_qualified, groupH.second_qualified, False).winner\n\n # Quarters\n print(\"\\n\")\n print(\"QUARTER - FINALS\")\n print(\"\\n\")\n\n semifinalist1 = WorldCupMatch(quarter1, quarter2, False).winner\n semifinalist2 = WorldCupMatch(quarter3, quarter4, False).winner\n semifinalist3 = WorldCupMatch(quarter5, quarter6, False).winner\n semifinalist4 = WorldCupMatch( quarter7, quarter8, False).winner\n\n # Semifinals\n print(\"\\n\")\n print(\"SEMI - FINALS\")\n print(\"\\n\")\n finalist1 = WorldCupMatch(semifinalist1, semifinalist2, False).winner\n finalist2 = WorldCupMatch(semifinalist3, semifinalist4, False).winner\n\n # Final\n print(\"\\n\")\n print(\"WORLD-CUP FINAL\")\n print(\"\\n\")\n winner = WorldCupMatch(finalist1, finalist2, False).winner\n print(\"\\n\")\n\n\n if winner.name in finalresults:\n finalresults[winner.name] += 1\n else:\n finalresults[winner.name] = 1\n\n for key in sorted(finalresults, key=finalresults.get, reverse=True):\n print(key + \": \" + 
str(finalresults[key] / simulations))\n rou = (finalresults[key] / simulations) * 100\n print(str(rou) + \"% chance of winning the worldcup\")\n print(\"\\n\")\n print(\"\\n\")\n else:\n print(\"Please enter correct input and try again\")\n pass",
"def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))",
"def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def _get_percentages(games_table: pd.DataFrame, stats_table: pd.DataFrame,\n grouping_column: str) -> pd.DataFrame:\n stats_table[\n [\n \"total_free_throws_achieved\",\n \"total_free_throws_attempted\",\n \"total_two_point_achieved\",\n \"total_two_point_attempted\",\n \"total_three_point_achieved\",\n \"total_three_point_attempted\",\n ]\n ] = (\n games_table[\n [\n grouping_column,\n \"free_throws_achieved\",\n \"free_throws_attempted\",\n \"two_point_achieved\",\n \"two_point_attempted\",\n \"three_point_achieved\",\n \"three_point_attempted\",\n ]\n ]\n .groupby(grouping_column)\n .sum()\n .reset_index()\n .drop(grouping_column, axis=1)\n )\n\n stats_table[\"free_throws_pct\"] = (\n stats_table[\"total_free_throws_achieved\"] / stats_table[\"total_free_throws_attempted\"]\n )\n stats_table[\"two_point_pct\"] = (\n stats_table[\"total_two_point_achieved\"] / stats_table[\"total_two_point_attempted\"]\n )\n stats_table[\"three_point_pct\"] = (\n stats_table[\"total_three_point_achieved\"] / stats_table[\"total_three_point_attempted\"]\n )\n return stats_table",
"def processed_overall(self):\n self.processed_overall = (\n self.combine_both_winning_losing_games_stats\n .rename(columns={\"WTeamID\":\"TeamID\"})\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n [['Season','TeamID','win_rate','total_score','total_opponent_score','fgp','fg3p','ftp', 'total_rebounds','total_off_rebounds','total_def_rebounds',\n 'total_off_rebounds_percent','total_def_rebounds_percent','total_rebound_possession_percent','total_rebound_possessiongain_percent','total_blocks',\n 'total_assists','total_steals','total_turnover','total_personalfoul','total_block_opp_FGA_percent','total_assist_per_fgm','total_assist_turnover_ratio',\n 'expectation_per_game','avg_lose_score_by','avg_win_score_by']]\n )",
"def get_player_stats_from_game(team, year, week):",
"def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval",
"def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )",
"def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores",
"def advancedStats():",
"def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)",
"def get_stats(self):\n\n win_points = 0\n lose_points = 0\n\n for username in self.bets:\n bet_for_win, points = self.bets[username]\n if bet_for_win:\n win_points += points\n else:\n lose_points += points\n\n return win_points, lose_points",
"def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate",
"def gamestats(self, table, curr_team):\n\n # Drop unneeded header \n tmp = table.iloc[1:,]\n # Fix the column names by reading line 0\n tmp.columns = [x.replace(\" \", \"\").replace(\"/\",\"\").replace(\".\",\"\") for x in tmp.iloc[0]]\n # Drop row zero which held the header row\n tmp = tmp.drop(tmp.index[0])\n # Forward fill the dates for defensive split later \n tmp['Date'].fillna(method='ffill', inplace = True)\n # Add in the team \n tmp['Team'] = curr_team\n # Create an offense/defense variable\n tmp['OffenseDefense'] = tmp['Opponent']\n # If it's not a defensive total then it's offense - set that in the offensedefense variable\n tmp['OffenseDefense'] = tmp['OffenseDefense'].apply(lambda x: \"Defense\" if x == \"Defensive Totals\" else \"Offense\")\n # Set the defensive totals in the opponent varaible to nullls\n tmp['Opponent'] = tmp['Opponent'].apply(lambda x: None if x == \"Defensive Totals\" else x)\n # Forward fill the opponents in for analysis later\n tmp['Opponent'].fillna(method='ffill', inplace = True)\n # Forward fill the results in for analysis later \n tmp['Result'].fillna(method='ffill', inplace = True)\n return tmp",
"def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'",
"def test_get_team_strength(self):\n pass",
"def update_mean_and_count(self, strat_profile, game_outcome):\n self.total_interactions += 1\n for k in range(self.G.n_players):\n self.mu[k][strat_profile] *= self.count[k][strat_profile]\n self.mu[k][strat_profile] += game_outcome[k]\n self.count[k][strat_profile] += 1\n self.mu[k][strat_profile] /= self.count[k][strat_profile]\n\n for s in self.V:\n self.count_history[s].append(self.count[0][s] /\n float(self.total_interactions))",
"def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score"
] | [
"0.67434204",
"0.6500815",
"0.6359718",
"0.63065845",
"0.63019216",
"0.6301762",
"0.6299715",
"0.6148283",
"0.6126874",
"0.6103746",
"0.59502584",
"0.59436804",
"0.59398586",
"0.5932481",
"0.58808863",
"0.58741057",
"0.58336693",
"0.58299714",
"0.57860583",
"0.5785277",
"0.57851714",
"0.5772505",
"0.5765083",
"0.5754176",
"0.57456493",
"0.5744941",
"0.57435244",
"0.57411337",
"0.5740637",
"0.57328707"
] | 0.6911841 | 0 |
Method which calculates Total Rebound Percentage | def set_total_reb_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
player_rebounds = bx["reb_def"] + bx["reb_of"]
team_rebounds = team["reb_def"] + team["reb_of"]
opp_team_rebounds = opp_team["reb_def"] + opp_team["reb_of"]
result = 0.00
try:
if bx["minutes"] > 0:
result = ((player_rebounds * (team["minutes"]/5)) / (bx["minutes"] * (team_rebounds + opp_team_rebounds)))*100
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: Division by zero" + BCOLORS.ENDC)
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
self.total_reb_percentage = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_total_reb_of_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"reb_of\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_of\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_of\"] + opp_team[\"reb_def\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n self.total_reb_of_percentage = \"%.2f\" % round(result, 2)",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def set_total_reb_def_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_def\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_def\"] + opp_team[\"reb_of\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_def_percentage = \"%.2f\" % round(result, 2)",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def percentage(count, total):\n return count / total * 100",
"def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)",
"def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number",
"def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0",
"def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0",
"def _calculate_result(found, total):\n return (found * 100) / total",
"def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty",
"def rF(count, total):\n\treturn float(count)/float(total)",
"def getPercent(*args):",
"def getPercent(*args):",
"def update_percent(self):",
"def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True",
"def patrimony_total(self):\n pass",
"def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0",
"def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0",
"def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def CalculateBoundProbability(self, psi):\n\n\t\t_, _, _, boundTotal = self.CalculateBoundDistribution(psi)\n\n\t\treturn boundTotal",
"def get_free_set_percentage(self, params):\n raise NotImplementedError()",
"def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp",
"def fractionPassing(self):\n return self.cut.entries / self.entries",
"def robbins(counts):\n return float(singles(counts))/counts.sum()",
"def norm_percent(raw):\n if sum(raw) != 0:\n return [float(i)/sum(raw)*100 for i in raw]\n else:\n return [0 for i in raw]",
"def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den"
] | [
"0.6523484",
"0.6491968",
"0.63697445",
"0.6303792",
"0.6300233",
"0.62549525",
"0.6121597",
"0.6087575",
"0.60844654",
"0.6044818",
"0.6027688",
"0.6009361",
"0.5989413",
"0.5969596",
"0.59609574",
"0.59609574",
"0.59606254",
"0.5904455",
"0.5899307",
"0.58770585",
"0.584258",
"0.58401334",
"0.58382976",
"0.5824316",
"0.5794439",
"0.5781197",
"0.5766814",
"0.5712529",
"0.57073605",
"0.57034516"
] | 0.6903449 | 0 |
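The target above is the standard Total Rebound Percentage formula, TRB% = 100 * (TRB * (Tm MP / 5)) / (MP * (Tm TRB + Opp TRB)). A minimal self-contained sketch of the same calculation on plain floats, with guards instead of exception handling (the function name and sample numbers are illustrative, not taken from the dataset):

def total_rebound_percentage(trb, minutes, team_minutes, team_trb, opp_trb):
    # TRB% = 100 * (player TRB * (team MP / 5)) / (player MP * (team TRB + opp TRB))
    denominator = minutes * (team_trb + opp_trb)
    if minutes <= 0 or denominator == 0:
        return 0.00  # guard up front instead of catching ZeroDivisionError
    return round(100 * (trb * (team_minutes / 5)) / denominator, 2)

# e.g. 10 boards in 30 minutes, 80 combined team/opponent boards in a 200-minute team game
assert total_rebound_percentage(10, 30, 200, 42, 38) == 16.67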
Method which calculates Total Rebound Defensive Percentage | def set_total_reb_def_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
result = 0.00
try:
if bx["minutes"] > 0:
result = ((bx["reb_def"] * (team["minutes"]/5)) / (bx["minutes"] * (team["reb_def"] + opp_team["reb_of"])))*100
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: Division by zero" + BCOLORS.ENDC)
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
self.total_reb_def_percentage = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)",
"def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def set_total_reb_of_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"reb_of\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_of\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_of\"] + opp_team[\"reb_def\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n self.total_reb_of_percentage = \"%.2f\" % round(result, 2)",
"def get_free_set_percentage(self, params):\n raise NotImplementedError()",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0",
"def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty",
"def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True",
"def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget",
"def overall_reduction(self):\n return 84",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def calculate_profit(self):",
"def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)",
"def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0",
"def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0",
"def max_occupancy_percent_for_deferred_work(self):\n return self._max_occupancy_percent_for_deferred_work",
"def _calc_freeze_probability(self, num_iterations, final_fraction):\n return 1.0 - (final_fraction ** (1.0 / num_iterations))",
"def mask_percentage(self):\n return 100 - self.tissue_percentage",
"def penalty(self):\n return 0",
"def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp",
"def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0",
"def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity",
"def total_sdram_requirements(self):",
"def recall(self) -> float:\n if self.ref_ignored:\n num_ref_ignored = len(self.ref_set) - len(self.ref_unignored_set)\n self.num_ignored += num_ref_ignored\n # True Positive = the number of unignored reference mappings that are Positive\n tp = len(self.ref_unignored_set.intersection(self.pre_set))\n # False Negative = the number of unignored reference mappings that are Negative\n fn = len(self.ref_set) - tp - num_ref_ignored\n return tp / (tp + fn)",
"def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0",
"def rF(count, total):\n\treturn float(count)/float(total)",
"def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number",
"def update_percent(self):"
] | [
"0.6720814",
"0.65615296",
"0.64223516",
"0.63799196",
"0.63289535",
"0.63207704",
"0.63154423",
"0.62732965",
"0.6261528",
"0.6167289",
"0.6124796",
"0.61081874",
"0.6068133",
"0.60473025",
"0.5999613",
"0.5988745",
"0.59840715",
"0.5983306",
"0.5973883",
"0.59696484",
"0.59576386",
"0.59197676",
"0.59165156",
"0.587996",
"0.5865504",
"0.5859559",
"0.585895",
"0.5858603",
"0.5855112",
"0.5829298"
] | 0.6653622 | 1 |
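The defensive variant swaps in only the defensive terms: DRB% = 100 * (DRB * (Tm MP / 5)) / (MP * (Tm DRB + Opp ORB)). A plain-float sketch under the same illustrative assumptions as the one above:

def def_rebound_percentage(drb, minutes, team_minutes, team_drb, opp_orb):
    # DRB% = 100 * (player DRB * (team MP / 5)) / (player MP * (team DRB + opp ORB))
    denominator = minutes * (team_drb + opp_orb)
    if minutes <= 0 or denominator == 0:
        return 0.00
    return round(100 * (drb * (team_minutes / 5)) / denominator, 2)

assert def_rebound_percentage(8, 30, 200, 25, 12) == 28.83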
Method which calculates Steals Percentage of a player | def set_steals_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
poss = self.get_team_possessions()
result = 0.00
if bx["minutes"] > 0:
result = ((bx["steals"] * (team["minutes"]/Decimal('5'))) / Decimal(float(bx["minutes"]) * poss)) * 100
self.steals_percentage = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def set_ts_percentage(self):\n bx = self.get_standard_stats()\n ptos = float(bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tsAttempts = float(tcInt + (0.44*float(bx[\"tl_int\"])))\n result = 0.00\n if tsAttempts > 0.00:\n result = (ptos/(2*tsAttempts))*100\n self.ts_percentage = \"%.2f\" % round(result, 2)",
"def per_hour(self):\n if self.is_salary():\n return 0.0\n return self.wage_cents / 100.0",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def getPercent(*args):",
"def getPercent(*args):",
"def percentage(count, total):\n return count / total * 100",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps",
"def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)",
"def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress",
"def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate",
"def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))",
"def get_percent(self):\n return self.percent",
"def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))",
"def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None",
"def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)",
"def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)"
] | [
"0.6973858",
"0.68804467",
"0.68694776",
"0.6837882",
"0.6696774",
"0.65600723",
"0.6460961",
"0.6452967",
"0.64353186",
"0.6403429",
"0.6403429",
"0.6369753",
"0.6368016",
"0.63537014",
"0.63537014",
"0.63495165",
"0.6336215",
"0.632231",
"0.62704694",
"0.6268536",
"0.6268536",
"0.6254428",
"0.6247955",
"0.6245096",
"0.62445295",
"0.62402153",
"0.62060624",
"0.6184223",
"0.61820304",
"0.61702466"
] | 0.77550447 | 0 |
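Steals Percentage normalizes steals by the possessions that elapsed while the player was on the floor: STL% = 100 * (STL * (Tm MP / 5)) / (MP * Poss). In the target the possession count comes from self.get_team_possessions(); in this illustrative sketch it is simply a parameter:

def steals_percentage(steals, minutes, team_minutes, possessions):
    # STL% = 100 * (steals * (team MP / 5)) / (player MP * possessions)
    if minutes <= 0 or possessions == 0:
        return 0.00
    return round(100 * (steals * (team_minutes / 5)) / (minutes * possessions), 2)

assert steals_percentage(3, 30, 200, 72) == 5.56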
Method which calculates Assists Percentage of a player | def set_assists_percentage(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
team_tc_conv = team["t2p_conv"] + team["t3p_conv"]
player_tc_conv = bx["t2p_conv"] + bx["t3p_conv"]
result = 0.00
try:
if bx["minutes"] > 0:
result = (bx["assists"] / (((bx["minutes"] / (team["minutes"] / 5)) * team_tc_conv) - player_tc_conv))*100
result = result if 0 <= result <= 100 else 0
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: Division by zero" + BCOLORS.ENDC)
except InvalidOperation:
print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
self.assists_percentage = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def get_opinion_percent(self):\n return (self.get_percent()+100)/2",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def percentage(count, total):\n return count / total * 100",
"def getPercent(*args):",
"def getPercent(*args):",
"def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def percentage(a, b):\n return (a * 100.0) / b",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None",
"def set_assists_per_turnover(self):\n bx = self.get_standard_stats()\n ratio = bx[\"assists\"]\n if bx[\"turnovers\"] > 0:\n ratio = bx[\"assists\"] / bx[\"turnovers\"]\n self.assists_per_turnover = \"%.2f\" % round(ratio, 2)",
"def get_crawlera_incapsula_percent(crawlera_user):\n if crawlera_user:\n return 0\n else:\n return 100",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def get_percent(self):\n return self.percent",
"def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"",
"def walkout_percentage_average(df,start_year, end_year,bat_met, player_name):\n base_fields = ['PA']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n bb_val = round((pd.to_numeric(df['BB.'].str.split('%').str[0])/100)*df['PA'],0).sum()\n pa_total = df['PA'].fillna(0).sum()\n return \"{:.2%}\".format(bb_val / pa_total)\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return walkout_percentage_average(df,start_year, end_year,bat_met, player_name)",
"def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)",
"def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)",
"def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))",
"def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def update_percent(self):"
] | [
"0.7001746",
"0.68421483",
"0.6813",
"0.67631215",
"0.67387015",
"0.6656908",
"0.6627645",
"0.6627645",
"0.6618226",
"0.6604493",
"0.66013306",
"0.6542234",
"0.6510403",
"0.64416814",
"0.64405453",
"0.64405453",
"0.64318055",
"0.64273596",
"0.63814425",
"0.63652843",
"0.6354084",
"0.6325873",
"0.6309475",
"0.6297875",
"0.6286082",
"0.6275193",
"0.62622076",
"0.62381136",
"0.62343794",
"0.62315017"
] | 0.767859 | 0 |
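Assists Percentage estimates the share of teammate field goals a player assisted while on the floor: AST% = 100 * AST / (((MP / (Tm MP / 5)) * Tm FGM) - FGM), clamped to 0-100 as in the target. A plain-float sketch (names and numbers are illustrative):

def assists_percentage(assists, minutes, team_minutes, team_fgm, fgm):
    # denominator: teammate field goals made while the player was on the floor
    if minutes <= 0:
        return 0.00
    denominator = (minutes / (team_minutes / 5)) * team_fgm - fgm
    if denominator == 0:
        return 0.00
    result = 100 * assists / denominator
    return round(result, 2) if 0 <= result <= 100 else 0.00

# 6 assists in 30 of 200 team minutes, 30 team FGM of which 5 are the player's own
assert assists_percentage(6, 30, 200, 30, 5) == 34.29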
Method which calculates Ratio Assists Per Turnover of a player | def set_assists_per_turnover(self):
bx = self.get_standard_stats()
ratio = bx["assists"]
if bx["turnovers"] > 0:
ratio = bx["assists"] / bx["turnovers"]
self.assists_per_turnover = "%.2f" % round(ratio, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)",
"def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)",
"def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp",
"def mc_update_scores(scores, board, player):\n dim = board.get_dim()\n winner = board.check_win()\n other_player = provided.switch_player(player)\n \n if winner == provided.DRAW:\n ratio = {player: 0, other_player: 0, 1: 0}\n elif winner == player:\n ratio = {player: 0 + SCORE_CURRENT, other_player: 0 - SCORE_OTHER, provided.EMPTY: 0}\n elif winner == other_player:\n ratio = {player: 0 - SCORE_CURRENT, other_player: 0 + SCORE_OTHER, provided.EMPTY: 0}\t\n \n for valx in range(dim):\n for valy in range(dim): \n scores[valx][valy] += ratio[board.square(valx, valy)] \n return scores",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate",
"def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores",
"def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def win_ratio_avg(self):\n win_ratio = 0\n # Adds all the win ratios of team in this conference which will be\n # used to compute the win ratio average.\n for team_obj in self._conf_teams:\n ### INVARIANT: team_obj is a Team class object and\n ### self._conf_teams is a list of Team class objects.\n win_ratio += team_obj._win_ratio\n return win_ratio/len(self._conf_teams)",
"def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100",
"def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)",
"def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2",
"def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2",
"def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2",
"def rate_board(board, player):\n approx_player_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == player\n )\n approx_opponent_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == -player\n )\n return approx_player_moves - approx_opponent_moves",
"def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total",
"def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result",
"def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score",
"def custom_score_3(game, player):\n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: log of avaliable moves ratio\n return float(log(own_moves/opp_moves))"
] | [
"0.69899476",
"0.66916925",
"0.66868806",
"0.661133",
"0.64811707",
"0.63243896",
"0.62888366",
"0.62563837",
"0.623092",
"0.6226826",
"0.6220208",
"0.62145406",
"0.6178597",
"0.6162016",
"0.6135676",
"0.6127374",
"0.6116254",
"0.60962725",
"0.60838556",
"0.6064083",
"0.6043542",
"0.6043173",
"0.6042079",
"0.6042079",
"0.6042079",
"0.6026387",
"0.6002399",
"0.5992365",
"0.5990092",
"0.59802413"
] | 0.7008181 | 0 |
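Assists per turnover is a plain ratio with a fallback for turnover-free box scores, mirroring the guard in the target method; a one-function sketch:

def assists_per_turnover(assists, turnovers):
    # fall back to the raw assist count when there are no turnovers
    return round(assists / turnovers, 2) if turnovers > 0 else round(float(assists), 2)

assert assists_per_turnover(7, 3) == 2.33
assert assists_per_turnover(4, 0) == 4.0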
Method which calculates Assists Ratio of a player | def set_assists_ratio(self):
bx = self.get_standard_stats()
tcInt = float(bx["t2p_int"] + bx["t3p_int"])
denominador = tcInt + (0.44 * float(bx["tl_int"])) + float(bx["assists"]) + float(bx["turnovers"])
numerador = float(bx["assists"])
result = 0.00
if denominador > 0:
result = (numerador / denominador) * 100
self.assists_ratio = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def set_assists_per_turnover(self):\n bx = self.get_standard_stats()\n ratio = bx[\"assists\"]\n if bx[\"turnovers\"] > 0:\n ratio = bx[\"assists\"] / bx[\"turnovers\"]\n self.assists_per_turnover = \"%.2f\" % round(ratio, 2)",
"def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def current_ratio(self):\n return self.current_assets / self.current_liabilities",
"def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def horizontal_ratio(self):\n if self.pupils_located:\n pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)\n pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)\n return (pupil_left + pupil_right) / 2",
"def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / 
(team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result",
"def golden_ratio():\n print((1+math.sqrt(5))/2)",
"def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def GetResult(self, playerjm):\n return self.score / len(self.scores)",
"def win_ratio_avg(self):\n win_ratio = 0\n # Adds all the win ratios of team in this conference which will be\n # used to compute the win ratio average.\n for team_obj in self._conf_teams:\n ### INVARIANT: team_obj is a Team class object and\n ### self._conf_teams is a list of Team class objects.\n win_ratio += team_obj._win_ratio\n return win_ratio/len(self._conf_teams)",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100",
"def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes",
"def _ratio(a1, a2):\n abs_residues = np.abs(a1 - a2).sum()\n avg_abs_sum = 0.5 * np.abs(a1).sum() + 0.5 * np.abs(a2).sum()\n return abs_residues / avg_abs_sum",
"def calculate_score_pairs(hand_value,*args):\n # ratios=[1,10,100,1000,10000]\n ratios = CONST.RATIOS[:]\n return sum(map(lambda a,b:a/b, args, ratios))+hand_value",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores",
"def mc_update_scores(scores, board, player):\n dim = board.get_dim()\n winner = board.check_win()\n other_player = provided.switch_player(player)\n \n if winner == provided.DRAW:\n ratio = {player: 0, other_player: 0, 1: 0}\n elif winner == player:\n ratio = {player: 0 + SCORE_CURRENT, other_player: 0 - SCORE_OTHER, provided.EMPTY: 0}\n elif winner == other_player:\n ratio = {player: 0 - SCORE_CURRENT, other_player: 0 + SCORE_OTHER, provided.EMPTY: 0}\t\n \n for valx in range(dim):\n for valy in range(dim): \n scores[valx][valy] += ratio[board.square(valx, valy)] \n return scores",
"def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = 
int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n 
), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == 
\"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 
50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n 
overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating"
] | [
"0.72646964",
"0.64316165",
"0.6352074",
"0.6281999",
"0.6247194",
"0.6209247",
"0.6207199",
"0.61734754",
"0.61600745",
"0.61037296",
"0.609562",
"0.607315",
"0.60665053",
"0.60465765",
"0.6046293",
"0.60187536",
"0.6009164",
"0.59791255",
"0.5958189",
"0.59420407",
"0.5881529",
"0.5880197",
"0.58754414",
"0.5865603",
"0.5865355",
"0.5836641",
"0.5815389",
"0.5805758",
"0.57964987",
"0.57841754"
] | 0.7257836 | 1 |
Method which calculates a player's Defensive Ratio: the total points allowed per 100 possessions | def set_defensive_ratio(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
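# Individual defensive rating (Dean Oliver-style formula): the team's defensive rating adjusted by this player's share of defensive "stops".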
if bx["minutes"] > 0:
opp_fga = opp_team["t2p_int"] + opp_team["t3p_int"]
opp_fgm = opp_team["t2p_conv"] + opp_team["t3p_conv"]
try:
dor = Decimal(opp_team["reb_of"] / (opp_team["reb_of"] + team["reb_def"]))
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
dor = 0
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
dor = 0
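# DOR: opponents' offensive-rebound percentage; DFG: opponents' field-goal percentage.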
try:
dfg = Decimal(opp_fgm / opp_fga)
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
dfg = 0
try:
fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))
except (ZeroDivisionError, InvalidOperation):
fmwt = 0
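# Stops1: stops credited directly to the player (steals, weighted blocked shots and defensive rebounds).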
stops1 = bx["steals"] + bx["block_shots"] * fmwt * (1 - Decimal('1.07') * dor) + bx["reb_def"] * (1 - fmwt)
try:
stops2 = (Decimal((opp_fga - opp_fgm - team["block_shots"]) / team["minutes"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team["turnovers"] - team["steals"]) / team["minutes"])) * bx["minutes"] + Decimal(bx["fouls_cm"] / team["fouls_cm"]) * Decimal('0.4') * opp_team["tl_int"] * (1 - Decimal(opp_team["tl_conv"] / opp_team["tl_int"]))**2
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
stops2 = 0
except InvalidOperation:
print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
stops2 = 0
stops = stops1 + stops2
poss = self.get_team_possessions()
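# Stop%: the fraction of opponent possessions that end in a stop while the player is on court.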
if bx["minutes"] > 0:
stop_percentage = (float(stops) * float(opp_team["minutes"])) / (float(poss) * float(bx["minutes"]))
else:
stop_percentage = 0.00
opp_points = opp_team["t2p_conv"] * 2 + opp_team["t3p_conv"] * 3 + opp_team["tl_conv"]
team_defensive_rating = 100 * (float(opp_points) / poss)
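# Final blend: DRtg = TeamDRtg + 0.2 * (100 * D_pts_per_scoring_poss * (1 - Stop%) - TeamDRtg).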
try:
d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team["tl_conv"]) / float(opp_team["tl_int"])))**2) * float(opp_team["tl_int"])*0.4)
result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
d_pts_per_scposs = 0
result = 0.00
# print("dor: " + str(dor))
# print("dfg: " + str(dfg))
# print("fmwt: " + str(fmwt))
# print("stops1: " + str(stops1))
# print("stops2: " + str(stops2))
# print("stops: " + str(stops))
# print("poss: " + str(poss))
# print("stop_percentage: " + str(stop_percentage))
# print("opp_points: " + str(opp_points))
# print("team_defensive_rating: " + str(team_defensive_rating))
# print("d_pts_per_scposs: " + str(d_pts_per_scposs))
# print("drtg: " + str(result) + "\n")
else:
result = 0.00
self.drtg = "%.2f" % round(result, 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / 
(team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount",
"def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def KPI(self, total=True):\n \n data = self.select_table('ChordLog')\n correct = data[data['PredictedLabel'] == data['ActualLabel']]\n\n # % correctly predicted in chord net\n human_level_performance = (len(correct) / len(data)) * 100\n \n # round value\n human_level_performance = round(human_level_performance, 4) \n \n return human_level_performance",
"def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def offensive_rating(data_frame, mode):\n off_rat = dict()\n average_points = calculate_average_points(data_frame, mode)\n for k, possessions in possessions_home_away(data_frame, mode).items():\n try:\n off_rat[k] = format(float(average_points[k]) * 100 / float(possessions), '.2f')\n except ZeroDivisionError:\n off_rat[k] = 0.0\n return off_rat",
"def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def ratio_local_prod(self):\n if self.current_energy_produced == 0.0:\n return 1.0\n else:\n return 1. - self.export_grid / self.current_energy_produced",
"def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total",
"def get_free_set_percentage(self, params):\n raise NotImplementedError()",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)",
"def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)",
"def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)",
"def calculate_probability(self):\n return 0",
"def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100",
"def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0",
"def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def get_improved_score_factor(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)"
] | [
"0.6531963",
"0.6401618",
"0.61817557",
"0.6132738",
"0.6081823",
"0.6071461",
"0.60199296",
"0.5966795",
"0.5962311",
"0.59621847",
"0.5957963",
"0.59393156",
"0.59319943",
"0.5922137",
"0.5918801",
"0.5913682",
"0.5907549",
"0.58991516",
"0.5896585",
"0.5889761",
"0.5885497",
"0.58792573",
"0.5869189",
"0.5865168",
"0.58413595",
"0.5838629",
"0.5836382",
"0.58291775",
"0.5822119",
"0.58184373"
] | 0.65123755 | 1 |
Method which calculates a player's Offensive Ratio: the total points scored per 100 possessions | def set_offensive_ratio(self):
bx = self.get_standard_stats()
team = self.get_team_stats()
opp_team = self.get_opp_team_stats()
if bx["minutes"] > 0 and (bx["t2p_int"] + bx["t3p_int"]) > 0:
fgm = bx["t2p_conv"] + bx["t3p_conv"]
fga = bx["t2p_int"] + bx["t3p_int"]
team_fgm = team["t2p_conv"] + team["t3p_conv"]
team_fga = team["t2p_int"] + team["t3p_int"]
team_points = team["t2p_conv"]*2 + team["t3p_conv"]*3 + team["tl_conv"]
points = bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"]
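# qAST: estimated fraction of the player's made field goals that were assisted.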
try:
qAST = (Decimal(bx["minutes"] / (team["minutes"] / 5)) * (Decimal('1.14') * Decimal((team["assists"] - bx["assists"]) / team_fgm))) + \
Decimal((((team["assists"] / team["minutes"]) * bx["minutes"] * 5 - bx["assists"]) / ((team_fgm / team["minutes"]) * bx["minutes"] * 5 - fgm)) * (1 - (bx["minutes"] / (team["minutes"] / 5))))
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
qAST = 1
except InvalidOperation:
print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
qAST = 1
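# Scoring-possession parts: credit for made field goals (discounted when assisted), for assists, and for free throws.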
fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx["tl_conv"]) / (2 * fga)) * qAST)
try:
ast_part = Decimal('0.5') * Decimal(((team_points - team["tl_conv"]) - (points - bx["tl_conv"])) / (2*(team_fga - fga))) * bx["assists"]
except ZeroDivisionError:
print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
ast_part = 0
if bx["tl_int"] > 0:
ft_part = Decimal(1 - (1 - (bx["tl_conv"] / bx["tl_int"]))**2) * Decimal('0.4') * bx["tl_int"]
else:
ft_part = 0
team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team["tl_conv"] / team["tl_int"]))**2) * team["tl_int"] * Decimal('0.4'))
try:
team_orb_percentage = Decimal(team["reb_of"] / (team["reb_of"] + ((opp_team["reb_def"] + opp_team["reb_of"]) - opp_team["reb_of"])))
except ZeroDivisionError:
print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
team_orb_percentage = 0
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
team_orb_percentage = 0
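# Play%: scoring possessions per possession used; the ORB weight splits rebound credit between shooter and rebounder.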
team_play_percentage = Decimal(team_scoring_poss / (team_fga + team["tl_int"] * Decimal('0.4') + team["turnovers"]))
try:
team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
team_orb_weight = 0
orb_part = bx["reb_of"] * team_orb_weight * team_play_percentage
fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)
if bx["tl_conv"] > 0:
ft_x_poss = Decimal((1 - (bx["tl_conv"] / bx["tl_int"]))**2) * Decimal('0.4') * bx["tl_int"]
else:
ft_x_poss = Decimal(1 - (bx["tl_conv"] / 1)**2) * Decimal('0.4') * bx["tl_int"]
try:
sc_poss = (fg_part + ast_part + ft_part) * (1 - (team["reb_of"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
sc_poss = 0
tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx["turnovers"]
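# PProd: individual points produced, split (like scoring possessions) into FG, assist and offensive-rebound parts.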
pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx["t3p_conv"]) * (1 - Decimal('0.5') * Decimal((points - bx["tl_conv"]) / (2 * fga)) * qAST)
try:
pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team["t3p_conv"] - bx["t3p_conv"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team["tl_conv"]) - (points - bx["tl_conv"])) / (2 * (team_fga - fga))) * bx["assists"]
except (ZeroDivisionError, InvalidOperation):
pprod_ast_part = 0
pprod_orb_part = bx["reb_of"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team["tl_conv"] / team["tl_int"])**2) * Decimal('0.4') * team["tl_int"]))
try:
pprod = (pprod_fg_part + pprod_ast_part + bx["tl_conv"]) * (1 - (team["reb_of"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
pprod = 0
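# Offensive rating: points produced per 100 possessions used.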
try:
result = 100 * (pprod / tot_poss)
except InvalidOperation:
print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
result = 0
# print("fgm: " + str(fgm))
# print("fga: " + str(fga))
# print("team_fgm: " + str(team_fgm))
# print("team_fga: " + str(team_fga))
# print("team_points: " + str(team_points))
# print("points: " + str(points))
# print("qAST: " + str(qAST))
# print("fg_part: " + str(fg_part))
# print("ast_part: " + str(ast_part))
# print("ft_part: " + str(ft_part))
# print("team_scoring_poss: " + str(team_scoring_poss))
# print("team_orb_percentage: " + str(team_orb_percentage))
# print("team_play_percentage: " + str(team_play_percentage))
# print("team_orb_weight: " + str(team_orb_weight))
# print("orb_part: " + str(orb_part))
# print("fg_x_poss: " + str(fg_x_poss))
# print("ft_x_poss: " + str(ft_x_poss))
# print("sc_poss: " + str(sc_poss))
# print("tot_poss: " + str(tot_poss))
# print("pprod_fg_part: " + str(pprod_fg_part))
# print("pprod_ast_part: " + str(pprod_ast_part))
# print("pprod_orb_part: " + str(pprod_orb_part))
# print("pprod: " + str(pprod))
# print("result: " + str(result) + "\n")
else:
result = 0.00
self.ortg = "%.2f" % round(result, 2)
if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:
"""For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG
using team's formula"""
print(BCOLORS.OKBLUE + "ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo" + BCOLORS.ENDC)
bx = self.get_standard_stats()
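# Team-formula fallback: total points scored divided by team possessions.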
result = round((bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"])/self.get_team_possessions(), 2)
self.ortg = "%.2f" % result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )",
"def offensive_rating(data_frame, mode):\n off_rat = dict()\n average_points = calculate_average_points(data_frame, mode)\n for k, possessions in possessions_home_away(data_frame, mode).items():\n try:\n off_rat[k] = format(float(average_points[k]) * 100 / float(possessions), '.2f')\n except ZeroDivisionError:\n off_rat[k] = 0.0\n return off_rat",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))",
"def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100",
"def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)",
"def get_opinion_percent(self):\n return (self.get_percent()+100)/2",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward",
"def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()",
"def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)",
"def rate_club(user, club):\n if not user.is_authenticated():\n return None\n if not club.posel_set.exists():\n return None\n return sum(x[1] for x in rank_in_club(user, club)) / club.posel_set.count()",
"def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate",
"def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)",
"def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)",
"def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes",
"def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0",
"def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum",
"def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent",
"def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval",
"def calc_win_lose_ratio(self):\n total = len(self.train_y)\n survived = 0\n for i in self.train_y:\n if i > 0:\n survived += 1\n\n self.survival_sum = [survived, total-survived]",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100"
] | [
"0.6951562",
"0.6515817",
"0.644771",
"0.6344681",
"0.62165296",
"0.62093073",
"0.61908257",
"0.6157803",
"0.6073757",
"0.60568166",
"0.6025391",
"0.60042846",
"0.5987803",
"0.5981942",
"0.5976337",
"0.5951976",
"0.5948813",
"0.5936161",
"0.593527",
"0.593107",
"0.5930244",
"0.59181416",
"0.59173024",
"0.5916344",
"0.5905475",
"0.59005296",
"0.589831",
"0.5896911",
"0.58819103",
"0.58808035"
] | 0.6635203 | 1 |
auth_data will be used as request_data in strategy | def set_input_data(self, request, auth_data):
request.auth_data = auth_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_oauth_data():",
"def __init__(self, my_data, my_auth):\n self.user = my_auth.user\n self.password = my_auth.password\n self.my_data = my_data",
"def authenticate(self, request):\n auth_data = super().authenticate(request)\n if not auth_data:\n return auth_data\n\n user, auth = auth_data\n\n if amr_claim := auth.data.get(\"amr\"):\n user.token_amr_claim = amr_claim\n\n return user, auth",
"def add_auth(self, http_request):\r\n pass",
"def auth(self, user):",
"def auth_token(self):",
"def fake_auth_complete(self, strategy):\r\n args = ()\r\n kwargs = {\r\n 'request': strategy.request,\r\n 'backend': strategy.backend,\r\n 'user': None,\r\n 'response': self.get_response_data(),\r\n }\r\n return strategy.authenticate(*args, **kwargs)",
"def request_data(self):\n pass",
"def auth_extra_arguments(self):\n extra_arguments = super().auth_extra_arguments()\n extra_arguments[\"p\"] = self.policy or self.data.get(\"p\")\n return extra_arguments",
"def set_auth_state(self, data):\n raise NotImplementedError()",
"def _get_auth_string(self):",
"def update_auth_data(self, auth_data: AuthData) -> None:\n self.auth_data.update(auth_data)\n if \"refresh_id\" in self.auth_data:\n self.set_cookie(COOKIE_NAME, self.auth_data[\"refresh_id\"])\n if self.on_auth_data_changed:\n self.on_auth_data_changed(self.auth_data)",
"def authenticate(user, request):",
"def get_request_and_strategy(self, auth_entry=None, redirect_uri=None):\r\n request = self.request_factory.get(\r\n pipeline.get_complete_url(self.backend_name) +\r\n '?redirect_state=redirect_state_value&code=code_value&state=state_value')\r\n request.user = auth_models.AnonymousUser()\r\n request.session = cache.SessionStore()\r\n request.session[self.backend_name + '_state'] = 'state_value'\r\n\r\n if auth_entry:\r\n request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry\r\n\r\n strategy = social_utils.load_strategy(backend=self.backend_name, redirect_uri=redirect_uri, request=request)\r\n request.social_strategy = strategy\r\n\r\n return request, strategy",
"def __init__(self, auth_class):\n self.url = auth_class.url\n self.ticket = auth_class.ticket\n self.CSRF = auth_class.CSRF",
"def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()",
"def on_get(self, req, resp):\n data = req.context['auth']\n tenant = dict(id=data.get('domain_id', None), name=data.get('domain_name', None))\n role = dict(name=data.get('roles')[0])\n user = dict(id=data.get('user_id', None), name=data.get('user_name', None), tenant=tenant, role=role)\n data = dict(user=user)\n req.context['result'] = dict(session=data)\n resp.status = HTTP_200",
"def post(self):\n\n data = request.get_json()\n # data = request.data\n print(\"data: \", data)\n\n arg_parser = reqparse.RequestParser()\n arg_parser.add_argument(\n \"exp\",\n default=15552000,\n help=\"Parameter must be an integer\",\n type=int\n )\n\n args = arg_parser.parse_args()\n\n print(args)\n\n auth = request.authorization\n print(\"auth req: \", auth)\n if not auth:\n # Try extracting from POST body\n print(\"here\")\n auth = request.get_json()\n print(\"here\")\n print(\"auth: \", auth)\n if not auth or not (\"email\" in auth and \"password\" in auth):\n abort(401, \"Missing authentication credentials\")\n\n # if auth[\"is_driver\"]:\n # # if it is a driver\n # user = Driver.identify(auth[\"email\"])\n # password = auth[\"password\"]\n\n # else:\n # # If it is a restaurant\n # user = Restaurant.identify(auth[\"email\"])\n # password = auth[\"password\"]\n\n is_driver = True\n\n user = Driver.identify(auth[\"email\"])\n password = auth[\"password\"]\n\n if not user:\n user = Restaurant.identify(auth[\"email\"])\n is_driver = False\n\n if not user or not user.verify_password(password):\n current_app.logger.warn(\n \"Incorrect credentials for {} from {}\".format(\n auth[\"email\"],\n *request.access_route\n )\n )\n abort(401, \"Incorrect email or password\")\n\n access_token = user.gen_access_token(args[\"exp\"])\n\n current_app.logger.info(\"[AUTH] User {} logged IN from {}\".format(\n user.email,\n *request.access_route\n ))\n\n access_token.update({\n \"is_driver\": is_driver\n })\n\n # return resp, 200\n return access_token",
"def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)",
"def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)",
"def init(self, auth_dict=None):\n self.auth_dict = auth_dict",
"def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token)\n data['access_token'] = access_token\n kwargs.update(data)\n kwargs.update({'response': data, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)",
"def __init__(self, **kwargs):\n self.data_dict = dict()\n self.data_list = dict()\n self.user_id = kwargs[\"user_id\"]",
"def authentication_hook(self):\n pass",
"def auth(self):\n return self.api(self.token)",
"def get_request_auth_app(self):\n pass",
"def __init__(self, auth):\n super(Socrata, self).__init__(auth)\n self.views = Views(auth)\n self.sources = Sources(auth)\n self.configs = Configs(auth)",
"def __init__(self):\n self.auth()",
"def _get_auth_data(self, storage_type, provider_id='default'):\n if storage_type == 'S3':\n return self.s3_auth.get(provider_id, None)\n elif storage_type == 'MINIO':\n return self.minio_auth.get(provider_id, None)\n elif storage_type == 'ONEDATA':\n return self.onedata_auth.get(provider_id, None)\n elif storage_type == 'WEBDAV':\n return self.webdav_auth.get(provider_id, None)\n return None",
"def user_data(self, access_token, *args, **kwargs):\n return self.get_json(\n 'http://sso.rnoep.raccoongang.com/oauth2/access_token/%s/' % access_token,\n params={'access_token': access_token}\n )"
] | [
"0.6669421",
"0.6569925",
"0.63427466",
"0.61100876",
"0.6065976",
"0.6054941",
"0.6029151",
"0.60213995",
"0.6012752",
"0.5897798",
"0.5737002",
"0.5730088",
"0.56567615",
"0.5615505",
"0.56064767",
"0.55513734",
"0.5519616",
"0.5500598",
"0.5468376",
"0.5468376",
"0.54406774",
"0.54257256",
"0.5405854",
"0.54002213",
"0.53946966",
"0.53935367",
"0.53932387",
"0.5377167",
"0.53764945",
"0.53530496"
] | 0.71807003 | 0 |
Tests that only 'admin' can add a product | def test_only_admin_can_create_product(self):
resp = self.admin_create_user()
reply = self.attendant_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Unauthorized Access!')
self.assertEqual(resp.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)",
"def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))",
"def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')",
"def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin(self):\n assert(admin)",
"def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()",
"def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_add_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.add_facility'))",
"def test_add_admin_to_org(self):\n pass",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)",
"def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)",
"def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()",
"def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)",
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_add_product_view_for_unauthenticated_users(client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 302\n assert response.url == \"/accounts/login/?next=/products/add-product/\"",
"def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)",
"def test_add_admin(self):\n self.test_create_user()\n self.test_create_organization()\n url = reverse('MGA:add_admin')\n data = {'admin id': 1, 'org_id': 1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)",
"def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)",
"def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)"
] | [
"0.7732614",
"0.766534",
"0.746467",
"0.73577404",
"0.7302724",
"0.7297793",
"0.7198699",
"0.7088031",
"0.706756",
"0.69749635",
"0.6925475",
"0.6893543",
"0.6830343",
"0.68255603",
"0.6805827",
"0.68006784",
"0.677526",
"0.67369896",
"0.6678268",
"0.66471297",
"0.6601341",
"0.6589541",
"0.6563557",
"0.6555914",
"0.6552673",
"0.6546463",
"0.65432596",
"0.6537618",
"0.6526094",
"0.6525832"
] | 0.8407797 | 0 |
Tests that 'admin' can add a product | def test_admin_create_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))",
"def test_categories_product_admin(self):\n response = self.client.post('api/v1/category/products',\n data=json.dumps(category_product[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 401)\n self.assertIn('unauthorized', str(response.data))",
"def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin(self):\n assert(admin)",
"def test_admin_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Product deleted!')\n self.assertEqual(resp.status_code, 200)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_PRODUCTS_addProduct(browser, url, username, password):\n\t\t\n\t#initialise browser and login with valid credentials\n\tgo_to_admin(browser,url,username,password)\n\n\ttry:\n\t\t# Select \"Add a Product\" link from \"Products\" menu\n\t\tbrowser.find_element_by_link_text('Products').click()\n\t\tbrowser.find_element_by_link_text('Add a Product').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t#Provide Details of the product\n\telement = wait_until_element_present(browser, 'product-name', 'ID')\n\telement.send_keys('Testing')\n\n\ttry:\n\t\tbrowser.find_element_by_id('product-price').send_keys(\"10.45\")\n\t\tbrowser.find_element_by_xpath('//li[@title = \"'+category_name+'\"]/a[text()=\"'+category_name+'\"]').click()\n\t\tbrowser.find_element_by_id('product-weight').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-sku').send_keys(SKU)\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise \t\n\n\ttry:\n\t\tbrowser.execute_script(\"tinyMCE.activeEditor.dom.remove(tinyMCE.activeEditor.dom.select('p'));\")\n\t\tbrowser.execute_script(\"tinymce.activeEditor.execCommand('mceInsertContent', true, \\\"TEST AUTOMATION BANNER\\\");\")\n\texcept WebDriverException:\n\t\tbrowser.find_element_by_id('wysiwyg').clear()\n\t\tbrowser.find_element_by_id('wysiwyg').send_keys('TEST AUTOMATION BANNER')\n\n\ttry:\t\n\t\tbrowser.find_element_by_id('product-width').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-height').send_keys(\"1\")\n\t\tbrowser.find_element_by_id('product-depth').send_keys(\"1\")\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise \n\t\t\n\tUpload \"Image and Video\" of the product\n\ttry:\n\t\tbrowser.find_element_by_link_text('Images & Videos').click()\n\t\tfile = browser.find_element_by_xpath('//input[@class = \"file-upload\"]')\n\t\tfile.send_keys(product_img_path)\n\t\ttime.sleep(15)\n\t\tbrowser.find_element_by_id('product-videos-search-query').send_keys(product_video_url)\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\telement=wait_until_element_present(browser,'product-videos-search','ID')\n\telement.click()\n\n\ttry:\n\t\ttime.sleep(15)\t\n\t\tbrowser.find_element_by_xpath('//label[@for = \"'+product_video_label+'\"]').click()\n\n\t\t#Provide \"Inventory\" detials of the product\n\t\tbrowser.find_element_by_link_text('Inventory').click()\n\t\tbrowser.find_element_by_xpath('//label[@for = \"product-inventory-tracking-1\"]').click()\n\t\tclear_field(browser,'inventory-level')\n\t\tbrowser.find_element_by_id('inventory-level').send_keys(\"123\")\n\t\tclear_field(browser,'inventory-warning-level')\n\t\tbrowser.find_element_by_id('inventory-warning-level').send_keys(\"123\")\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t\t\n\ttry:\t\n\t\t#Select Product Delivery details\n\t\tbrowser.find_element_by_link_text('Delivery/Event Date').click()\n\t\tbrowser.find_element_by_xpath('//label[@for = \"product-event-date-required\"]').click()\n\t\tbrowser.find_element_by_link_text('Details').click()\n\t\tbrowser.find_element_by_name('btn-save').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\n\n\tverify_and_assert_success_message(browser, \"The new product has been added successfully.\", \".alert-success\")\n\n\t# View newly created Product in control 
panel\n\tbrowser.find_element_by_link_text('Products').click()\n\tbrowser.find_element_by_link_text('View Products').click()\n\telement = wait_until_element_present(browser,'search-query','ID')\n\telement.send_keys(SKU)\n\tbrowser.find_element_by_xpath('//button[@class = \"btn btn-secondary filter-button\"]').click()\n\ttime.sleep(15)\n\tbrowser.find_element_by_xpath(\"//tr[contains(.,'\" + SKU + \"')]\").find_element_by_css_selector('.dropdown-trigger').click()\n\tbrowser.find_element_by_link_text('View').click()\n \n #Switching to cart window \n\tfor handle in browser.window_handles:\n\t\t\t\tbrowser.switch_to_window(handle)\n\n\t#Provide required delivery date\n\ttry:\n\t\twait_until_element_present(browser,'EventDateMonth','ID')\n\t\tselect_dropdown_value(browser, 'EventDateMonth', 'Jan')\n\t\tselect_dropdown_value(browser, 'EventDateDay', '1')\n\t\tselect_dropdown_value(browser, 'EventDateYear', '2013')\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\n\t\t\t\n\telement = wait_until_element_present(browser, '//input[contains(@src,\"AddCartButton.gif\")]', 'XPATH')\n\telement.click()\n\n\t#Proceeding to checkout as guest\n\ttime.sleep(15)\n\twait_until_element_present(browser, '//a[@title = \"Click here to proceed to checkout\"]', 'XPATH').click()\n\telement = wait_until_element_present(browser, 'checkout_type_guest', 'ID')\n\telement.click()\n\telement = wait_until_element_present(browser, 'CreateAccountButton', 'ID')\n\telement.click()\n\twait_until_element_present(browser, 'FormField_1', 'ID')\n\n\t#Provide Billing Details and proceed further\n\ttry:\n\t\tbrowser.find_element_by_id('FormField_1').clear()\n\t\tbrowser.find_element_by_id('FormField_1').send_keys('[email protected]')\n\t\tbrowser.find_element_by_id('FormField_4').clear()\n\t\tbrowser.find_element_by_id('FormField_4').send_keys('Virendra')\n\t\tbrowser.find_element_by_id('FormField_5').clear()\n\t\tbrowser.find_element_by_id('FormField_5').send_keys('Brahmbhatt')\n\t\tbrowser.find_element_by_id('FormField_7').clear()\n\t\tbrowser.find_element_by_id('FormField_7').send_keys('234234423234')\n\t\tbrowser.find_element_by_id('FormField_8').clear()\n\t\tbrowser.find_element_by_id('FormField_8').send_keys('George Street')\n\t\tbrowser.find_element_by_id('FormField_10').clear()\n\t\tbrowser.find_element_by_id('FormField_10').send_keys('Sydney')\n\t\tselect_dropdown_value(browser, 'FormField_11', 'Australia')\n\t\tselect_dropdown_value(browser, 'FormField_12', 'New South Wales')\n\t\tbrowser.find_element_by_id('FormField_13').clear()\n\t\tbrowser.find_element_by_id('FormField_13').send_keys('2000')\n\t\tbrowser.find_element_by_css_selector('.Submit .billingButton').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\t\n\n\n\t# Select shipping method\n\telement = wait_until_element_present(browser, \"//input[contains(@id, 'shippingCheck')]\", 'XPATH')\n\telement.click()\n\tbrowser.find_element_by_xpath(\"//div[@class='ML20']/input[@type='submit' and contains(@value,'Continue')]\").click()\n\n\t# Proceed to payment\n\ttry:\n\t\twait_until_element_present(browser, 'bottom_payment_button', 'ID')\n\t\tbrowser.find_element_by_id('bottom_payment_button').click()\n\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n\t#Provide Credit Card Details\n\ttry:\n\t\twait_until_element_present(browser,'creditcard_cctype','ID')\n\t\tselect_dropdown_value(browser, 'creditcard_cctype', 
'Visa')\n\t\tbrowser.find_element_by_id('creditcard_name').send_keys('test')\n\t\tbrowser.find_element_by_id('creditcard_ccno').send_keys('4242424242424242')\n\t\tselect_dropdown_value(browser, 'creditcard_ccexpm', 'Jan')\n\t\tselect_dropdown_value(browser, 'creditcard_ccexpy', '2014')\n\t\tbrowser.find_element_by_id('creditcard_cccvd').send_keys('234')\n\t\tbrowser.find_element_by_xpath('//input[@value = \"Pay for Order\"]')\n\t\t\n\texcept NoSuchElementException:\n\t\tbrowser.save_screenshot('Nosuchelement.png')\n\t\traise\t\n\n # Assert the succes message of Order Creation\n\torder_success_msg = 'YOUR ORDER NUMBER IS'\n\tbrowser_success_msg = browser.find_element_by_xpath('//div[@class = \"alert alert-success\"]/p').text\n\n\tif order_success_msg in browser_success_msg:\n\t\tprint \"I found my text\"\n\t\tassert True\n\telse:\n\t\tprint \"No text\"\n\t\tassert False",
"def test_new_product(self):\n prod = Product(name='New Product', price=100, weight=60,\n flammability=0.9)\n self.assertEqual(prod.explode(), '...BABOOM!!')\n self.assertEqual(prod.stealability(), 'Very stealable!')",
"def test_add_admin_to_org(self):\n pass",
"def test_add_new_product(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"price\": \"4500.0\",\n \"name\": \"Producto 3\",\n \"description\": \"Descripcion de producto 3\"\n }\n\n response = self.client.post('/api/1.0/products/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertNotEqual(response.data['published_date'], '')\n self.assertEqual(response.data['name'], 'Producto 3')\n self.assertEqual(response.data['description'], 'Descripcion de producto 3')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '4500.0')\n self.assertEqual(response.data['seller']['user']['username'], self.username)\n self.assertEqual(response.data['category']['name'], 'deportes')",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_order_product(self):\n self.client.force_authenticate(self.user)\n resp = self.client.post(ORDER_URL, data={\n \"product\": self.product.id,\n \"count\": 1,\n \"option_value\": self.option_value.id\n })\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)",
"def test_add_product_during_auth(self):\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n # Adding a product here should succeed\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n # Adding a product here should go to a new basket, not the one we're auth'ing\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n # Adding a product here should go to basket2, not basket1\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)",
"def test_creating_supply_admin(self):\n request = self.factory.post(\n '/api/supplies/', {'name': '3d printer', 'state': 'good state', 'description': 'prints 3d objects'})\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n try:\n supply = Supply.objects.get(name='3d printer')\n self.assertEqual(supply.name, '3d printer')\n self.assertEqual(supply.state, 'good state')\n self.assertEqual(supply.description, 'prints 3d objects')\n except Supply.DoesNotExist:\n self.fail()",
"def test_add_admin(self):\n self.test_create_user()\n self.test_create_organization()\n url = reverse('MGA:add_admin')\n data = {'admin id': 1, 'org_id': 1}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)",
"def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)",
"def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])",
"def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)",
"def test_create_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Banana',\n 'description': '''\n Bananas are one of the most widely consumed fruits in the\n world for good reason. Eating them could help lower blood\n pressure and reduce the risks of cancer and asthma.\n '''\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.Product.objects.filter(name=data['name']).count(), 1)",
"def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)"
] | [
"0.8125268",
"0.78284925",
"0.7689276",
"0.74345493",
"0.7312975",
"0.72979146",
"0.71163535",
"0.709621",
"0.70959914",
"0.7021732",
"0.6964215",
"0.6953487",
"0.6913336",
"0.6887993",
"0.6846328",
"0.6843517",
"0.6843496",
"0.68297696",
"0.67998624",
"0.6782752",
"0.67686844",
"0.6763103",
"0.67531633",
"0.6730877",
"0.6667929",
"0.6656305",
"0.6649149",
"0.6640939",
"0.6637427",
"0.6598826"
] | 0.79675835 | 1 |
Test admin cannot create a product with a blacklisted token | def test_cannot_create_product_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_create_user_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_security_on_post(self):\n url = '/product/xml/'\n response = self.client.post(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_create_without_token(self):\n url = '/api/ingredients/'\n client = APIClient()\n\n response = client.post(url, self.ingredient_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_admin_cannot_add_item(self):\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You cannot make a sale from an Admin account, Consider having an attendant account\")\n self.assertEqual(response.status_code,401)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)",
"def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)",
"def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_unauthorized_add(self):\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 403",
"def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)"
] | [
"0.82479733",
"0.7897591",
"0.7884179",
"0.78223985",
"0.7625364",
"0.7585557",
"0.7400365",
"0.7375994",
"0.72420007",
"0.7152579",
"0.7103639",
"0.70389014",
"0.6944365",
"0.6929796",
"0.68858266",
"0.68301815",
"0.67753994",
"0.6750073",
"0.6746582",
"0.66715974",
"0.66688746",
"0.66519153",
"0.6631966",
"0.6559985",
"0.65317154",
"0.6529461",
"0.6523732",
"0.6455961",
"0.6450408",
"0.6435281"
] | 0.8788049 | 0 |
Tests that an admin cannot add a product with empty fields | def test_admin_cannot_create_product_with_empty_fields(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='',
category='',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Please enter all fields!')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)",
"def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_create_product_no_data(self):\n resp = self.app.post(\n \"/products\", json={}, content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)",
"def _clean_standalone(self):\n if not self.title:\n raise ValidationError(_(\"Your product must have a title.\"))\n if not self.product_class:\n raise ValidationError(_(\"Your product must have a product class.\"))\n if self.parent_id:\n raise ValidationError(_(\"Only child products can have a parent.\"))",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_product_name_is_required(self):\n product = {\n 'name': '',\n 'price': '100.00',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()",
"def test_product_price_is_required(self):\n product = {\n 'name': 'LAPTOP',\n 'price': '',\n 'image': ''\n }\n res = self.client.post(PRODUCTS_URL, product)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_add_missing_field(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[1]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 400)\n self.assertIn('Missing required parameter', str(response.data))",
"def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_invalid_data_course_add(self, app, auth, field):\n app.admin.add_new_course()\n course_data = CreateCourse.random()\n setattr(course_data, field, None)\n app.course.create_course(course_data)\n assert (\n not app.course.all_required_fields_filled()\n ), \"Empty fields are ignored and user data changed successfully!\"",
"def test_manufacturer_bulk_import_invalid(self):\n form = ManufacturerBulkImportForm(data={\"pk\": \"\"})\n\n self.assertFalse(form.is_valid())",
"def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin_cannot_create_user_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='',\n username='',\n password='',\n role=''\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Please input all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_create_valid_product(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_contentious_prescription_no_rationale(self):\n url = reverse('admin:prescription_prescription_add')\n data = {\n 'name': 'Test',\n 'planned_season': 1,\n 'planned_year': 2013,\n 'region': 1,\n 'district': 1,\n 'location': 'Test location',\n 'perimeter': 20,\n 'area': 100,\n 'purposes': [1],\n 'remote_sensing_priority': 4,\n 'priority': 2,\n 'contentious': True,\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Prescription.objects.count(), 0)\n form = response.context['adminform'].form\n self.assertEqual(form.errors, {\n 'contentious_rationale': ['A contentious burn requires a '\n 'contentious rationale.']\n })"
] | [
"0.7727357",
"0.71426547",
"0.71228373",
"0.6888725",
"0.6875848",
"0.68659735",
"0.68113697",
"0.6800745",
"0.6779112",
"0.67721987",
"0.6709652",
"0.66885155",
"0.6665036",
"0.6651361",
"0.6607132",
"0.66057837",
"0.66039413",
"0.65725106",
"0.6552758",
"0.6544897",
"0.6522389",
"0.65190476",
"0.6440011",
"0.6418874",
"0.64180654",
"0.6415804",
"0.63955593",
"0.63657457",
"0.63615507",
"0.63556045"
] | 0.8373025 | 0 |
Tests that the product_name field cannot contain a number | def test_Product_name_cannot_contain_a_number(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_3',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Please enter strings in name and category!')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_category_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='4dens',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)",
"def prodName(self, pName):\r\n if str(pName).isnumeric() == False:\r\n self.__prodName = pName\r\n else:\r\n raise Exception(\"Product Names cannot be numbers\")",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def lf_is_numeric(x):\n words = x.product_name.split()\n if words[x.word_idx].isnumeric():\n return MODELNAME\n return -1",
"def test_add_sale_with_product_name_not_string(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 1, 'price': 1500, 'quantity': 10, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product name should be a string.')",
"def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')",
"def test_legal_names(self):\n prods = generate_products()\n for obj in prods:\n self.assertRegexpMatches(\n '(\\w{2,10} \\w{0,12}|\\?{0,3}){1}', obj.name)",
"def product_isalpha(product):\n if product.isalpha(): #This verifies if the product that has inserted, has a valid name\n return True\n else:\n return False",
"def test_create_invalid_price_higher_than_999(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 1001\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_create_invalid_product_code_lengthly(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"asdfghjk\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))",
"def test_create_invalid_product_long_name(self):\n product_name = \"1234567890123456789098765432112345678900987654322123\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_number(self):\n descriptor = clone(SPECIES_OBSERVATION_SCHEMA)\n record = {\n 'Observation Date': \"18/08/2016\",\n 'Latitude': -32,\n 'Longitude': 115,\n 'Species Name': 1234\n }\n schema = SpeciesObservationSchema(descriptor)\n with self.assertRaises(Exception):\n schema.cast_species_name(record)",
"def test_create_invalid_product_blank_name(self):\n product_name = \"\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def lf_is_before_num(x):\n words = x.product_name.split()\n if x.word_idx < len(words)-1 and words[x.word_idx+1].isnumeric():\n return MODELNAME\n return -1",
"def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')",
"def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)",
"def test_starts_with_dollar_sign(self):\n with self.assertRaises(ValidationError):\n field_name_validator('$id')",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def test_ends_with_dollar_sign(self):\n try:\n field_name_validator('id$')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')",
"def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)",
"def test_create_invalid_product_no_name(self):\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 47.00\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False",
"def test_ends_with_dollar_sign(self):\n with self.assertRaises(ValidationError):\n db_name_validator('id$')",
"def validate_numeric(column_name, value, column_data_type=\"numeric\"):\n valid = value.isnumeric()\n if not valid:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None",
"def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)",
"def test_non_integer_suffix(self):\n with self.assertRaises(Exception) as exception:\n make_rpm_version('0.1.2preX')\n\n self.assertEqual(\n u'Non-integer value \"X\" for \"pre\". Supplied version 0.1.2preX',\n unicode(exception.exception)\n )",
"def test_stock_and_price_must_be_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock='stock',\n price='money'\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)",
"def isnum(self, x):\n\n return x in '1234567890.-'"
] | [
"0.6742002",
"0.6642998",
"0.6555487",
"0.6555487",
"0.6454647",
"0.62910545",
"0.62258327",
"0.6198766",
"0.6166252",
"0.61075205",
"0.5994341",
"0.5968352",
"0.5964386",
"0.595047",
"0.5949145",
"0.5943742",
"0.59381294",
"0.591026",
"0.5901904",
"0.5901547",
"0.58894527",
"0.5888071",
"0.5883961",
"0.58690155",
"0.5856628",
"0.5855989",
"0.58472776",
"0.5842301",
"0.5831338",
"0.5830257"
] | 0.76176125 | 0 |
Tests that the category field cannot contain a number | def test_category_cannot_contain_a_number(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='4dens',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Please enter strings in name and category!')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_isNumericCategory(self):\r\n obs = self.overview_map.isNumericCategory('Treatment')\r\n self.assertEqual(obs, False)\r\n\r\n obs = self.overview_map.isNumericCategory('DOB')\r\n self.assertEqual(obs, True)",
"def test_isNumericCategory(self):\n obs = self.overview_map.isNumericCategory('Treatment')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.isNumericCategory('DOB')\n self.assertEqual(obs, True)",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def isNumericCategory(self, category):\r\n category_values = self.getCategoryValues(self.SampleIds, category)\r\n\r\n is_numeric = True\r\n for category_value in category_values:\r\n try:\r\n float(category_value)\r\n except ValueError:\r\n is_numeric = False\r\n return is_numeric",
"def test_drop_numbers():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"123,123.123\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"number\"].dropna().empty",
"def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True",
"def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"",
"def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))",
"def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True",
"def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)",
"def test_add_category_integer_name(self):\n category = json.dumps({\n 'name': 8888,\n })\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 400)\n self.assertIn('Numbers cant be a Name', response.data.decode())",
"def test_getCategoryValue_bad_category(self):\n # Nonexistent category.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.354', 'foo')\n # Integer category.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.354', 42)\n # Category of type None.\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\n 'PC.354', None)\n\n # Category on map with no metadata, but that has sample IDs.\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.354', 'Treatment')\n # Integer category on map with no metadata.\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.354', 34)\n # Category of type None on map with no metadata.\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\n 'PC.354', None)",
"def clean(self, value):\n non_decimal = re.compile(r'\\D+')\n value = non_decimal.sub('', value.strip()) \n \n if value and not luhn(value):\n raise forms.ValidationError(\"Please enter a valid credit card number.\")\n return super(BankcardNumberField, self).clean(value)",
"def test_category_invalid(self):\n # wiki and questions\n ques = QuestionFactory(title=u'q1 audio')\n ques.tags.add(u'desktop')\n ans = AnswerFactory(question=ques)\n AnswerVoteFactory(answer=ans, helpful=True)\n\n d1 = DocumentFactory(\n title=u'd1 audio',\n locale=u'en-US',\n category=10,\n is_archived=False,\n tags=[u'desktop'])\n ApprovedRevisionFactory(document=d1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(2, json.loads(response.content)['total'])",
"def is_numeric_and_not_ignored(column):\n if column not in categorical_columns and column not in ignore_columns:\n return True\n return False",
"def test_getCategoryValue_bad_category(self):\r\n # Nonexistent category.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.354', 'foo')\r\n # Integer category.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.354', 42)\r\n # Category of type None.\r\n self.assertRaises(KeyError, self.overview_map.getCategoryValue,\r\n 'PC.354', None)\r\n\r\n # Category on map with no metadata, but that has sample IDs.\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.354', 'Treatment')\r\n # Integer category on map with no metadata.\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.354', 34)\r\n # Category of type None on map with no metadata.\r\n self.assertRaises(KeyError, self.no_metadata.getCategoryValue,\r\n 'PC.354', None)",
"def _validate_data_category(data_category: str) -> str:\n valid_categories = DataCategory.__members__.keys()\n if data_category not in valid_categories:\n raise common_exceptions.DataCategoryNotSupported(\n f\"The data category {data_category} is not supported.\"\n )\n return data_category",
"def test_blank_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n rv = self.category('')\n self.assertIn(b'Field must be between 1 and 50 characters long.', rv.data)",
"def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False",
"def validate_number(column_name, value, column_data_type=\"number\"):\n valid = value.isnumeric()\n if valid is False:\n try:\n float(value)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None",
"def test_check_category_input_1(self):\n assert validation.check_category_input(1, []) == False",
"def __set_has_numeric(text=str):\n reg_ex = constants.NUMERIC_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_NUMERIC_KEY, text)",
"def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False",
"def scrub_category_val(category_val):\n if not isinstance(category_val, str):\n category_val = str(category_val)\n if category_val.lower() == 'nan':\n category_val = 'NaN'\n if not category_val:\n category_val = 'NaN'\n return category_val",
"def validate_numeric(column_name, value, column_data_type=\"numeric\"):\n valid = value.isnumeric()\n if not valid:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None",
"def must_contain_digit(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return not bool(re.search(\"\\d\", str(cell)))",
"def is_number(text):\n return text.lower() in AVRO_NUMBERS",
"def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"",
"def validate_category(self, data):\n try:\n if data['category_name'] == \"\":\n return \"Category_name cannot be blank\"\n if 'category_name' not in data.keys():\n return \"Enter category_name\"\n if not re.match(r\"^[a-zA-Z0-9 _]*$\", data['category_name']):\n return \"category name should contain alphanumerics only\"\n if len(data.keys()) > 1:\n return \"Invalid fields added\"\n else:\n return \"category_valid\"\n except KeyError:\n return \"Add required keys\""
] | [
"0.69292754",
"0.6892475",
"0.68267983",
"0.68267983",
"0.6329393",
"0.60511804",
"0.6026339",
"0.60021937",
"0.5988064",
"0.5981536",
"0.5980512",
"0.5917058",
"0.59100056",
"0.59020984",
"0.5897836",
"0.5872285",
"0.58462423",
"0.58406997",
"0.58369875",
"0.58208215",
"0.5811314",
"0.5794547",
"0.57898664",
"0.578632",
"0.57757646",
"0.5751578",
"0.5736873",
"0.5722728",
"0.57048607",
"0.5696044"
] | 0.70390165 | 0 |
Tests that the stock and price fields must be numbers | def test_stock_and_price_must_be_numbers(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock='stock',
price='money'
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_sale_with_price_not_digit_format(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': \"Hand Bag\", 'price': \"1500\", 'quantity': 3, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be an integer.')",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"",
"def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42",
"def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"",
"def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"",
"def test_make_order_with_price_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': -50, 'quantity': 3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Price and quantity must be ints >= 1')",
"def test_price_details_number(self):\n with self.client:\n response = self.add_meal(\"beef\", \"jasmine\")\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a number\")\n self.assertEqual(response.status_code, 400)",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def pricevalidator(self, price):\n if type(price) != int:\n API.abort(400, error_messages[15]['str_price'])\n\n return True",
"def test_10_insert_stock_prices(self):\n p_eur = Price.insert_new_price(\"EUR\", 1.2)\n p_aapl = Price.insert_new_price(\"AAPL\", 163.99)\n p_ibm = Price.insert_new_price(\"IBM\", 145.78)\n p_msft = Price.insert_new_price(\"MSFT\", 75.87)\n\n self.assertTrue(isinstance(p_eur, Price),\n msg=\"Price is NOT returning a valid inserted EUR instance\")\n print(\"Price insert EUR asset is returning the following price: {}\".format(\n p_eur.price,\n ))\n\n self.assertTrue(isinstance(p_aapl, Price),\n msg=\"Price is NOT returning a valid inserted AAPL instance\")\n print(\"Price insert AAPL asset is returning the following price: {}\".format(\n p_aapl.price,\n ))\n\n self.assertTrue(isinstance(p_ibm, Price),\n msg=\"Price is NOT returning a valid inserted IBM instance\")\n print(\"Price insert IBM asset is returning the following price: {}\".format(\n p_ibm.price,\n ))\n\n self.assertTrue(isinstance(p_msft, Price),\n msg=\"Price is NOT returning a valid inserted MSFT instance\")\n print(\"Price insert MSFT asset is returning the following price: {}\".format(\n p_msft.price,\n ))",
"def clean_stock(self):\n stock = self.cleaned_data.get('stock')\n if stock == 0:\n raise forms.ValidationError(u'Please insert product quantity')\n return stock",
"def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values",
"def test_numeric(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_numeric')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_numeric ' \\\n '( value NUMERIC(100,50) NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_numeric VALUES (%s)'\n for i in range(100):\n int = random.getrandbits(150)\n frac = random.getrandbits(150)\n item = decimal.Decimal('%d.%s' % (int, frac))\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_numeric'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, decimal.Decimal)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_numeric')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_numeric')\n cursor.execute(query)\n conn.commit()",
"def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)",
"def check_for_float_and_int(check):",
"def test_add_to_stock_negative(add):\n assert STOCK[0]['quantity'] == 20\n for i in [\"2.32\", \"sd\", -2, 0, 201]:\n value = validate_int(i)\n add[0].add_to_stock(value)\n # there is no change in our stock on invalid input\n assert STOCK[0]['quantity'] == 20\n STOCK[0]['quantity'] = 20",
"def validate_insert(self, s, internal=True):\n super(FieldNumeric, self).validate_insert(s, internal) # mandatory check\n if s:\n try:\n float(s)\n except:\n raise FilemanError(\"\"\"[%s] is not a valid number\"\"\" % s)",
"def check_symbol_price(self, data):\n if self.input_price < float(data.get(\"price\")):\n logging.info(\"Symbol price is higher than the input provided by the user.\")\n logging.info(\"Input Price :- \")\n logging.info(str(self.input_price))\n logging.info(\"Symbol Price :- \")\n logging.info(str(data.get(\"price\")))\n logging.info(\"+++++++++++++++++++++++++++++\")",
"def test_check_price_ok() -> None:\n data = check_price(min_price=1, data={'p': 2.0})\n assert data == {'p': 2.0}",
"def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")",
"def monetary_amount_valid(record, field_name='price', min=1, max=10):\n monetary_amount = record[field_name]\n assert isinstance(monetary_amount, float)\n string_price = str(monetary_amount)\n decimal = string_price.split(\".\")[1]\n assert min <= monetary_amount <= max and len(decimal) <= 2",
"def _validate_price(price):\n try:\n price = float(price)\n except ValueError:\n raise ValueError('Please provide valid price')\n if price < 1:\n raise ValueError('Price should be positive number')\n return price",
"def test_loads_base_price_valid(self):\n base_price: BasePrice = BasePrice.Schema().loads(json.dumps(base_price_valid))\n assert base_price.base_price == base_price_valid[\"base-price\"]\n assert base_price.options == base_price_valid[\"options\"]\n assert base_price.product_type == base_price_valid[\"product-type\"]",
"def testNumberAttribute(self):\n def action(field_class):\n # Check range.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n 0)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n -1)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.MAX_FIELD_NUMBER + 1)\n\n # Check reserved.\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.FIRST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n messages.LAST_RESERVED_FIELD_NUMBER)\n self.assertRaises(messages.InvalidNumberError,\n field_class,\n '1')\n\n # This one should work.\n field_class(number=1)\n self.ActionOnAllFieldClasses(action)",
"def test_make_order_with_quantity_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': 50, 'quantity': -3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Price and quantity must be ints >= 1')",
"def test_add_sale_with_price_below_one(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 'Torch', 'price': -10, 'quantity': 5, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be a positive number above 0.')",
"def test_create_invalid_price_higher_than_999(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 1001\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()",
"def test_positive_price_details(self):\n with self.client:\n response = self.add_meal(\"beef\", -15000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a positive number\")\n self.assertEqual(response.status_code, 400)"
] | [
"0.73344076",
"0.72246283",
"0.72246283",
"0.68624747",
"0.66491514",
"0.6644348",
"0.66039526",
"0.65944123",
"0.6510274",
"0.64985764",
"0.6415647",
"0.6365934",
"0.6306292",
"0.6289345",
"0.6277049",
"0.6271073",
"0.62253267",
"0.6222383",
"0.6221313",
"0.616871",
"0.6168377",
"0.616262",
"0.6076003",
"0.606838",
"0.6030939",
"0.60207576",
"0.6013369",
"0.59867245",
"0.5977606",
"0.5947591"
] | 0.7861267 | 0 |
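The _validate_qty snippet above calls is_valid_significant_digits without defining it. A minimal sketch of such a helper, assuming a Decimal-based decimal-place check; only the name and call signature come from the snippet, the body is illustrative:

from decimal import Decimal

def is_valid_significant_digits(quantity, max_decimal_places):
    # normalize() strips trailing zeros; a negative exponent then counts
    # the decimal places the value actually carries.
    exponent = Decimal(str(quantity)).normalize().as_tuple().exponent
    return max(-exponent, 0) <= max_decimal_places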
Tests that a product which already exists in the Inventory cannot be added again | def test_product_exists_in_inventory(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
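        # Add the product to the Inventory for the first time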
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
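        # Attempt to add the exact same product a second time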
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'This product exists in the Inventory!')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock",
"def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print (product + \" added.\")\n else:\n print (product + \" is already in the cart.\")",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_product_buy_with_not_exists_name(self):\n result_buy = self.info_list.product_buy(\"Говядина Немецкая 2кг\", 3)\n self.assertFalse(result_buy)",
"def add_item(self, product, price):\r\n if not product in self.items_in_cart:\r\n self.items_in_cart[product] = price\r\n print(product + \" added.\")\r\n else:\r\n print(product + \" is already in the cart.\")",
"def confirm_inventory(self, data, batch): # not used will be deprecated todo\n try:\n batch = batch\n data = data\n location = self.Location.find(['name', '=', 'MyInventory'])[-1]\n inventory = self.Inventory.find([('batch_number', '=', batch), ('location', '=', location.id)])[-1]\n lines = inventory.lines\n for i in data:\n product = \\\n self.Product.find(\n [('code', '=', i['code']), ('description', '=', 'Stock'), ('type', '=', 'goods')])[\n -1]\n supplier = self.Party.find(['name', '=', i['supplier']])[-1]\n for j in lines:\n if j.product == product:\n pro = j.product\n template = pro.template\n template.list_price = Decimal(i['rate'])\n template.save()\n pro.save()\n j.quantity = float(i['quantity'])\n j.supplier = supplier\n j.expiry_date = i['expiry_date']\n j.save()\n inventory.state = 'done'\n inventory.save()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False",
"def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)",
"def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))",
"def test_update_cart_name_duplicate(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart2'})",
"def is_product_exists(product_name) -> bool:\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Products WHERE product_name=?)\", (product_name,))\n return cursor.fetchone()[0] == 1",
"def test_update_inventory(self):\n pass",
"def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False",
"def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print product + \" added.\"\n else:\n print product + \" is already in the cart.\"",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_admin_create_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)",
"def test_cart_creation_duplicate_name(self):\n cart_name = 'cart name'\n self.cart_item_manager.create_cart('123', cart_name, False)\n self.cart_item_manager.create_cart('124', cart_name, False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.create_cart('123', cart_name, False)",
"def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)",
"def products_made(self, product) -> bool:\n return self.product_idx(product) is not None",
"def _check_product(self):\n\n self.importable = False\n abcde = string.ascii_uppercase[:5]\n product_infos = self.retrieve_product_infos()\n\n if product_infos['product_code'] is not None:\n try:\n Products.objects.get(\n code=product_infos['product_code']\n )\n except Products.DoesNotExist:\n if (\n product_infos['product_name'] is not None\n and product_infos['product_code'] not in ProductImportation.codes\n and product_infos['product_code'] is not None\n and product_infos['product_url'] is not None\n and product_infos['image_url'] is not None\n and product_infos['quantity'] is not None\n and product_infos['ingredients'] is not None\n and product_infos['brands'] != []\n and product_infos['stores'] != []\n and product_infos['countries'] is not None\n and product_infos['compare_to'] is not None\n and product_infos['categories_hierarchy'] is not None\n and product_infos['nutriscore'] in abcde\n and all([product_infos[nutriment] >= 0 for nutriment in self.list_nutriments])\n and Categories.objects.filter(name=product_infos['compare_to']).count() > 0\n ):\n self.name = product_infos['product_name']\n self.product_infos = product_infos\n self.code = product_infos['product_code']\n ProductImportation.codes.append(self.code)\n self.importable = True\n\n return self.importable",
"def _item_exists(self, location):\n \"Does nothing\"",
"def exist(self, product_item):\n cursor = self.database.cursor(named_tuple=True, buffered=True)\n sql = \"SELECT * FROM favoris WHERE produit_id = '{}' \".format(product_item.id)\n cursor.execute(sql)\n rows = cursor.fetchone()\n if not rows:\n return False\n return True",
"def test_add_product_to_cart(self, driver):\n logging.info(\"Start test case: Continue Shop\")\n data = self.test_data[\"Continue Shop\"][\"Products\"][0]\n logging.info(f\"Test data: [{data}]\")\n product_name = data[\"Product Name\"]\n\n select_product(driver, data[\"Page\"], product_name)\n add_product_to_cart(driver, data[\"Size\"], data[\"Color\"], data[\"Quantity\"])\n assert is_product_in_cart(driver, product_name)\n continue_shopping_from_order_summary(driver)\n assert verify_current_page_is_home(driver)",
"def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500"
] | [
"0.7496905",
"0.7326862",
"0.71235555",
"0.68696725",
"0.6750119",
"0.6638737",
"0.65924627",
"0.6544556",
"0.65392476",
"0.6516768",
"0.6488533",
"0.6471995",
"0.64518577",
"0.6451675",
"0.64378035",
"0.63537824",
"0.6353224",
"0.63370115",
"0.6325946",
"0.6315863",
"0.63116044",
"0.62974346",
"0.6286299",
"0.62745404",
"0.62663484",
"0.62622607",
"0.62600285",
"0.62484556",
"0.62286633",
"0.62086487"
] | 0.81727195 | 0 |
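The duplicate-product test above pins down two exact response messages and status codes. A minimal Flask sketch of the handler behaviour it implies; the in-memory PRODUCTS store and the handler body are assumptions, only the URL, messages and status codes come from the test:

from flask import Flask, jsonify, request

app = Flask(__name__)
PRODUCTS = []  # hypothetical in-memory store

@app.route('/api/v1/products', methods=['POST'])
def add_product():
    data = request.get_json()
    # Adding a product whose name is already stored is rejected with 400.
    if any(p['prod_name'] == data['prod_name'] for p in PRODUCTS):
        return jsonify(message='This product exists in the Inventory!'), 400
    data['id'] = len(PRODUCTS) + 1
    PRODUCTS.append(data)
    return jsonify(message='Product successfully added to Inventory!'), 201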
Tests that a user can view a product in the Inventory | def test_view_a_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
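        # Fetch the product that was just added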
resp = self.client.get(
'/api/v1/products/1',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertIn('NY_denims', str(reply['product']))
self.assertEqual(resp.status_code, 200) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')",
"def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_product_detail_view(client, sample_product, user_company, authenticated_user):\n products = Product.objects.all()\n for product in products:\n product_detail_view = reverse('product-detail', kwargs={'pk': product.pk})\n response = client.get(product_detail_view)\n #The view should return 200 for each product that exists\n assert response.status_code == 200\n content = response.content.decode(response.charset)\n #With content specific for each product\n assert product.name in content\n #checking for \"page not found\" if product does not exist\n product_not_exist_detail_view = reverse('product-detail', kwargs={'pk':104})\n response = client.get(product_not_exist_detail_view)\n assert response.status_code == 404 \n #Authenticated user but not the owner of the product returns 404\n if authenticated_user and not user_company:\n product_detail_view = reverse('product-detail', kwargs={'pk': 6})\n response = client.get(product_detail_view)\n assert response.status_code == 404",
"def test_view_product_detail(self):\n product = sample_product(supplier_id=self.user)\n\n url = detail_url(product.id)\n res = self.client.get(url)\n\n serializer = ProductSerializer(product)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')",
"def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)",
"def test_view_cart_contents(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 200)",
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_product_exists_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product exists in the Inventory!')\n self.assertEqual(resp.status_code, 400)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_add_product_view_for_authenticated_users(user_company, client):\n add_product_url = reverse('add-product')\n response = client.get(add_product_url)\n assert response.status_code == 200",
"def test_product_by_category_logged_in_user(self):\n\n # Log In user that is not the seller, check that the products not created by the user do show up\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Make sure that only the product associated with product category 1 is displayed\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_non_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_non_seller.status_code, 200)\n\n # Make sure that only the product associated with product category 2 is displayed\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_non_seller.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_non_seller.content)",
"def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])",
"def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')",
"def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)",
"def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)",
"def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()",
"def test_product_list_view(sample_product, user_company, client):\n product_list_url = reverse('product-list')\n response = client.get(product_list_url)\n assert response.status_code == 200\n assert Product.objects.count() == 9\n products = Product.objects.all()\n content = response.content.decode(response.charset)\n for product in products:\n assert product.name in content",
"def test_basic_info(self):\n\n url = reverse('stock-item-detail', kwargs={'pk': 1})\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n html = str(response.content)\n\n # Part name\n self.assertIn('Stock Item: M2x4 LPHS', html)\n\n # Quantity\n self.assertIn('<h5>Available Quantity</h5>', html)\n self.assertIn('<h5>4000', html)\n\n # Batch code\n self.assertIn('Batch', html)\n self.assertIn('<td>B123</td>', html)\n\n # Actions to check\n actions = [\n \"id=\\\\\\'stock-count\\\\\\' title=\\\\\\'Count stock\\\\\\'\",\n \"id=\\\\\\'stock-add\\\\\\' title=\\\\\\'Add stock\\\\\\'\",\n \"id=\\\\\\'stock-remove\\\\\\' title=\\\\\\'Remove stock\\\\\\'\",\n \"id=\\\\\\'stock-move\\\\\\' title=\\\\\\'Transfer stock\\\\\\'\",\n \"id=\\\\\\'stock-duplicate\\\\\\'\",\n \"id=\\\\\\'stock-edit\\\\\\'\",\n \"id=\\\\\\'stock-delete\\\\\\'\",\n ]\n\n # Initially we should not have any of the required permissions\n for act in actions:\n self.assertNotIn(act, html)\n\n # Give the user all the permissions\n self.assignRole('stock.add')\n self.assignRole('stock.change')\n self.assignRole('stock.delete')\n\n response = self.client.get(url)\n html = str(response.content)\n\n for act in actions:\n self.assertIn(act, html)",
"def test_list_products(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[1]['description'], 'Descripcion producto 1')",
"def test_product(self):\n self.assertEqual(self.test_product.name, self.test_product_name)\n self.assertEqual(self.test_product.price, self.test_product_price)",
"def test_search_view_when_user_logged_in(self):\n self.user = User.objects.create_user(\n username='person',\n email='[email protected]',\n password='test12345@_password',\n )\n self.client.login(\n username='person',\n password='test12345@_password')\n\n response = self.client.get('/search/?q=')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"products.html\")",
"def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')",
"def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)"
] | [
"0.6914857",
"0.68370014",
"0.68132645",
"0.67801803",
"0.6777728",
"0.6743917",
"0.67292273",
"0.6651706",
"0.663699",
"0.6590794",
"0.6579514",
"0.654358",
"0.6536533",
"0.64720875",
"0.64325994",
"0.6371396",
"0.63542134",
"0.63323015",
"0.6293569",
"0.6277347",
"0.62515086",
"0.62446225",
"0.62368655",
"0.6210974",
"0.6210195",
"0.62101847",
"0.61971897",
"0.618447",
"0.61530405",
"0.6144851"
] | 0.74070454 | 0 |
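A matching sketch for the single-product lookup exercised by the test above, reusing the hypothetical Flask app and PRODUCTS store from the previous sketch; the 404 message is the one asserted in the related tests, the rest is illustrative:

@app.route('/api/v1/products/<int:prod_id>', methods=['GET'])
def get_product(prod_id):
    # Return the stored product, or the exact message the tests expect.
    for product in PRODUCTS:
        if product.get('id') == prod_id:
            return jsonify(product=product), 200
    return jsonify(message='This product does not exist!'), 404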
Tests that a user cannot view a product in the Inventory with a blacklisted token | def test_cannot_view_a_product_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
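        # Log out so that the token is blacklisted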
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
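        # Any request made with the revoked token must now be rejected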
resp = self.client.get(
'/api/v1/products/1',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403",
"def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)",
"def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))",
"def assert_user_cannot_read(self, user, video):\n livesession = LiveSessionFactory(\n email=user.email,\n is_registered=True,\n user=user,\n video=video,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)",
"def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_can_not_cancel_current_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)",
"def test_locked_asset_not_logged_in(self):\r\n self.client.logout()\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r",
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)"
] | [
"0.77314836",
"0.7319002",
"0.68258417",
"0.6709656",
"0.66352224",
"0.6600747",
"0.65926576",
"0.65914124",
"0.6580106",
"0.6573959",
"0.6558386",
"0.65569514",
"0.6534714",
"0.6512719",
"0.6504638",
"0.64704",
"0.6448948",
"0.6437487",
"0.6420581",
"0.6419264",
"0.63966596",
"0.63964593",
"0.63914037",
"0.6346709",
"0.63102347",
"0.6308961",
"0.629592",
"0.6293923",
"0.6290818",
"0.6275965"
] | 0.79721093 | 0 |
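The blacklisted-token test above implies a revocation store that is consulted on every authenticated request. A minimal sketch, again building on the hypothetical Flask app; the decorator and set-based store are assumptions, the messages and status codes come from the tests:

from functools import wraps

BLACKLIST = set()  # hypothetical store of revoked tokens

def _bearer_token():
    return request.headers.get('Authorization', '').replace('Bearer ', '')

def token_required(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        # A token revoked at logout must fail authentication from then on.
        if _bearer_token() in BLACKLIST:
            return jsonify(message='Invalid Authentication, Please Login!'), 401
        return view(*args, **kwargs)
    return wrapper

@app.route('/api/v1/logout', methods=['DELETE'])
def logout():
    BLACKLIST.add(_bearer_token())
    return jsonify(message='You are successfully logged out!'), 200

@app.route('/api/v1/products', methods=['GET'])
@token_required
def list_products():
    return jsonify(products=PRODUCTS), 200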
Tests that a user can view all products in the Inventory | def test_view_all_products(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
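        # Fetch the full product list and look for the new product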
resp = self.client.get(
'/api/v1/products',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertIn('NY_denims', str(reply['products']))
self.assertEqual(resp.status_code, 200) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')",
"def test_view_a_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['product']))\n self.assertEqual(resp.status_code, 200)",
"def test_list_products(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[1]['description'], 'Descripcion producto 1')",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)",
"def test_product_list_view(sample_product, user_company, client):\n product_list_url = reverse('product-list')\n response = client.get(product_list_url)\n assert response.status_code == 200\n assert Product.objects.count() == 9\n products = Product.objects.all()\n content = response.content.decode(response.charset)\n for product in products:\n assert product.name in content",
"def test_list_products(self):\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n sample_product(supplier_id=self.user)\n\n res = self.client.get(PRODUCTS_URL)\n\n products = Product.objects.all().order_by('-name')\n serializer = ProductSerializer(products, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(len(res.data), 3)",
"def test_product_detail_view(client, sample_product, user_company, authenticated_user):\n products = Product.objects.all()\n for product in products:\n product_detail_view = reverse('product-detail', kwargs={'pk': product.pk})\n response = client.get(product_detail_view)\n #The view should return 200 for each product that exists\n assert response.status_code == 200\n content = response.content.decode(response.charset)\n #With content specific for each product\n assert product.name in content\n #checking for \"page not found\" if product does not exist\n product_not_exist_detail_view = reverse('product-detail', kwargs={'pk':104})\n response = client.get(product_not_exist_detail_view)\n assert response.status_code == 404 \n #Authenticated user but not the owner of the product returns 404\n if authenticated_user and not user_company:\n product_detail_view = reverse('product-detail', kwargs={'pk': 6})\n response = client.get(product_detail_view)\n assert response.status_code == 404",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_get_all_objects(self):\n url = '/product/xml/'\n response = self.client.get(url, **self.headers)\n # Request should not be validated by a 401\n self.failUnlessEqual(response.status_code, 401)\n response = self.client.get(url, **self.advancedheaders)\n # Request should be validated by a 200\n self.failUnlessEqual(response.status_code, 200)\n xml_response = parseString(response.content)\n\n product_tags =[elt for elt in xml_response.getElementsByTagName('object') if elt.getAttribute('model') == 'product.product']\n # check that all product are displayed\n self.failUnlessEqual(len(product_tags), Product.objects.count())",
"def test_add_all(self): #SAUCE-LAB-7\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('\\n')\n print('Not all items were added')",
"def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_view_cart_contents(self):\n User.objects.create_user(\n username=\"testuser\", password=\"thisisasecret101\")\n item = Product(name=\"Product\",\n product_image=\"testing_img.jpg\",\n description=\"Product description.\",\n price=\"20.00\",\n stock_available=\"5\",\n showcase_product=\"True\")\n item.save()\n self.client.login(username=\"testuser\", password=\"thisisasecret101\")\n session = self.client.session\n session[\"cart\"] = {1: 1}\n session.save()\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 200)",
"def test_product_by_category_logged_in_user(self):\n\n # Log In user that is not the seller, check that the products not created by the user do show up\n self.client.login(username=\"test_user\", password=\"secret\")\n\n # Search for product category 1\n response = self.client.get(reverse('website:product_by_category', args=(1,)))\n\n # Check that status code is 200\n self.assertEqual(response.status_code, 200)\n\n # Make sure that only the product associated with product category 1 is displayed\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertNotIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n\n # Search for product category 2\n response_non_seller = self.client.get(reverse('website:product_by_category', args=(2,)))\n\n # Check that the status code is 200\n self.assertEqual(response_non_seller.status_code, 200)\n\n # Make sure that only the product associated with product category 2 is displayed\n self.assertNotIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response_non_seller.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response_non_seller.content)",
"def test_items_in_cart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertTrue(context['display_shopping_cart'])",
"def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.search\"\n current_page = 1\n search_info = json.dumps({\n })\n\n result = self.access_api(flag = flag, api = api, current_page = current_page, search_info = search_info)\n self.assertTrue('data_list' in result)",
"def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')",
"def test_vault_get_all_vault_items(self):\n pass",
"def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')",
"def index(self, user):\n\n cart_products = CartProduct.index(user)\n CartProductsView.index(cart_products)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_list_products_filtered_by_selling_status(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?selling=3')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')",
"def test_get_inventory_list(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)",
"def test_list_products_filtered_by_seller_name(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?seller=testuser1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_show_cart_with_items(client):\n raise NotImplemented('Acceptance test failed')",
"def test_view_displays_all(self):\n set_up_one_user(self, 1, 0)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.context['user']), 'test')\n self.assertEqual(len(response.context['data']), 1)"
] | [
"0.72330284",
"0.71533865",
"0.70860183",
"0.6867175",
"0.6790764",
"0.67349434",
"0.6666839",
"0.6636514",
"0.66316766",
"0.6597527",
"0.65619427",
"0.6444273",
"0.6425292",
"0.642249",
"0.6402104",
"0.64012486",
"0.6383176",
"0.6383148",
"0.6321953",
"0.6320072",
"0.6299333",
"0.6296548",
"0.62822485",
"0.62611794",
"0.6251649",
"0.624012",
"0.6230861",
"0.6216536",
"0.6212767",
"0.6196214"
] | 0.7486654 | 0 |
Tests that a user cannot view all products in the Inventory with a blacklisted token | def test_cannot_view_all_products_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
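    # Add a product while the admin token is still valid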
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
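    # Log out so that the token is blacklisted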
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
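    # Requests made with the blacklisted token must now be rejected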
resp = self.client.get(
'/api/v1/products',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_listing_supplies_unauthenticated(self):\n request = self.factory.get('/api/supplies')\n response = SupplyListView.as_view()(request)\n # no permission\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_attendant_cannot_view_all_sales(self):\n response = self.client.get(\n '/self.base_url/sales',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"You dont have rights to list all sales, contact the system admin\")\n self.assertEqual(response.status_code,401)",
"def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403",
"def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)",
"def test_video_detail_no_permission(\n mock_user_moira_lists, logged_in_apiclient, user_admin_list_data\n):\n client, _ = logged_in_apiclient\n mock_user_moira_lists.return_value = {\"some_other_list\"}\n url = reverse(\n \"video-detail\", kwargs={\"video_key\": user_admin_list_data.video.hexkey}\n )\n result = client.get(url)\n assert result.status_code == status.HTTP_403_FORBIDDEN",
"def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")",
"def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()"
] | [
"0.77733105",
"0.7087671",
"0.67957276",
"0.6761181",
"0.675135",
"0.67509085",
"0.66980356",
"0.6541592",
"0.65259844",
"0.6448291",
"0.64465666",
"0.6436873",
"0.64326245",
"0.64205766",
"0.6409761",
"0.6387554",
"0.6384848",
"0.6369056",
"0.63571316",
"0.63104194",
"0.62819606",
"0.62777966",
"0.6248736",
"0.6216315",
"0.6215546",
"0.6214481",
"0.6206037",
"0.6202165",
"0.6197165",
"0.6163344"
] | 0.8038467 | 0 |
Tests that a user cannot view a product that does not exist in the Inventory | def test_view_product_that_doesnot_exist_in_inventory(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
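    # Seed the Inventory with a single product (id 1)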
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
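    # Request product id 2, which was never added to the Inventory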
resp = self.client.get(
'/api/v1/products/2',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'This product does not exist!')
self.assertEqual(resp.status_code, 404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def test_view_products_from_empty_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'There are no products yet!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)",
"def test_add_cart_item_unauthorized_user(self):\n cart_id = self.cart_item_manager.create_cart('111', 'test cart', False)\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.add_cart_item(catalog=self.catalog,\n user_id='112',\n cart_id=cart_id,\n entity_id='entity_id',\n entity_type='entity_type',\n entity_version='entity_version')",
"def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)",
"def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()",
"def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)",
"def test_visibility_of_not_available_2(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n list_url = reverse('partners:list')\n\n editor = EditorFactory()\n\n request = RequestFactory().get(list_url)\n request.user = editor.user\n response = PartnersListView.as_view()(request)\n\n self.assertNotContains(response, partner.get_absolute_url())",
"def test_get_reusableitem_api_not_public(self):\n\n # user not logged in\n self.client.logout()\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # user logged in and created the Reusable Item\n self.client.force_authenticate(user=self.user_1)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # user logged in and did not create the Reusable Item\n self.client.logout()\n self.client.force_authenticate(user=self.user_2)\n\n response = self.client.get(get_reusable_item_1_url(self))\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_view_product_with_invalid_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2kk',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Try an interger for product id')\n self.assertEqual(resp.status_code, 400)",
"def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)",
"def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)"
] | [
"0.72640157",
"0.6747577",
"0.6742351",
"0.6686189",
"0.66764927",
"0.6665994",
"0.6556591",
"0.6439238",
"0.6401339",
"0.6370489",
"0.6360187",
"0.6351575",
"0.6346178",
"0.63409466",
"0.63361067",
"0.6330177",
"0.6312651",
"0.62697184",
"0.6259748",
"0.62596893",
"0.62184983",
"0.62113905",
"0.6201547",
"0.6200921",
"0.6182439",
"0.6182439",
"0.6182439",
"0.6182439",
"0.61808616",
"0.61482865"
] | 0.73303914 | 0 |
Tests that a user cannot view products from an empty Inventory | def test_view_products_from_empty_inventory(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
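    # Query the product list without having added any products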
resp = self.client.get(
'/api/v1/products',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'There are no products yet!')
self.assertEqual(resp.status_code, 404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_admin_cannot_delete_product_from_empty_Inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'There are no products in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_no_items_in_cart(self):\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')",
"def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_login_with_nonempty_cart(client):\n raise NotImplemented('Acceptance test failed')",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)",
"def test_no_enable_shoppingcart(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])",
"def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock",
"def test_purchase_not_available(self):\n purchase_model = {\"id\": 2, \"amount\": 1}\n resp = self.app.post(\"/products/2/purchase\", json=purchase_model, content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n resp = self.app.get(\"/products/2\", content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)",
"def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)",
"def test_get_empty_product_list(self):\n response = self.client().get('/api/v1/products')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"There are no books\")\n self.assertEqual(response.status_code, 404)",
"def test_get_unexisting_products(self):\n response=self.get_unexisting_products()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['message'],\"No Available products\")\n self.assertEqual(response.status_code, 200)",
"def action_confirm(self):\n if any(not l.is_available for l in self.mapped('order_line')):\n raise UserError(_('Some of your products in order does not have enough quantity available'))\n res = super(SaleOrder, self).action_confirm()\n return res",
"def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)",
"def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)",
"def print_inventory_items(items):\r\n if not (len(items) == 0):\r\n wrap_print(\"You have \" + list_of_objects(items) + \".\\n\")\r\n else:\r\n wrap_print(\"You don't have anything.\\n\")",
"def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)",
"def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)"
] | [
"0.74154675",
"0.72096145",
"0.7158583",
"0.693217",
"0.6781853",
"0.6733253",
"0.66969836",
"0.65152705",
"0.64797616",
"0.64615333",
"0.6445138",
"0.6428424",
"0.63962966",
"0.6393635",
"0.63872916",
"0.63662773",
"0.633816",
"0.6332057",
"0.63152456",
"0.6308194",
"0.6305822",
"0.628019",
"0.6269049",
"0.6258291",
"0.6256637",
"0.6233273",
"0.62296253",
"0.620884",
"0.6199793",
"0.6197927"
] | 0.78474796 | 0 |
Tests that a user cannot view a product with an invalid id | def test_view_product_with_invalid_id(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
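    # Add a valid product so the Inventory is not empty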
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
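    # Use a non-integer product id; the error message is asserted exactly as the API returns it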
resp = self.client.get(
'/api/v1/products/2kk',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Try an interger for product id')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_detail_odd_product_id_permission(self):\n self.assertEqual(self.product_2.id, 2)\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_2.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_ProductsDataViewSet_with_get_request_Invalid_id(self):\n # Request the data by API call.\n response = self.client.get('/api/productsdata/{}/'.format(-1))\n\n # Checking the response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json()['detail'], 'Not found.')",
"def test_admin_cannot_delete_product_with_non_integer_prod_id(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The product id should be a number!')\n self.assertEqual(resp.status_code, 400)",
"def test_request_membership_form_with_an_invalid_user_id(self):\n pass",
"def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.trait.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_security_on_get(self):\n # test the listing url\n product = Product.objects.all()[0]\n url = '/product/xml/'\n response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)\n # test the product detail url\n url = '/product/xml/%s/' % product.item_number\n Response = self.client.get(url)\n self.failUnlessEqual(response.status_code, 401)",
"def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()",
"def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.dataset.pk + 1))\n self.assertEqual(response.status_code, 404)",
"def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)",
"def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)",
"def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_delete_product_non_valid_pk(self):\n product_pk = 9999\n product_count_before = models.Product.objects.count()\n url = reverse('products:detail', kwargs={'pk': product_pk})\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)",
"def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))",
"def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)",
"def test_visibility_of_not_available_1(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n with self.assertRaises(Http404):\n # We must explicitly pass kwargs to the view even though they are\n # implied by the URL.\n _ = PartnersDetailView.as_view()(request, pk=partner.pk)",
"async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)",
"def test_api_url_no_id(self):\n url = 'http://api.shopstyle.com/action/apiVisitRetailer?pid=uid3600-33034440-48'\n assert extract_product_id_from_api_url(url) is None",
"def test_detail_is_hacker_permission(self):\n self.user_1.username = 'pythonhacker'\n self.user_1.save()\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get(\n '/api/products/{}/'.format(self.product_1.id), **headers)\n\n expected = {'detail': 'You do not have permission to perform this action.'}\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)",
"def test_details_nonnum_id(self):\n self.check_response(\n '/attributes/xyz',\n ('Please enter an integer value for Attribute ID',))",
"def test_visibility_of_not_available_3(self):\n partner = PartnerFactory(status=Partner.NOT_AVAILABLE)\n detail_url = partner.get_absolute_url()\n\n editor = EditorFactory()\n editor.user.is_staff = True\n editor.user.save()\n\n request = RequestFactory().get(detail_url)\n request.user = editor.user\n\n # This should not raise Http404.\n response = PartnersDetailView.as_view()(request, pk=partner.pk)\n self.assertEqual(response.status_code, 200)"
] | [
"0.7096591",
"0.6907471",
"0.6797254",
"0.6654126",
"0.665064",
"0.6630695",
"0.6613442",
"0.6613442",
"0.6613442",
"0.66119534",
"0.65806687",
"0.6550584",
"0.6499338",
"0.64979565",
"0.64979565",
"0.64979565",
"0.6494183",
"0.64578736",
"0.64060956",
"0.63708884",
"0.63435787",
"0.63145936",
"0.6301092",
"0.6291938",
"0.62912893",
"0.6271717",
"0.6244162",
"0.622503",
"0.6213876",
"0.61919767"
] | 0.75562716 | 0 |
Test that a product cannot be updated with a blacklisted token | def test_cannot_update_product_with_blacklisted_token(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
resp = self.client.delete(
'/api/v1/logout',
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'You are successfully logged out!')
self.assertEqual(resp.status_code, 200)
product_update = dict(
prod_name='NY_jeans',
category='denims',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')
self.assertEqual(resp.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_cannot_create_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))",
"def test_cannot_update_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_cannot_get_sale_record_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_a_renew_non_active_license(self):\n self.assertTrue(self.status.is_ready(), \"The license is active, non active state awaited\")\n with self.assertRaisesRegexp(IOError, 'PUT .* HTTP error 4[0-9][0-9]$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, self.end+2*self.ADAY)",
"def test_wrong_token(self):\n token = str((jwt.encode(\n {\"email\": \"[email protected]\"},\n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token+\"wrong\",\n {\"password\": \"bagenda1234\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['error'],\n \"verification link is invalid.\")",
"def test_attendant_cannot_make_a_sale_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_token_missing_edit(self):\n with self.client:\n id = self.get_id()\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": \"\"}))\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data.get('message'), \"Token is missing\")",
"def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_deny_pending_payment(self):\n pass",
"def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_no_unlisted(self):\n Version.objects.get(pk=self.version_1_2_2).update(\n channel=amo.RELEASE_CHANNEL_UNLISTED)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def test_update_ban(self):\n pass",
"def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)",
"def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r",
"def test_update_not_matching_token(\n self, registered_user: user_models.User,\n valid_header_dict_with_user_id: Dict[str, Any]):\n update_json_payload = get_valid_update_request(registered_user)\n response = get_response_from_json(update_json_payload,\n valid_header_dict_with_user_id)\n\n assert not check_response_valid_update(response)\n assert not check_fields_updated_correctly(registered_user,\n update_json_payload)\n assert response.status_code == 401",
"def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)",
"def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)"
] | [
"0.7377064",
"0.7333218",
"0.7190565",
"0.7068335",
"0.70310974",
"0.69859517",
"0.67677313",
"0.6604328",
"0.65338165",
"0.65273803",
"0.6446804",
"0.6441972",
"0.64315784",
"0.64071953",
"0.6401291",
"0.6390641",
"0.6355562",
"0.6326239",
"0.6304661",
"0.62910193",
"0.6279447",
"0.6265347",
"0.6247338",
"0.6210322",
"0.62019455",
"0.62015384",
"0.6186053",
"0.61702967",
"0.6142926",
"0.6135204"
] | 0.80966264 | 0 |
Test that you can't update a nonexistent product | def test_update_nonexistant_product(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product_update = dict(
prod_name='NY_jeans',
category='denims',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], "This product doesn't exists in the Inventory!")
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)",
"def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')",
"def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_cannot_sale_nonexistant_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"Paris_heels\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product is not in the Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})",
"def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update_non_existent(cards_db):\n i = 123 # any number will do, db is empty\n with pytest.raises(InvalidCardId):\n cards_db.update_card(i, Card(summary=\"bar\", owner=\"not me\"))",
"def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)",
"def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)",
"def test_beneficiaries_update_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-update')\n response = self.client.get(url, content_type='application/json')\n return self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)",
"def test_view_product_that_doesnot_exist_in_inventory(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products/2',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This product does not exist!')\n self.assertEqual(resp.status_code, 404)",
"def test_update_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n product_update = dict(\n prod_name='',\n category='',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)",
"def test_request_do_update_non_existent_id(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert DUT.request_do_update(100)",
"def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())",
"def test_patch_a_resource_that_does_not_exist():\n pass",
"def test_add_without_name(self):\n good = GoodInfo(\"\", \"30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)",
"def test_update_inventory_not_found(self):\n new_inventory = {'name': 'conditioner', 'quantity': 1, 'status': 'new'}\n data = json.dumps(new_inventory)\n resp = self.app.put('/inventories/0', data=data, content_type='application/json')\n self.assertEquals(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())",
"def test_03_product_delete(self):\n product = self.create_product()\n products = self.product_obj.search([])\n self.assertIn(product, products)\n product.unlink()\n self.assertNotIn(product.exists(), products)"
] | [
"0.74883664",
"0.74142987",
"0.7132973",
"0.7130076",
"0.6898503",
"0.6843232",
"0.676572",
"0.6739922",
"0.67206293",
"0.67113554",
"0.66875863",
"0.6684773",
"0.668369",
"0.66656035",
"0.6630668",
"0.6606419",
"0.65949506",
"0.6585263",
"0.65486664",
"0.65191656",
"0.6512701",
"0.6504794",
"0.6485288",
"0.6474551",
"0.64305764",
"0.6416947",
"0.64099246",
"0.64092165",
"0.6385925",
"0.6373567"
] | 0.7994515 | 0 |
Test that a product cannot be updated by an unauthorised user | def test_unauthorized_product_update(self):
resp = self.admin_create_user()
reply = self.attendant_login()
token = reply['token']
product_update = dict(
prod_name='NY_jeans',
category='denims',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Unauthorized Access!')
self.assertEqual(resp.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_delete_not_my_product(self):\n self._require_login(self.user1)\n response = self.client.delete('/api/1.0/products/2/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_update_author_unlogged(self):\n data = {'name': 'Ken Thompson'}\n\n request = self.client.patch(self.epoint, data)\n\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)",
"def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)",
"def test_cannot_update_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)",
"def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)",
"def test_modify_reusableitem_not_authenticated(self):\n self.client.logout()\n \n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_create_not_admin(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n expected = {'detail': 'You do not have permission to perform this action.'}\n response = self.client.post(\n '/api/products/', data=payload,\n content_type='application/json', **headers)\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.json(), expected)\n self.assertEqual(Product.objects.count(), 2)",
"def test_non_admin_cannot_delete_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/products/1',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_only_admin_can_create_product(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)",
"def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)",
"def test_resuableitem_invalid_vote(self):\n\n original_reusableitem = setup_public_reusable_item_1(self)\n data1 = submit_change_request_1(self, self.user_1)\n\n # user 2 now submits an invalid vote\n self.client.force_authenticate(user=self.user_2)\n\n data2 = {'vote': 'banana'}\n response = self.client.patch(get_reusable_item_1_url(self), data2, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_none_admin_edit(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.put('api/v1/meals/{}'.format(id),\n data=json.dumps(dict(\n meal_name=\"chips\",\n price=15000\n )),\n content_type='application/json',\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)",
"def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')",
"def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='[email protected]', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)",
"def test_admin_cannot_update_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/5',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This user doesn't exist!\")\n self.assertEqual(resp.status_code, 400)",
"def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_admin_cannot_delete_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/products/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This product does not exist in Inventory!')\n self.assertEqual(resp.status_code, 404)",
"def test_create_product_as_customer_fails(self):\n customer = get_user_model().objects.create_user(\n '[email protected]',\n 'Customer',\n 'user123'\n )\n self.client.force_authenticate(customer)\n res = self.client.post(PRODUCTS_URL, PRODUCT_PAYLOAD)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)",
"def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)",
"def test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)",
"def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_editing_supplies_unauthenticated(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'bbb'})\n response = SupplyDetailsView.as_view()(request, pk=id)\n # unauthenticated user should get unauthorized error\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)"
] | [
"0.7472202",
"0.74601954",
"0.730332",
"0.72697306",
"0.71795666",
"0.71784",
"0.7131658",
"0.70938385",
"0.7079321",
"0.6975282",
"0.695774",
"0.6886112",
"0.68209416",
"0.68174875",
"0.6791423",
"0.6772019",
"0.6765322",
"0.6752393",
"0.67434675",
"0.66997343",
"0.6691479",
"0.66883683",
"0.66813403",
"0.66810966",
"0.6675983",
"0.667394",
"0.6670071",
"0.66646767",
"0.6650377",
"0.6643716"
] | 0.80988926 | 0 |
Test that a product cannot be updated with empty fields | def test_update_product_with_empty_fields(self):
resp = self.admin_register()
reply = self.admin_login()
token = reply['token']
product = dict(
prod_name='NY_denims',
category='denims',
stock=20,
price=150
)
resp = self.client.post(
'/api/v1/products',
content_type='application/json',
data=json.dumps(product),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'Product successfully added to Inventory!')
self.assertEqual(resp.status_code, 201)
product_update = dict(
prod_name='',
category='',
stock=50,
price=180
)
resp = self.client.put(
'/api/v1/products/1',
content_type='application/json',
data=json.dumps(product_update),
headers={'Authorization': 'Bearer {}'.format(token)}
)
reply = json.loads(resp.data.decode())
self.assertEqual(reply['message'], 'prod_name and category cannot be empty!')
self.assertEqual(resp.status_code, 400) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)",
"def test_update_nonexistant_product(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], \"This product doesn't exists in the Inventory!\")\n self.assertEqual(resp.status_code, 400)",
"def test_update_not_my_product(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/2/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_admin_cannot_create_product_with_empty_fields(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='',\n category='',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter all fields!')\n self.assertEqual(resp.status_code, 400)",
"def test_cannot_update_with_empty_field(self):\n\n self.client.login(username='notlogged', password='notlogged')\n group_fields = ['name', 'description']\n\n utils.test_cannot_post_with_empty_fields(self, self.url, group_fields)\n\n # Group is not updated.\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, 'test')\n self.assertEqual(updated_group.description, 'test')\n self.assertIsNone(updated_group.last_edit_date)",
"def test_partial_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n\n payload = {\n 'name': 'Updated name',\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.patch(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')",
"def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_update_with_no_matches(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 15}, name=\"Mark\")\n assert n_updated == 0\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)",
"def test_update_product_to_not_selling(self):\n self._require_login(self.user1)\n post_data = {\n \"category\": {\n \"name\": \"deportes\",\n \"index\": 1\n },\n \"name\": \"Producto 1 modified\",\n \"description\": \"Descripcion de producto 1 modified\",\n \"selling\": False,\n \"price\": 60,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], 'Producto 1 modified')\n self.assertEqual(response.data['description'], 'Descripcion de producto 1 modified')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '60.0')\n self.assertEqual(response.data['category']['name'], 'deportes')",
"async def test_update_missing_field(self):\n await self.collection.create({'id': 'foo', 'value': 'bar'})\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.update('foo', {})\n self.assertEqual(\n 'Error: \"value\": Required', str(cm.exception))",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})",
"def test_update_product_not_found(self):\n test_product = ProductFactory()\n resp = self.app.put(\n \"/products/0\",\n json=test_product.serialize(),\n content_type=\"application/json\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)",
"def test_update_product_without_authentication(self):\n post_data = {\n \"category\": {\n \"name\": \"general\",\n \"index\": 0\n },\n \"name\": \"Producto 2 modified\",\n \"description\": \"Descripcion de producto 2 modified\",\n \"selling\": True,\n \"price\": 20,\n }\n\n response = self.client.put('/api/1.0/products/1/', data=post_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)",
"def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)",
"def test_empty_data(self, client, users):\n user = users[0]\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' in str(response.content)",
"def test_invalid_update_kwarg(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)",
"def test_update_cart_invalid_attributes(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'InvalidAttribute': 'Cart2'})\n self.assertEqual('Cart1', self.cart_item_manager.get_cart(user_id, cart_id)['CartName'])",
"def test_full_update(self):\n self.assertEqual(Product.objects.count(), 2)\n self.assertEqual(self.product_1.name, 'Nike Vapor')\n self.assertEqual(self.product_1.sku, '44444444')\n self.assertEqual(self.product_1.category, self.category_1)\n self.assertEqual(self.product_1.description, 'Some product description')\n self.assertEqual(self.product_1.price, 129.99)\n self.assertEqual(self.product_1.featured, False)\n\n payload = {\n 'name': 'Updated name',\n 'category': self.category_2.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99,\n 'featured': True\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.put(\n '/api/products/{}/'.format(self.product_1.id),\n data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 2)\n\n product = Product.objects.get(id=self.product_1.id)\n self.assertEqual(product.name, 'Updated name')\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.category, self.category_2)\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)\n self.assertEqual(product.featured, True)",
"def test_update_customer_invalid_payload(self):\n update_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 1})\n\n payload = {\"first_name\": \"Dennis\", \"last_name\": \"\", \"is_active\": True}\n\n response = self.client.put(update_customer_url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)",
"def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)",
"def test_unauthorized_product_update(self):\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n product_update = dict(\n prod_name='NY_jeans',\n category='denims',\n stock=50,\n price=180\n )\n resp = self.client.put(\n '/api/v1/products/1',\n content_type='application/json',\n data=json.dumps(product_update),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_full_update_product(self):\n view = ProductUpdateView.as_view({'patch': 'update'})\n uri = reverse('products:update-product', kwargs={'pk': self.product_id})\n data = {\n \"id\": self.product_id,\n \"name\": \"Headphone updated\",\n \"description\": \"New version\",\n \"price\": \"800\",\n \"price_currency\": \"USD\",\n \"is_available\": True\n }\n request = self.factory.patch(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request, pk=self.product_id)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')\n data['price'] = float(data['price'])\n response.data['price'] = float(response.data['price'])\n self.assertEqual(response.data, data)",
"def test_do_cell_update_ignores_unknown_fields(self, mock_update):\n client = mock.Mock()\n inventory = mock.Mock()\n inventory.cells = cells.CellManager(mock.ANY,\n mock.ANY,\n 'http://127.0.0.1/')\n client.inventory = mock.Mock(name='inventory')\n client.inventory.return_value = inventory\n invalid_input = Namespace(region=1,\n id=1,\n name='mock_cell',\n invalid=True)\n cells_shell.do_cell_update(client, invalid_input)\n vars(invalid_input).pop('region')\n vars(invalid_input).pop('invalid')\n mock_update.assert_called_once_with(**vars(invalid_input))",
"def test_update_no_customer(self):\n set_up_db()\n with self.assertRaises(ValueError):\n update_customer_credit(2, 5.50)",
"def test_update_customer_fails(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n\n with self.assertRaises(IntegrityError):\n customer.email = None\n customer.save()"
] | [
"0.802753",
"0.7568224",
"0.7292608",
"0.7203753",
"0.7033387",
"0.69645137",
"0.6949159",
"0.6943705",
"0.69149756",
"0.691329",
"0.690093",
"0.6899394",
"0.6895893",
"0.68875086",
"0.6824652",
"0.6816774",
"0.68002254",
"0.6785967",
"0.6772013",
"0.67623484",
"0.6751942",
"0.6750954",
"0.671549",
"0.6710964",
"0.66895324",
"0.66697335",
"0.66623724",
"0.66439843",
"0.662813",
"0.6616762"
] | 0.8045699 | 0 |